Mercurial > hg
changeset 20773:efbf15979538
merge with stable
author | Matt Mackall <mpm@selenic.com> |
---|---|
date | Tue, 18 Mar 2014 14:25:28 -0500 |
parents | d9378bfa0af0 (diff) 03774a2b6991 (current diff) |
children | cdc3ac896997 |
files | hgext/histedit.py hgext/mq.py hgext/rebase.py mercurial/cmdutil.py mercurial/commands.py mercurial/localrepo.py tests/test-commit.t |
diffstat | 159 files changed, 4671 insertions(+), 4291 deletions(-) [+] |
line wrap: on
line diff
--- a/Makefile Mon Mar 17 14:57:13 2014 -0400 +++ b/Makefile Tue Mar 18 14:25:28 2014 -0500 @@ -71,7 +71,7 @@ install-home: install-home-bin install-home-doc install-home-bin: build - $(PYTHON) setup.py $(PURE) install --home="$(HOME)" --force + $(PYTHON) setup.py $(PURE) install --home="$(HOME)" --prefix="" --force install-home-doc: doc cd doc && $(MAKE) $(MFLAGS) PREFIX="$(HOME)" install @@ -102,7 +102,7 @@ update-pot: i18n/hg.pot -i18n/hg.pot: $(PYFILES) $(DOCFILES) +i18n/hg.pot: $(PYFILES) $(DOCFILES) i18n/posplit i18n/hggettext $(PYTHON) i18n/hggettext mercurial/commands.py \ hgext/*.py hgext/*/__init__.py \ mercurial/fileset.py mercurial/revset.py \
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/contrib/Makefile.python Tue Mar 18 14:25:28 2014 -0500 @@ -0,0 +1,79 @@ +PYTHONVER=2.7.6 +PYTHONNAME=python- +PREFIX=$(HOME)/bin/prefix-$(PYTHONNAME)$(PYTHONVER) +SYMLINKDIR=$(HOME)/bin + +help: + @echo + @echo 'Make a custom installation of a Python version' + @echo + @echo 'Common make parameters:' + @echo ' PYTHONVER=... [$(PYTHONVER)]' + @echo ' PREFIX=... [$(PREFIX)]' + @echo ' SYMLINKDIR=... [$(SYMLINKDIR) creating $(PYTHONNAME)$(PYTHONVER)]' + @echo + @echo 'Common make targets:' + @echo ' python - install Python $$PYTHONVER in $$PREFIX' + @echo ' symlink - create a $$SYMLINKDIR/$(PYTHONNAME)$$PYTHONVER symlink' + @echo + @echo 'Example: create a temporary Python installation:' + @echo ' $$ make -f Makefile.python python PYTHONVER=2.4 PREFIX=/tmp/p24' + @echo ' $$ /tmp/p24/bin/python -V' + @echo ' Python 2.4' + @echo + @echo 'Some external libraries are required for building Python: zlib bzip2 openssl.' + @echo 'Make sure their development packages are installed systemwide.' 
+# fedora: yum install zlib-devel bzip2-devel openssl-devel +# debian: apt-get install zlib1g-dev libbz2-dev libssl-dev + @echo + @echo 'To build a nice collection of interesting Python versions:' + @echo ' $$ for v in 2.{4{,.2,.3},5{,.6},6{,.1,.2,.9},7{,.6}}; do' + @echo ' make -f Makefile.python symlink PYTHONVER=$$v || break; done' + @echo 'To run a Mercurial test on all these Python versions:' + @echo ' $$ for py in `cd ~/bin && ls $(PYTHONNAME)2.*`; do' + @echo ' echo $$py; $$py run-tests.py test-http.t; echo; done' + @echo + +export LANGUAGE=C +export LC_ALL=C + +python: $(PREFIX)/bin/python docutils + printf 'import sys, zlib, bz2, docutils\nif sys.version_info >= (2,6):\n import ssl' | $(PREFIX)/bin/python + +PYTHON_SRCDIR=Python-$(PYTHONVER) +PYTHON_SRCFILE=$(PYTHON_SRCDIR).tgz + +$(PREFIX)/bin/python: + [ -f $(PYTHON_SRCFILE) ] || wget http://www.python.org/ftp/python/$(PYTHONVER)/$(PYTHON_SRCFILE) || [ -f $(PYTHON_SRCFILE) ] + rm -rf $(PYTHON_SRCDIR) + tar xf $(PYTHON_SRCFILE) + # Ubuntu disables SSLv2 the hard way, disable it on old Pythons too + -sed -i 's,self.*SSLv2_method(),0;//\0,g' $(PYTHON_SRCDIR)/Modules/_ssl.c + # Find multiarch system libraries on Ubuntu with Python 2.4.x + # http://lipyrary.blogspot.dk/2011/05/how-to-compile-python-on-ubuntu-1104.html + -sed -i "s|lib_dirs = .* \[|\0'/usr/lib/`dpkg-architecture -qDEB_HOST_MULTIARCH`',|g" $(PYTHON_SRCDIR)/setup.py + # Find multiarch system libraries on Ubuntu and disable fortify error when setting argv + LDFLAGS="-L/usr/lib/`dpkg-architecture -qDEB_HOST_MULTIARCH`"; \ + BASECFLAGS=-U_FORTIFY_SOURCE; \ + export LDFLAGS BASECFLAGS; \ + cd $(PYTHON_SRCDIR) && ./configure --prefix=$(PREFIX) && make all SVNVERSION=pwd && make install + printf 'import sys, zlib, bz2\nif sys.version_info >= (2,6):\n import ssl' | $(PREFIX)/bin/python + rm -rf $(PYTHON_SRCDIR) + +DOCUTILSVER=0.11 +DOCUTILS_SRCDIR=docutils-$(DOCUTILSVER) +DOCUTILS_SRCFILE=$(DOCUTILS_SRCDIR).tar.gz + +docutils: $(PREFIX)/bin/python + 
@$(PREFIX)/bin/python -c 'import docutils' || ( set -ex; \ + [ -f $(DOCUTILS_SRCFILE) ] || wget http://downloads.sourceforge.net/project/docutils/docutils/$(DOCUTILSVER)/$(DOCUTILS_SRCFILE) || [ -f $(DOCUTILS_SRCFILE) ]; \ + rm -rf $(DOCUTILS_SRCDIR); \ + tar xf $(DOCUTILS_SRCFILE); \ + cd $(DOCUTILS_SRCDIR) && $(PREFIX)/bin/python setup.py install --prefix=$(PREFIX); \ + $(PREFIX)/bin/python -c 'import docutils'; \ + rm -rf $(DOCUTILS_SRCDIR); ) + +symlink: python $(SYMLINKDIR) + ln -sf $(PREFIX)/bin/python $(SYMLINKDIR)/$(PYTHONNAME)$(PYTHONVER) + +.PHONY: help python docutils symlink
--- a/contrib/check-code.py Mon Mar 17 14:57:13 2014 -0400 +++ b/contrib/check-code.py Tue Mar 18 14:25:28 2014 -0500 @@ -121,6 +121,7 @@ (r'^( *)\t', "don't use tabs to indent"), (r'sed (-e )?\'(\d+|/[^/]*/)i(?!\\\n)', "put a backslash-escaped newline after sed 'i' command"), + (r'^diff *-\w*u.*$\n(^ \$ |^$)', "prefix diff -u with cmp"), ], # warnings [ @@ -150,6 +151,9 @@ "explicit exit code checks unnecessary"), (uprefix + r'set -e', "don't use set -e"), (uprefix + r'(\s|fi\b|done\b)', "use > for continued lines"), + (uprefix + r'.*:\.\S*/', "x:.y in a path does not work on msys, rewrite " + "as x://.y, or see `hg log -k msys` for alternatives", r'-\S+:\.|' #-Rxxx + 'hg pull -q file:../test'), # in test-pull.t which is skipped on windows (r'^ saved backup bundle to \$TESTTMP.*\.hg$', winglobmsg), (r'^ changeset .* references (corrupted|missing) \$TESTTMP/.*[^)]$', winglobmsg), @@ -162,6 +166,8 @@ (r'^ moving \S+/.*[^)]$', winglobmsg), (r'^ no changes made to subrepo since.*/.*[^)]$', winglobmsg), (r'^ .*: largefile \S+ not available from file:.*/.*[^)]$', winglobmsg), + (r'^ .*file://\$TESTTMP', + 'write "file:/*/$TESTTMP" + (glob) to match on windows too'), ], # warnings [ @@ -194,6 +200,8 @@ 'use "import foo.bar" on its own line instead.'), (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"), (r'\breduce\s*\(.*', "reduce is not available in Python 3+"), + (r'dict\(.*=', 'dict() is different in Py2 and 3 and is slower than {}', + 'dict-from-generator'), (r'\.has_key\b', "dict.has_key is not available in Python 3+"), (r'\s<>\s', '<> operator is not available in Python 3+, use !='), (r'^\s*\t', "don't use tabs"), @@ -306,6 +314,7 @@ txtpats = [ [ ('\s$', 'trailing whitespace'), + ('.. note::[ \n][^\n]', 'add two newlines after note::') ], [] ]
--- a/contrib/hgfixes/fix_bytesmod.py Mon Mar 17 14:57:13 2014 -0400 +++ b/contrib/hgfixes/fix_bytesmod.py Tue Mar 18 14:25:28 2014 -0500 @@ -33,10 +33,11 @@ ''' def transform(self, node, results): - if self.filename in blacklist: - return - elif self.filename == 'mercurial/util.py': - touch_import('.', 'py3kcompat', node=node) + for bfn in blacklist: + if self.filename.endswith(bfn): + return + if not self.filename.endswith('mercurial/py3kcompat.py'): + touch_import('mercurial', 'py3kcompat', node=node) formatstr = results['formatstr'].clone() data = results['data'].clone() @@ -60,4 +61,3 @@ call = Call(Name('bytesformatter', prefix=' '), args) return call -
--- a/contrib/hgk Mon Mar 17 14:57:13 2014 -0400 +++ b/contrib/hgk Tue Mar 18 14:25:28 2014 -0500 @@ -208,7 +208,7 @@ exit 1 } set leftover {} - fconfigure $commfd -blocking 0 -translation lf + fconfigure $commfd -blocking 0 -translation lf -eofchar {} fileevent $commfd readable [list getcommitlines $commfd] $canv delete all $canv create text 3 3 -anchor nw -text "Reading commits..." \ @@ -795,8 +795,8 @@ # set the focus back to the toplevel for any click outside # the entry widgets proc click {w} { - global entries - foreach e $entries { + global ctext entries + foreach e [concat $entries $ctext] { if {$w == $e} return } focus . @@ -2546,6 +2546,7 @@ proc selnextline {dir} { global selectedline + focus . if {![info exists selectedline]} return set l [expr $selectedline + $dir] unmarkmatches @@ -2583,6 +2584,7 @@ proc goback {} { global history historyindex + focus . if {$historyindex > 1} { incr historyindex -1 @@ -2597,6 +2599,7 @@ proc goforw {} { global history historyindex + focus . if {$historyindex < [llength $history]} { set cmd [lindex $history $historyindex]
--- a/contrib/import-checker.py Mon Mar 17 14:57:13 2014 -0400 +++ b/contrib/import-checker.py Tue Mar 18 14:25:28 2014 -0500 @@ -11,12 +11,15 @@ def dotted_name_of_path(path): """Given a relative path to a source file, return its dotted module name. - >>> dotted_name_of_path('mercurial/error.py') 'mercurial.error' + >>> dotted_name_of_path('zlibmodule.so') + 'zlib' """ parts = path.split('/') - parts[-1] = parts[-1][:-3] # remove .py + parts[-1] = parts[-1].split('.', 1)[0] # remove .py and .so and .ARCH.so + if parts[-1].endswith('module'): + parts[-1] = parts[-1][:-6] return '.'.join(parts) @@ -136,7 +139,7 @@ http://bugs.python.org/issue19510. >>> list(verify_stdlib_on_own_line('import sys, foo')) - ['mixed stdlib and relative imports:\\n foo, sys'] + ['mixed imports\\n stdlib: sys\\n relative: foo'] >>> list(verify_stdlib_on_own_line('import sys, os')) [] >>> list(verify_stdlib_on_own_line('import foo, bar')) @@ -144,13 +147,13 @@ """ for node in ast.walk(ast.parse(source)): if isinstance(node, ast.Import): - from_stdlib = {} + from_stdlib = {False: [], True: []} for n in node.names: - from_stdlib[n.name] = n.name in stdlib_modules - num_std = len([x for x in from_stdlib.values() if x]) - if num_std not in (len(from_stdlib.values()), 0): - yield ('mixed stdlib and relative imports:\n %s' % - ', '.join(sorted(from_stdlib.iterkeys()))) + from_stdlib[n.name in stdlib_modules].append(n.name) + if from_stdlib[True] and from_stdlib[False]: + yield ('mixed imports\n stdlib: %s\n relative: %s' % + (', '.join(sorted(from_stdlib[True])), + ', '.join(sorted(from_stdlib[False])))) class CircularImport(Exception): pass
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/contrib/revsetbenchmarks.sh Tue Mar 18 14:25:28 2014 -0500 @@ -0,0 +1,89 @@ +#!/bin/bash + +# Measure the performance of a list of revsets against multiple revisions +# defined by parameter. Checkout one by one and run perfrevset with every +# revset in the list to benchmark its performance. +# +# - First argument is a revset of mercurial own repo to runs against. +# - Second argument is the file from which the revset array will be taken +# If second argument is omitted read it from standard input +# +# You should run this from the root of your mercurial repository. +# +# This script also does one run of the current version of mercurial installed +# to compare performance. + +HG="hg update" +PERF="./hg perfrevset" +BASE_PERF="hg perfrevset" + +TARGETS=$1 +shift +# read from a file or from standard output +if [ $# -ne 0 ]; then + readarray REVSETS < $1 +else + readarray REVSETS +fi + +hg update --quiet + +echo "Starting time benchmarking" +echo + +echo "Revsets to benchmark" +echo "----------------------------" + +for (( j = 0; j < ${#REVSETS[@]}; j++ )); +do + echo "${j}) ${REVSETS[$j]}" +done + +echo "----------------------------" +echo + +# Benchmark baseline +echo "Benchmarking baseline" + +for (( j = 0; j < ${#REVSETS[@]}; j++ )); + do + echo -n "${j}) " + $BASE_PERF "${REVSETS[$j]}" +done + +echo +echo + +# Benchmark revisions +for i in $(hg log --template='{rev}\n' --rev $TARGETS); +do + echo "----------------------------" + echo -n "Revision: " + hg log -r $i --template "{desc|firstline}" + + echo "----------------------------" + $HG $i + for (( j = 0; j < ${#REVSETS[@]}; j++ )); + do + echo -n "${j}) " + $PERF "${REVSETS[$j]}" + done + echo "----------------------------" +done + +$HG + +# Benchmark current code +echo "Benchmarking current code" + +for (( j = 0; j < ${#REVSETS[@]}; j++ )); + do + echo -n "${j}) " + $PERF "${REVSETS[$j]}" +done + + +echo +echo "Time benchmarking finished" + +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/contrib/revsetbenchmarks.txt Tue Mar 18 14:25:28 2014 -0500 @@ -0,0 +1,8 @@ +draft() +draft() and ::tip +roots(0::tip) +author(lmoscovicz) +author(lmoscovicz) or author(mpm) +max(tip:0) +min(0:tip) +min(0::)
--- a/contrib/synthrepo.py Mon Mar 17 14:57:13 2014 -0400 +++ b/contrib/synthrepo.py Tue Mar 18 14:25:28 2014 -0500 @@ -152,7 +152,7 @@ if lastctx.rev() != nullrev: interarrival[roundto(ctx.date()[0] - lastctx.date()[0], 300)] += 1 diff = sum((d.splitlines() - for d in ctx.diff(pctx, opts=dict(git=True))), []) + for d in ctx.diff(pctx, opts={'git': True})), []) fileadds, diradds, fileremoves, filechanges = 0, 0, 0, 0 for filename, mar, lineadd, lineremove, binary in parsegitdiff(diff): if binary: @@ -189,21 +189,21 @@ def pronk(d): return sorted(d.iteritems(), key=lambda x: x[1], reverse=True) - json.dump(dict(revs=len(revs), - lineschanged=pronk(lineschanged), - children=pronk(invchildren), - fileschanged=pronk(fileschanged), - filesadded=pronk(filesadded), - linesinfilesadded=pronk(linesinfilesadded), - dirsadded=pronk(dirsadded), - filesremoved=pronk(filesremoved), - linelengths=pronk(linelengths), - parents=pronk(parents), - p1distance=pronk(p1distance), - p2distance=pronk(p2distance), - interarrival=pronk(interarrival), - tzoffset=pronk(tzoffset), - ), + json.dump({'revs': len(revs), + 'lineschanged': pronk(lineschanged), + 'children': pronk(invchildren), + 'fileschanged': pronk(fileschanged), + 'filesadded': pronk(filesadded), + 'linesinfilesadded': pronk(linesinfilesadded), + 'dirsadded': pronk(dirsadded), + 'filesremoved': pronk(filesremoved), + 'linelengths': pronk(linelengths), + 'parents': pronk(parents), + 'p1distance': pronk(p1distance), + 'p2distance': pronk(p2distance), + 'interarrival': pronk(interarrival), + 'tzoffset': pronk(tzoffset), + }, fp) fp.close()
--- a/contrib/tmplrewrite.py Mon Mar 17 14:57:13 2014 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,23 +0,0 @@ -#!/usr/bin/python -import sys, os, re - -IGNORE = ['.css', '.py'] -oldre = re.compile('#([\w\|%]+)#') - -def rewrite(fn): - f = open(fn) - new = open(fn + '.new', 'wb') - for ln in f: - new.write(oldre.sub('{\\1}', ln)) - new.close() - f.close() - os.rename(new.name, f.name) - -if __name__ == '__main__': - if len(sys.argv) < 2: - print 'usage: python tmplrewrite.py [file [file [file]]]' - for fn in sys.argv[1:]: - if os.path.splitext(fn) in IGNORE: - continue - print 'rewriting %s...' % fn - rewrite(fn)
--- a/doc/gendoc.py Mon Mar 17 14:57:13 2014 -0400 +++ b/doc/gendoc.py Tue Mar 18 14:25:28 2014 -0500 @@ -50,6 +50,9 @@ allopts[-1] += " <%s[+]>" % optlabel elif (default is not None) and not isinstance(default, bool): allopts[-1] += " <%s>" % optlabel + if '\n' in desc: + # only remove line breaks and indentation + desc = ' '.join(l.lstrip() for l in desc.split('\n')) desc += default and _(" (default: %s)") % default or "" yield (", ".join(allopts), desc) @@ -153,6 +156,8 @@ continue d = get_cmd(h[f], cmdtable) ui.write(sectionfunc(d['cmd'])) + # short description + ui.write(d['desc'][0]) # synopsis ui.write("::\n\n") synopsislines = d['synopsis'].splitlines()
--- a/hgext/bugzilla.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/bugzilla.py Tue Mar 18 14:25:28 2014 -0500 @@ -620,7 +620,7 @@ ver = self.bzproxy.Bugzilla.version()['version'].split('.') self.bzvermajor = int(ver[0]) self.bzverminor = int(ver[1]) - self.bzproxy.User.login(dict(login=user, password=passwd)) + self.bzproxy.User.login({'login': user, 'password': passwd}) def transport(self, uri): if urlparse.urlparse(uri, "http")[0] == "https": @@ -630,13 +630,15 @@ def get_bug_comments(self, id): """Return a string with all comment text for a bug.""" - c = self.bzproxy.Bug.comments(dict(ids=[id], include_fields=['text'])) + c = self.bzproxy.Bug.comments({'ids': [id], + 'include_fields': ['text']}) return ''.join([t['text'] for t in c['bugs'][str(id)]['comments']]) def filter_real_bug_ids(self, bugs): - probe = self.bzproxy.Bug.get(dict(ids=sorted(bugs.keys()), - include_fields=[], - permissive=True)) + probe = self.bzproxy.Bug.get({'ids': sorted(bugs.keys()), + 'include_fields': [], + 'permissive': True, + }) for badbug in probe['faults']: id = badbug['id'] self.ui.status(_('bug %d does not exist\n') % id) @@ -717,10 +719,10 @@ than the subject line, and leave a blank line after it. 
''' user = self.map_committer(committer) - matches = self.bzproxy.User.get(dict(match=[user])) + matches = self.bzproxy.User.get({'match': [user]}) if not matches['users']: user = self.ui.config('bugzilla', 'user', 'bugs') - matches = self.bzproxy.User.get(dict(match=[user])) + matches = self.bzproxy.User.get({'match': [user]}) if not matches['users']: raise util.Abort(_("default bugzilla user %s email not found") % user) @@ -879,14 +881,13 @@ mapfile = self.ui.config('bugzilla', 'style') tmpl = self.ui.config('bugzilla', 'template') - t = cmdutil.changeset_templater(self.ui, self.repo, - False, None, mapfile, False) if not mapfile and not tmpl: tmpl = _('changeset {node|short} in repo {root} refers ' 'to bug {bug}.\ndetails:\n\t{desc|tabindent}') if tmpl: tmpl = templater.parsestring(tmpl, quoted=False) - t.use_template(tmpl) + t = cmdutil.changeset_templater(self.ui, self.repo, + False, None, tmpl, mapfile, False) self.ui.pushbuffer() t.show(ctx, changes=ctx.changeset(), bug=str(bugid),
--- a/hgext/churn.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/churn.py Tue Mar 18 14:25:28 2014 -0500 @@ -18,10 +18,10 @@ def maketemplater(ui, repo, tmpl): tmpl = templater.parsestring(tmpl, quoted=False) try: - t = cmdutil.changeset_templater(ui, repo, False, None, None, False) + t = cmdutil.changeset_templater(ui, repo, False, None, tmpl, + None, False) except SyntaxError, inst: raise util.Abort(inst.args[0]) - t.use_template(tmpl) return t def changedlines(ui, repo, ctx1, ctx2, fns):
--- a/hgext/color.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/color.py Tue Mar 18 14:25:28 2014 -0500 @@ -424,6 +424,16 @@ _("when to colorize (boolean, always, auto, or never)"), _('TYPE'))) +def debugcolor(ui, repo, **opts): + global _styles + _styles = {} + for effect in _effects.keys(): + _styles[effect] = effect + ui.write(('colormode: %s\n') % ui._colormode) + ui.write(_('available colors:\n')) + for label, colors in _styles.items(): + ui.write(('%s\n') % colors, label=label) + if os.name != 'nt': w32effects = None else: @@ -553,3 +563,8 @@ finally: # Explicitly reset original attributes _kernel32.SetConsoleTextAttribute(stdout, origattr) + +cmdtable = { + 'debugcolor': + (debugcolor, [], ('hg debugcolor')) +}
--- a/hgext/convert/__init__.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/convert/__init__.py Tue Mar 18 14:25:28 2014 -0500 @@ -141,6 +141,14 @@ branch names. This can be used to (for instance) move code in one repository from "default" to a named branch. + The closemap is a file that allows closing of a branch. This is useful if + you want to close a branch. Each entry contains a revision or hash + separated by white space. + + The tagmap is a file that is exactly analogous to the branchmap. This will + rename tags on the fly and prevent the 'update tags' commit usually found + at the end of a convert process. + Mercurial Source ################ @@ -319,6 +327,10 @@ _('splice synthesized history into place'), _('FILE')), ('', 'branchmap', '', _('change branch names while converting'), _('FILE')), + ('', 'closemap', '', + _('closes given revs'), _('FILE')), + ('', 'tagmap', '', + _('change tag names while converting'), _('FILE')), ('', 'branchsort', None, _('try to sort changesets by branches')), ('', 'datesort', None, _('try to sort changesets by date')), ('', 'sourcesort', None, _('preserve source changesets order')),
--- a/hgext/convert/common.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/convert/common.py Tue Mar 18 14:25:28 2014 -0500 @@ -63,13 +63,13 @@ self.encoding = 'utf-8' - def checkhexformat(self, revstr): + def checkhexformat(self, revstr, mapname='splicemap'): """ fails if revstr is not a 40 byte hex. mercurial and git both uses such format for their revision numbering """ if not re.match(r'[0-9a-fA-F]{40,40}$', revstr): - raise util.Abort(_('splicemap entry %s is not a valid revision' - ' identifier') % revstr) + raise util.Abort(_('%s entry %s is not a valid revision' + ' identifier') % (mapname, revstr)) def before(self): pass @@ -172,7 +172,7 @@ """ return {} - def checkrevformat(self, revstr): + def checkrevformat(self, revstr, mapname='splicemap'): """revstr is a string that describes a revision in the given source control system. Return true if revstr has correct format. @@ -192,10 +192,6 @@ self.path = path self.created = [] - def getheads(self): - """Return a list of this repository's heads""" - raise NotImplementedError - def revmapfile(self): """Path to a file that will contain lines source_rev_id sink_rev_id @@ -208,7 +204,8 @@ mapping equivalent authors identifiers for each system.""" return None - def putcommit(self, files, copies, parents, commit, source, revmap): + def putcommit(self, files, copies, parents, commit, source, + revmap, tagmap): """Create a revision with all changed files listed in 'files' and having listed parents. 'commit' is a commit object containing at a minimum the author, date, and message for this
--- a/hgext/convert/convcmd.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/convert/convcmd.py Tue Mar 18 14:25:28 2014 -0500 @@ -120,6 +120,43 @@ self.splicemap = self.parsesplicemap(opts.get('splicemap')) self.branchmap = mapfile(ui, opts.get('branchmap')) + self.closemap = self.parseclosemap(opts.get('closemap')) + self.tagmap = mapfile(ui, opts.get('tagmap')) + + def parseclosemap(self, path): + """ check and validate the closemap format and + return a list of revs to close. + Format checking has two parts. + 1. generic format which is same across all source types + 2. specific format checking which may be different for + different source type. This logic is implemented in + checkrevformat function in source files like + hg.py, subversion.py etc. + """ + + if not path: + return [] + m = [] + try: + fp = open(path, 'r') + for i, line in enumerate(fp): + line = line.splitlines()[0].rstrip() + if not line: + # Ignore blank lines + continue + # split line + lex = shlex.shlex(line, posix=True) + lex.whitespace_split = True + lex.whitespace += ',' + line = list(lex) + for part in line: + self.source.checkrevformat(part, 'closemap') + m.extend(line) + # if file does not exist or error reading, exit + except IOError: + raise util.Abort(_('closemap file not found or error reading %s:') + % path) + return m def parsesplicemap(self, path): """ check and validate the splicemap format and @@ -408,8 +445,11 @@ except KeyError: parents = [b[0] for b in pbranches] source = progresssource(self.ui, self.source, len(files)) + if self.closemap and rev in self.closemap: + commit.extra['close'] = 1 + newnode = self.dest.putcommit(files, copies, parents, commit, - source, self.map) + source, self.map, self.tagmap) source.close() self.source.converted(rev, newnode) self.map[rev] = newnode @@ -445,6 +485,9 @@ self.ui.progress(_('converting'), None) tags = self.source.gettags() + tags = dict((self.tagmap.get(k, k), v) + for k, v in tags.iteritems()) + ctags = {} for k in tags: v = tags[k]
--- a/hgext/convert/git.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/convert/git.py Tue Mar 18 14:25:28 2014 -0500 @@ -297,7 +297,7 @@ return bookmarks - def checkrevformat(self, revstr): + def checkrevformat(self, revstr, mapname='splicemap'): """ git revision string is a 40 byte hex """ - self.checkhexformat(revstr) + self.checkhexformat(revstr, mapname)
--- a/hgext/convert/hg.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/convert/hg.py Tue Mar 18 14:25:28 2014 -0500 @@ -25,6 +25,9 @@ from common import NoRepo, commit, converter_source, converter_sink +import re +sha1re = re.compile(r'\b[0-9a-f]{6,40}\b') + class mercurial_sink(converter_sink): def __init__(self, ui, path): converter_sink.__init__(self, ui, path) @@ -75,10 +78,6 @@ def authorfile(self): return self.repo.join("authormap") - def getheads(self): - h = self.repo.changelog.heads() - return [hex(x) for x in h] - def setbranch(self, branch, pbranches): if not self.clonebranches: return @@ -117,7 +116,7 @@ self.repo.pull(prepo, [prepo.lookup(h) for h in heads]) self.before() - def _rewritetags(self, source, revmap, data): + def _rewritetags(self, source, revmap, tagmap, data): fp = cStringIO.StringIO() for line in data.splitlines(): s = line.split(' ', 1) @@ -126,17 +125,18 @@ revid = revmap.get(source.lookuprev(s[0])) if not revid: continue - fp.write('%s %s\n' % (revid, s[1])) + fp.write('%s %s\n' % (revid, tagmap.get(s[1], s[1]))) return fp.getvalue() - def putcommit(self, files, copies, parents, commit, source, revmap): + def putcommit(self, files, copies, parents, commit, source, + revmap, tagmap): files = dict(files) def getfilectx(repo, memctx, f): v = files[f] data, mode = source.getfile(f, v) if f == '.hgtags': - data = self._rewritetags(source, revmap, data) + data = self._rewritetags(source, revmap, tagmap, data) return context.memfilectx(f, data, 'l' in mode, 'x' in mode, copies.get(f)) @@ -157,6 +157,14 @@ p2 = parents.pop(0) text = commit.desc + + sha1s = re.findall(sha1re, text) + for sha1 in sha1s: + oldrev = source.lookuprev(sha1) + newrev = revmap.get(oldrev) + if newrev is not None: + text = text.replace(sha1, newrev[:len(sha1)]) + extra = commit.extra.copy() if self.branchnames and commit.branch: extra['branch'] = commit.branch @@ -190,14 +198,36 @@ parentctx = None tagparent = nullid - try: - oldlines = 
sorted(parentctx['.hgtags'].data().splitlines(True)) - except Exception: - oldlines = [] + oldlines = set() + for branch, heads in self.repo.branchmap().iteritems(): + for h in heads: + if '.hgtags' in self.repo[h]: + oldlines.update( + set(self.repo[h]['.hgtags'].data().splitlines(True))) + oldlines = sorted(list(oldlines)) newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags]) if newlines == oldlines: return None, None + + # if the old and new tags match, then there is nothing to update + oldtags = set() + newtags = set() + for line in oldlines: + s = line.strip().split(' ', 1) + if len(s) != 2: + continue + oldtags.add(s[1]) + for line in newlines: + s = line.strip().split(' ', 1) + if len(s) != 2: + continue + if s[1] not in oldtags: + newtags.add(s[1].strip()) + + if not newtags: + return None, None + data = "".join(newlines) def getfilectx(repo, memctx, f): return context.memfilectx(f, data, False, False, None) @@ -412,6 +442,6 @@ def getbookmarks(self): return bookmarks.listbookmarks(self.repo) - def checkrevformat(self, revstr): + def checkrevformat(self, revstr, mapname='splicemap'): """ Mercurial, revision string is a 40 byte hex """ - self.checkhexformat(revstr) + self.checkhexformat(revstr, mapname)
--- a/hgext/convert/subversion.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/convert/subversion.py Tue Mar 18 14:25:28 2014 -0500 @@ -41,13 +41,30 @@ pass def revsplit(rev): - """Parse a revision string and return (uuid, path, revnum).""" - url, revnum = rev.rsplit('@', 1) - parts = url.split('/', 1) + """Parse a revision string and return (uuid, path, revnum). + >>> revsplit('svn:a2147622-4a9f-4db4-a8d3-13562ff547b2' + ... '/proj%20B/mytrunk/mytrunk@1') + ('a2147622-4a9f-4db4-a8d3-13562ff547b2', '/proj%20B/mytrunk/mytrunk', 1) + >>> revsplit('svn:8af66a51-67f5-4354-b62c-98d67cc7be1d@1') + ('', '', 1) + >>> revsplit('@7') + ('', '', 7) + >>> revsplit('7') + ('', '', 0) + >>> revsplit('bad') + ('', '', 0) + """ + parts = rev.rsplit('@', 1) + revnum = 0 + if len(parts) > 1: + revnum = int(parts[1]) + parts = parts[0].split('/', 1) + uuid = '' mod = '' - if len(parts) > 1: + if len(parts) > 1 and parts[0].startswith('svn:'): + uuid = parts[0][4:] mod = '/' + parts[1] - return parts[0][4:], mod, int(revnum) + return uuid, mod, revnum def quote(s): # As of svn 1.7, many svn calls expect "canonical" paths. In @@ -157,6 +174,30 @@ self._stdout.close() self._stdout = None +class directlogstream(list): + """Direct revision log iterator. + This can be used for debugging and development but it will probably leak + memory and is not suitable for real conversions.""" + def __init__(self, url, paths, start, end, limit=0, + discover_changed_paths=True, strict_node_history=False): + + def receiver(orig_paths, revnum, author, date, message, pool): + paths = {} + if orig_paths is not None: + for k, v in orig_paths.iteritems(): + paths[k] = changedpath(v) + self.append((paths, revnum, author, date, message)) + + # Use an ra of our own so that our parent can consume + # our results without confusing the server. 
+ t = transport.SvnRaTransport(url=url) + svn.ra.get_log(t.ra, paths, start, end, limit, + discover_changed_paths, + strict_node_history, + receiver) + + def close(self): + pass # Check to see if the given path is a local Subversion repo. Verify this by # looking for several svn-specific files and directories in the given @@ -454,13 +495,13 @@ del self.commits[rev] return commit - def checkrevformat(self, revstr): + def checkrevformat(self, revstr, mapname='splicemap'): """ fails if revision format does not match the correct format""" if not re.match(r'svn:[0-9a-f]{8,8}-[0-9a-f]{4,4}-' '[0-9a-f]{4,4}-[0-9a-f]{4,4}-[0-9a-f]' '{12,12}(.*)\@[0-9]+$',revstr): - raise util.Abort(_('splicemap entry %s is not a valid revision' - ' identifier') % revstr) + raise util.Abort(_('%s entry %s is not a valid revision' + ' identifier') % (mapname, revstr)) def gettags(self): tags = {} @@ -975,6 +1016,9 @@ relpaths.append(p.strip('/')) args = [self.baseurl, relpaths, start, end, limit, discover_changed_paths, strict_node_history] + # undocumented feature: debugsvnlog can be disabled + if not self.ui.configbool('convert', 'svn.debugsvnlog', True): + return directlogstream(*args) arg = encodeargs(args) hgexe = util.hgexecutable() cmd = '%s debugsvnlog' % util.shellquote(hgexe) @@ -1183,7 +1227,8 @@ def revid(self, rev): return u"svn:%s@%s" % (self.uuid, rev) - def putcommit(self, files, copies, parents, commit, source, revmap): + def putcommit(self, files, copies, parents, commit, source, + revmap, tagmap): for parent in parents: try: return self.revid(self.childmap[parent])
--- a/hgext/extdiff.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/extdiff.py Tue Mar 18 14:25:28 2014 -0500 @@ -207,10 +207,10 @@ # Function to quote file/dir names in the argument string. # When not operating in 3-way mode, an empty string is # returned for parent2 - replace = dict(parent=dir1a, parent1=dir1a, parent2=dir1b, - plabel1=label1a, plabel2=label1b, - clabel=label2, child=dir2, - root=repo.root) + replace = {'parent': dir1a, 'parent1': dir1a, 'parent2': dir1b, + 'plabel1': label1a, 'plabel2': label1b, + 'clabel': label2, 'child': dir2, + 'root': repo.root} def quote(match): key = match.group()[1:] if not do3way and key == 'parent2': @@ -316,7 +316,7 @@ that revision is compared to the working directory, and, when no revisions are specified, the working directory files are compared to its parent.\ -''') % dict(path=util.uirepr(path)) +''') % {'path': util.uirepr(path)} # We must translate the docstring right away since it is # used as a format string. The string will unfortunately
--- a/hgext/hgcia.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/hgcia.py Tue Mar 18 14:25:28 2014 -0500 @@ -202,8 +202,7 @@ template = self.diffstat and self.dstemplate or self.deftemplate template = templater.parsestring(template, quoted=False) t = cmdutil.changeset_templater(self.ui, self.repo, False, None, - style, False) - t.use_template(template) + template, style, False) self.templater = t def strip(self, path):
--- a/hgext/histedit.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/histedit.py Tue Mar 18 14:25:28 2014 -0500 @@ -30,10 +30,12 @@ # Edit history between c561b4e977df and 7c2fd3b9020c # + # Commits are listed from least to most recent + # # Commands: # p, pick = use commit # e, edit = use commit, but stop for amending - # f, fold = use commit, but fold into previous commit (combines N and N-1) + # f, fold = use commit, but combine it with the one above # d, drop = remove commit from history # m, mess = edit message without changing commit content # @@ -49,10 +51,12 @@ # Edit history between c561b4e977df and 7c2fd3b9020c # + # Commits are listed from least to most recent + # # Commands: # p, pick = use commit # e, edit = use commit, but stop for amending - # f, fold = use commit, but fold into previous commit (combines N and N-1) + # f, fold = use commit, but combine it with the one above # d, drop = remove commit from history # m, mess = edit message without changing commit content # @@ -152,7 +156,6 @@ from mercurial import copies from mercurial import context from mercurial import hg -from mercurial import lock as lockmod from mercurial import node from mercurial import repair from mercurial import scmutil @@ -170,10 +173,12 @@ # i18n: command names and abbreviations must remain untranslated editcomment = _("""# Edit history between %s and %s # +# Commits are listed from least to most recent +# # Commands: # p, pick = use commit # e, edit = use commit, but stop for amending -# f, fold = use commit, but fold into previous commit (combines N and N-1) +# f, fold = use commit, but combine it with the one above # d, drop = remove commit from history # m, mess = edit message without changing commit content # @@ -642,23 +647,28 @@ if os.path.exists(repo.sjoin('undo')): os.unlink(repo.sjoin('undo')) - -def bootstrapcontinue(ui, repo, parentctx, rules, opts): - action, currentnode = rules.pop(0) - ctx = repo[currentnode] +def gatherchildren(repo, ctx): # is there any new 
commit between the expected parent and "." # # note: does not take non linear new change in account (but previous # implementation didn't used them anyway (issue3655) - newchildren = [c.node() for c in repo.set('(%d::.)', parentctx)] - if parentctx.node() != node.nullid: + newchildren = [c.node() for c in repo.set('(%d::.)', ctx)] + if ctx.node() != node.nullid: if not newchildren: - # `parentctxnode` should match but no result. This means that - # currentnode is not a descendant from parentctxnode. + # `ctx` should match but no result. This means that + # currentnode is not a descendant from ctx. msg = _('%s is not an ancestor of working directory') hint = _('use "histedit --abort" to clear broken state') - raise util.Abort(msg % parentctx, hint=hint) - newchildren.pop(0) # remove parentctxnode + raise util.Abort(msg % ctx, hint=hint) + newchildren.pop(0) # remove ctx + return newchildren + +def bootstrapcontinue(ui, repo, parentctx, rules, opts): + action, currentnode = rules.pop(0) + ctx = repo[currentnode] + + newchildren = gatherchildren(repo, parentctx) + # Commit dirty working directory if necessary new = None m, a, r, d = repo.status()[:4] @@ -896,7 +906,7 @@ # This would reduce bundle overhead repair.strip(ui, repo, c) finally: - lockmod.release(lock) + release(lock) def summaryhook(ui, repo): if not os.path.exists(repo.join('histedit-state')):
--- a/hgext/inotify/__init__.py Mon Mar 17 14:57:13 2014 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,93 +0,0 @@ -# __init__.py - inotify-based status acceleration for Linux -# -# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com> -# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com> -# -# This software may be used and distributed according to the terms of the -# GNU General Public License version 2 or any later version. - -'''accelerate status report using Linux's inotify service''' - -# todo: socket permissions - -from mercurial.i18n import _ -from mercurial import util -import server -from client import client, QueryFailed - -testedwith = 'internal' - -def serve(ui, repo, **opts): - '''start an inotify server for this repository''' - server.start(ui, repo.dirstate, repo.root, opts) - -def debuginotify(ui, repo, **opts): - '''debugging information for inotify extension - - Prints the list of directories being watched by the inotify server. - ''' - cli = client(ui, repo) - response = cli.debugquery() - - ui.write(_('directories being watched:\n')) - for path in response: - ui.write((' %s/\n') % path) - -def reposetup(ui, repo): - if not util.safehasattr(repo, 'dirstate'): - return - - class inotifydirstate(repo.dirstate.__class__): - - # We'll set this to false after an unsuccessful attempt so that - # next calls of status() within the same instance don't try again - # to start an inotify server if it won't start. - _inotifyon = True - - def status(self, match, subrepos, ignored, clean, unknown): - files = match.files() - if '.' 
in files: - files = [] - if (self._inotifyon and not ignored and not subrepos and - not self._dirty): - cli = client(ui, repo) - try: - result = cli.statusquery(files, match, False, - clean, unknown) - except QueryFailed, instr: - ui.debug(str(instr)) - # don't retry within the same hg instance - inotifydirstate._inotifyon = False - pass - else: - if ui.config('inotify', 'debug'): - r2 = super(inotifydirstate, self).status( - match, [], False, clean, unknown) - for c, a, b in zip('LMARDUIC', result, r2): - for f in a: - if f not in b: - ui.warn('*** inotify: %s +%s\n' % (c, f)) - for f in b: - if f not in a: - ui.warn('*** inotify: %s -%s\n' % (c, f)) - result = r2 - return result - return super(inotifydirstate, self).status( - match, subrepos, ignored, clean, unknown) - - repo.dirstate.__class__ = inotifydirstate - -cmdtable = { - 'debuginotify': - (debuginotify, [], ('hg debuginotify')), - '^inserve': - (serve, - [('d', 'daemon', None, _('run server in background')), - ('', 'daemon-pipefds', '', - _('used internally by daemon mode'), _('NUM')), - ('t', 'idle-timeout', '', - _('minutes to sit idle before exiting'), _('NUM')), - ('', 'pid-file', '', - _('name of file to write process ID to'), _('FILE'))], - _('hg inserve [OPTION]...')), - }
--- a/hgext/inotify/client.py Mon Mar 17 14:57:13 2014 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,173 +0,0 @@ -# client.py - inotify status client -# -# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com> -# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com> -# Copyright 2009 Nicolas Dumazet <nicdumz@gmail.com> -# -# This software may be used and distributed according to the terms of the -# GNU General Public License version 2 or any later version. - -from mercurial.i18n import _ -import common, server -import errno, os, socket, struct - -class QueryFailed(Exception): - pass - -def start_server(function): - """ - Decorator. - Tries to call function, if it fails, try to (re)start inotify server. - Raise QueryFailed if something went wrong - """ - def decorated_function(self, *args): - try: - return function(self, *args) - except (OSError, socket.error), err: - autostart = self.ui.configbool('inotify', 'autostart', True) - - if err.args[0] == errno.ECONNREFUSED: - self.ui.warn(_('inotify-client: found dead inotify server ' - 'socket; removing it\n')) - os.unlink(os.path.join(self.root, '.hg', 'inotify.sock')) - if err.args[0] in (errno.ECONNREFUSED, errno.ENOENT) and autostart: - try: - try: - server.start(self.ui, self.dirstate, self.root, - dict(daemon=True, daemon_pipefds='')) - except server.AlreadyStartedException, inst: - # another process may have started its own - # inotify server while this one was starting. 
- self.ui.debug(str(inst)) - except Exception, inst: - self.ui.warn(_('inotify-client: could not start inotify ' - 'server: %s\n') % inst) - else: - try: - return function(self, *args) - except socket.error, err: - self.ui.warn(_('inotify-client: could not talk to new ' - 'inotify server: %s\n') % err.args[-1]) - elif err.args[0] in (errno.ECONNREFUSED, errno.ENOENT): - # silently ignore normal errors if autostart is False - self.ui.debug('(inotify server not running)\n') - else: - self.ui.warn(_('inotify-client: failed to contact inotify ' - 'server: %s\n') % err.args[-1]) - - self.ui.traceback() - raise QueryFailed('inotify query failed') - - return decorated_function - - -class client(object): - def __init__(self, ui, repo): - self.ui = ui - self.dirstate = repo.dirstate - self.root = repo.root - self.sock = socket.socket(socket.AF_UNIX) - - def _connect(self): - sockpath = os.path.join(self.root, '.hg', 'inotify.sock') - try: - self.sock.connect(sockpath) - except socket.error, err: - if err.args[0] == "AF_UNIX path too long": - sockpath = os.readlink(sockpath) - self.sock.connect(sockpath) - else: - raise - - def _send(self, type, data): - """Sends protocol version number, and the data""" - self.sock.sendall(chr(common.version) + type + data) - - self.sock.shutdown(socket.SHUT_WR) - - def _receive(self, type): - """ - Read data, check version number, extract headers, - and returns a tuple (data descriptor, header) - Raises QueryFailed on error - """ - cs = common.recvcs(self.sock) - try: - version = ord(cs.read(1)) - except TypeError: - # empty answer, assume the server crashed - self.ui.warn(_('inotify-client: received empty answer from inotify ' - 'server')) - raise QueryFailed('server crashed') - - if version != common.version: - self.ui.warn(_('(inotify: received response from incompatible ' - 'server version %d)\n') % version) - raise QueryFailed('incompatible server version') - - readtype = cs.read(4) - if readtype != type: - self.ui.warn(_('(inotify: 
received \'%s\' response when expecting' - ' \'%s\')\n') % (readtype, type)) - raise QueryFailed('wrong response type') - - hdrfmt = common.resphdrfmts[type] - hdrsize = common.resphdrsizes[type] - try: - resphdr = struct.unpack(hdrfmt, cs.read(hdrsize)) - except struct.error: - raise QueryFailed('unable to retrieve query response headers') - - return cs, resphdr - - def query(self, type, req): - self._connect() - - self._send(type, req) - - return self._receive(type) - - @start_server - def statusquery(self, names, match, ignored, clean, unknown=True): - - def genquery(): - for n in names: - yield n - states = 'almrx!' - if ignored: - raise ValueError('this is insanity') - if clean: - states += 'c' - if unknown: - states += '?' - yield states - - req = '\0'.join(genquery()) - - cs, resphdr = self.query('STAT', req) - - def readnames(nbytes): - if nbytes: - names = cs.read(nbytes) - if names: - return filter(match, names.split('\0')) - return [] - results = tuple(map(readnames, resphdr[:-1])) - - if names: - nbytes = resphdr[-1] - vdirs = cs.read(nbytes) - if vdirs: - for vdir in vdirs.split('\0'): - if match.explicitdir: - match.explicitdir(vdir) - - return results - - @start_server - def debugquery(self): - cs, resphdr = self.query('DBUG', '') - - nbytes = resphdr[0] - names = cs.read(nbytes) - return names.split('\0')
--- a/hgext/inotify/common.py Mon Mar 17 14:57:13 2014 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,53 +0,0 @@ -# server.py - inotify common protocol code -# -# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com> -# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com> -# -# This software may be used and distributed according to the terms of the -# GNU General Public License version 2 or any later version. - -import cStringIO, socket, struct - -""" - Protocol between inotify clients and server: - - Client sending query: - 1) send protocol version number - 2) send query type (string, 4 letters long) - 3) send query parameters: - - For STAT, N+1 \0-separated strings: - 1) N different names that need checking - 2) 1 string containing all the status types to match - - No parameter needed for DBUG - - Server sending query answer: - 1) send protocol version number - 2) send query type - 3) send struct.pack'ed headers describing the length of the content: - e.g. for STAT, receive 9 integers describing the length of the - 9 \0-separated string lists to be read: - * one file list for each lmar!?ic status type - * one list containing the directories visited during lookup - -""" - -version = 3 - -resphdrfmts = { - 'STAT': '>lllllllll', # status requests - 'DBUG': '>l' # debugging queries -} -resphdrsizes = dict((k, struct.calcsize(v)) - for k, v in resphdrfmts.iteritems()) - -def recvcs(sock): - cs = cStringIO.StringIO() - s = True - try: - while s: - s = sock.recv(65536) - cs.write(s) - finally: - sock.shutdown(socket.SHUT_RD) - cs.seek(0) - return cs
--- a/hgext/inotify/linux/__init__.py Mon Mar 17 14:57:13 2014 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,44 +0,0 @@ -# __init__.py - low-level interfaces to the Linux inotify subsystem - -# Copyright 2006 Bryan O'Sullivan <bos@serpentine.com> - -# This library is free software; you can redistribute it and/or modify -# it under the terms of version 2.1 of the GNU Lesser General Public -# License, or any later version. - -'''Low-level interface to the Linux inotify subsystem. - -The inotify subsystem provides an efficient mechanism for file status -monitoring and change notification. - -This package provides the low-level inotify system call interface and -associated constants and helper functions. - -For a higher-level interface that remains highly efficient, use the -inotify.watcher package.''' - -__author__ = "Bryan O'Sullivan <bos@serpentine.com>" - -from _inotify import * - -procfs_path = '/proc/sys/fs/inotify' - -def _read_procfs_value(name): - def read_value(): - try: - fp = open(procfs_path + '/' + name) - r = int(fp.read()) - fp.close() - return r - except OSError: - return None - - read_value.__doc__ = '''Return the value of the %s setting from /proc. - - If inotify is not enabled on this system, return None.''' % name - - return read_value - -max_queued_events = _read_procfs_value('max_queued_events') -max_user_instances = _read_procfs_value('max_user_instances') -max_user_watches = _read_procfs_value('max_user_watches')
--- a/hgext/inotify/linux/_inotify.c Mon Mar 17 14:57:13 2014 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,649 +0,0 @@ -/* - * _inotify.c - Python extension interfacing to the Linux inotify subsystem - * - * Copyright 2006 Bryan O'Sullivan <bos@serpentine.com> - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of version 2.1 of the GNU Lesser General - * Public License or any later version. - */ - -#include <Python.h> -#include <alloca.h> -#include <sys/inotify.h> -#include <stdint.h> -#include <sys/ioctl.h> -#include <unistd.h> - -#include <util.h> - -/* Variables used in the event string representation */ -static PyObject *join; -static PyObject *er_wm; -static PyObject *er_wmc; -static PyObject *er_wmn; -static PyObject *er_wmcn; - -static PyObject *init(PyObject *self, PyObject *args) -{ - PyObject *ret = NULL; - int fd = -1; - - if (!PyArg_ParseTuple(args, ":init")) - goto bail; - - Py_BEGIN_ALLOW_THREADS; - fd = inotify_init(); - Py_END_ALLOW_THREADS; - - if (fd == -1) { - PyErr_SetFromErrno(PyExc_OSError); - goto bail; - } - - ret = PyInt_FromLong(fd); - if (ret == NULL) - goto bail; - - goto done; - -bail: - if (fd != -1) - close(fd); - - Py_CLEAR(ret); - -done: - return ret; -} - -PyDoc_STRVAR( - init_doc, - "init() -> fd\n" - "\n" - "Initialize an inotify instance.\n" - "Return a file descriptor associated with a new inotify event queue."); - -static PyObject *add_watch(PyObject *self, PyObject *args) -{ - PyObject *ret = NULL; - uint32_t mask; - int wd = -1; - char *path; - int fd; - - if (!PyArg_ParseTuple(args, "isI:add_watch", &fd, &path, &mask)) - goto bail; - - Py_BEGIN_ALLOW_THREADS; - wd = inotify_add_watch(fd, path, mask); - Py_END_ALLOW_THREADS; - - if (wd == -1) { - PyErr_SetFromErrnoWithFilename(PyExc_OSError, path); - goto bail; - } - - ret = PyInt_FromLong(wd); - if (ret == NULL) - goto bail; - - goto done; - -bail: - if (wd != -1) - inotify_rm_watch(fd, wd); - - Py_CLEAR(ret); - 
-done: - return ret; -} - -PyDoc_STRVAR( - add_watch_doc, - "add_watch(fd, path, mask) -> wd\n" - "\n" - "Add a watch to an inotify instance, or modify an existing watch.\n" - "\n" - " fd: file descriptor returned by init()\n" - " path: path to watch\n" - " mask: mask of events to watch for\n" - "\n" - "Return a unique numeric watch descriptor for the inotify instance\n" - "mapped by the file descriptor."); - -static PyObject *remove_watch(PyObject *self, PyObject *args) -{ - uint32_t wd; - int fd; - int r; - - if (!PyArg_ParseTuple(args, "iI:remove_watch", &fd, &wd)) - return NULL; - - Py_BEGIN_ALLOW_THREADS; - r = inotify_rm_watch(fd, wd); - Py_END_ALLOW_THREADS; - - if (r == -1) { - PyErr_SetFromErrno(PyExc_OSError); - return NULL; - } - - Py_INCREF(Py_None); - return Py_None; -} - -PyDoc_STRVAR( - remove_watch_doc, - "remove_watch(fd, wd)\n" - "\n" - " fd: file descriptor returned by init()\n" - " wd: watch descriptor returned by add_watch()\n" - "\n" - "Remove a watch associated with the watch descriptor wd from the\n" - "inotify instance associated with the file descriptor fd.\n" - "\n" - "Removing a watch causes an IN_IGNORED event to be generated for this\n" - "watch descriptor."); - -#define bit_name(x) {x, #x} - -static struct { - int bit; - const char *name; - PyObject *pyname; -} bit_names[] = { - bit_name(IN_ACCESS), - bit_name(IN_MODIFY), - bit_name(IN_ATTRIB), - bit_name(IN_CLOSE_WRITE), - bit_name(IN_CLOSE_NOWRITE), - bit_name(IN_OPEN), - bit_name(IN_MOVED_FROM), - bit_name(IN_MOVED_TO), - bit_name(IN_CREATE), - bit_name(IN_DELETE), - bit_name(IN_DELETE_SELF), - bit_name(IN_MOVE_SELF), - bit_name(IN_UNMOUNT), - bit_name(IN_Q_OVERFLOW), - bit_name(IN_IGNORED), - bit_name(IN_ONLYDIR), - bit_name(IN_DONT_FOLLOW), - bit_name(IN_MASK_ADD), - bit_name(IN_ISDIR), - bit_name(IN_ONESHOT), - {0} -}; - -static PyObject *decode_mask(int mask) -{ - PyObject *ret = PyList_New(0); - int i; - - if (ret == NULL) - goto bail; - - for (i = 0; bit_names[i].bit; i++) { 
- if (mask & bit_names[i].bit) { - if (bit_names[i].pyname == NULL) { - bit_names[i].pyname = PyString_FromString(bit_names[i].name); - if (bit_names[i].pyname == NULL) - goto bail; - } - Py_INCREF(bit_names[i].pyname); - if (PyList_Append(ret, bit_names[i].pyname) == -1) - goto bail; - } - } - - goto done; - -bail: - Py_CLEAR(ret); - -done: - return ret; -} - -static PyObject *pydecode_mask(PyObject *self, PyObject *args) -{ - int mask; - - if (!PyArg_ParseTuple(args, "i:decode_mask", &mask)) - return NULL; - - return decode_mask(mask); -} - -PyDoc_STRVAR( - decode_mask_doc, - "decode_mask(mask) -> list_of_strings\n" - "\n" - "Decode an inotify mask value into a list of strings that give the\n" - "name of each bit set in the mask."); - -static char doc[] = "Low-level inotify interface wrappers."; - -static void define_const(PyObject *dict, const char *name, uint32_t val) -{ - PyObject *pyval = PyInt_FromLong(val); - PyObject *pyname = PyString_FromString(name); - - if (!pyname || !pyval) - goto bail; - - PyDict_SetItem(dict, pyname, pyval); - -bail: - Py_XDECREF(pyname); - Py_XDECREF(pyval); -} - -static void define_consts(PyObject *dict) -{ - define_const(dict, "IN_ACCESS", IN_ACCESS); - define_const(dict, "IN_MODIFY", IN_MODIFY); - define_const(dict, "IN_ATTRIB", IN_ATTRIB); - define_const(dict, "IN_CLOSE_WRITE", IN_CLOSE_WRITE); - define_const(dict, "IN_CLOSE_NOWRITE", IN_CLOSE_NOWRITE); - define_const(dict, "IN_OPEN", IN_OPEN); - define_const(dict, "IN_MOVED_FROM", IN_MOVED_FROM); - define_const(dict, "IN_MOVED_TO", IN_MOVED_TO); - - define_const(dict, "IN_CLOSE", IN_CLOSE); - define_const(dict, "IN_MOVE", IN_MOVE); - - define_const(dict, "IN_CREATE", IN_CREATE); - define_const(dict, "IN_DELETE", IN_DELETE); - define_const(dict, "IN_DELETE_SELF", IN_DELETE_SELF); - define_const(dict, "IN_MOVE_SELF", IN_MOVE_SELF); - define_const(dict, "IN_UNMOUNT", IN_UNMOUNT); - define_const(dict, "IN_Q_OVERFLOW", IN_Q_OVERFLOW); - define_const(dict, "IN_IGNORED", 
IN_IGNORED); - - define_const(dict, "IN_ONLYDIR", IN_ONLYDIR); - define_const(dict, "IN_DONT_FOLLOW", IN_DONT_FOLLOW); - define_const(dict, "IN_MASK_ADD", IN_MASK_ADD); - define_const(dict, "IN_ISDIR", IN_ISDIR); - define_const(dict, "IN_ONESHOT", IN_ONESHOT); - define_const(dict, "IN_ALL_EVENTS", IN_ALL_EVENTS); -} - -struct event { - PyObject_HEAD - PyObject *wd; - PyObject *mask; - PyObject *cookie; - PyObject *name; -}; - -static PyObject *event_wd(PyObject *self, void *x) -{ - struct event *evt = (struct event *)self; - Py_INCREF(evt->wd); - return evt->wd; -} - -static PyObject *event_mask(PyObject *self, void *x) -{ - struct event *evt = (struct event *)self; - Py_INCREF(evt->mask); - return evt->mask; -} - -static PyObject *event_cookie(PyObject *self, void *x) -{ - struct event *evt = (struct event *)self; - Py_INCREF(evt->cookie); - return evt->cookie; -} - -static PyObject *event_name(PyObject *self, void *x) -{ - struct event *evt = (struct event *)self; - Py_INCREF(evt->name); - return evt->name; -} - -static struct PyGetSetDef event_getsets[] = { - {"wd", event_wd, NULL, - "watch descriptor"}, - {"mask", event_mask, NULL, - "event mask"}, - {"cookie", event_cookie, NULL, - "rename cookie, if rename-related event"}, - {"name", event_name, NULL, - "file name"}, - {NULL} -}; - -PyDoc_STRVAR( - event_doc, - "event: Structure describing an inotify event."); - -static PyObject *event_new(PyTypeObject *t, PyObject *a, PyObject *k) -{ - return (*t->tp_alloc)(t, 0); -} - -static void event_dealloc(struct event *evt) -{ - Py_XDECREF(evt->wd); - Py_XDECREF(evt->mask); - Py_XDECREF(evt->cookie); - Py_XDECREF(evt->name); - - Py_TYPE(evt)->tp_free(evt); -} - -static PyObject *event_repr(struct event *evt) -{ - int cookie = evt->cookie == Py_None ? 
-1 : PyInt_AsLong(evt->cookie); - PyObject *ret = NULL, *pymasks = NULL, *pymask = NULL; - PyObject *tuple = NULL, *formatstr = NULL; - - pymasks = decode_mask(PyInt_AsLong(evt->mask)); - if (pymasks == NULL) - goto bail; - - pymask = _PyString_Join(join, pymasks); - if (pymask == NULL) - goto bail; - - if (evt->name != Py_None) { - if (cookie == -1) { - formatstr = er_wmn; - tuple = PyTuple_Pack(3, evt->wd, pymask, evt->name); - } - else { - formatstr = er_wmcn; - tuple = PyTuple_Pack(4, evt->wd, pymask, - evt->cookie, evt->name); - } - } else { - if (cookie == -1) { - formatstr = er_wm; - tuple = PyTuple_Pack(2, evt->wd, pymask); - } - else { - formatstr = er_wmc; - tuple = PyTuple_Pack(3, evt->wd, pymask, evt->cookie); - } - } - - if (tuple == NULL) - goto bail; - - ret = PyNumber_Remainder(formatstr, tuple); - - if (ret == NULL) - goto bail; - - goto done; -bail: - Py_CLEAR(ret); - -done: - Py_XDECREF(pymask); - Py_XDECREF(pymasks); - Py_XDECREF(tuple); - - return ret; -} - -static PyTypeObject event_type = { - PyVarObject_HEAD_INIT(NULL, 0) - "_inotify.event", /*tp_name*/ - sizeof(struct event), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - (destructor)event_dealloc, /*tp_dealloc*/ - 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - 0, /*tp_compare*/ - (reprfunc)event_repr, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash */ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/ - event_doc, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - event_getsets, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - event_new, /* tp_new */ -}; - -PyObject 
*read_events(PyObject *self, PyObject *args) -{ - PyObject *ctor_args = NULL; - PyObject *pybufsize = NULL; - PyObject *ret = NULL; - int bufsize = 65536; - char *buf = NULL; - int nread, pos; - int fd; - - if (!PyArg_ParseTuple(args, "i|O:read", &fd, &pybufsize)) - goto bail; - - if (pybufsize && pybufsize != Py_None) - bufsize = PyInt_AsLong(pybufsize); - - ret = PyList_New(0); - if (ret == NULL) - goto bail; - - if (bufsize <= 0) { - int r; - - Py_BEGIN_ALLOW_THREADS; - r = ioctl(fd, FIONREAD, &bufsize); - Py_END_ALLOW_THREADS; - - if (r == -1) { - PyErr_SetFromErrno(PyExc_OSError); - goto bail; - } - if (bufsize == 0) - goto done; - } - else { - static long name_max; - static long name_fd = -1; - long min; - - if (name_fd != fd) { - name_fd = fd; - Py_BEGIN_ALLOW_THREADS; - name_max = fpathconf(fd, _PC_NAME_MAX); - Py_END_ALLOW_THREADS; - } - - min = sizeof(struct inotify_event) + name_max + 1; - - if (bufsize < min) { - PyErr_Format(PyExc_ValueError, - "bufsize must be at least %d", (int)min); - goto bail; - } - } - - buf = alloca(bufsize); - - Py_BEGIN_ALLOW_THREADS; - nread = read(fd, buf, bufsize); - Py_END_ALLOW_THREADS; - - if (nread == -1) { - PyErr_SetFromErrno(PyExc_OSError); - goto bail; - } - - ctor_args = PyTuple_New(0); - - if (ctor_args == NULL) - goto bail; - - pos = 0; - - while (pos < nread) { - struct inotify_event *in = (struct inotify_event *)(buf + pos); - struct event *evt; - PyObject *obj; - - obj = PyObject_CallObject((PyObject *)&event_type, ctor_args); - - if (obj == NULL) - goto bail; - - evt = (struct event *)obj; - - evt->wd = PyInt_FromLong(in->wd); - evt->mask = PyInt_FromLong(in->mask); - if (in->mask & IN_MOVE) - evt->cookie = PyInt_FromLong(in->cookie); - else { - Py_INCREF(Py_None); - evt->cookie = Py_None; - } - if (in->len) - evt->name = PyString_FromString(in->name); - else { - Py_INCREF(Py_None); - evt->name = Py_None; - } - - if (!evt->wd || !evt->mask || !evt->cookie || !evt->name) - goto mybail; - - if 
(PyList_Append(ret, obj) == -1) - goto mybail; - - pos += sizeof(struct inotify_event) + in->len; - continue; - - mybail: - Py_CLEAR(evt->wd); - Py_CLEAR(evt->mask); - Py_CLEAR(evt->cookie); - Py_CLEAR(evt->name); - Py_DECREF(obj); - - goto bail; - } - - goto done; - -bail: - Py_CLEAR(ret); - -done: - Py_XDECREF(ctor_args); - - return ret; -} - -static int init_globals(void) -{ - join = PyString_FromString("|"); - er_wm = PyString_FromString("event(wd=%d, mask=%s)"); - er_wmn = PyString_FromString("event(wd=%d, mask=%s, name=%s)"); - er_wmc = PyString_FromString("event(wd=%d, mask=%s, cookie=0x%x)"); - er_wmcn = PyString_FromString("event(wd=%d, mask=%s, cookie=0x%x, name=%s)"); - - return join && er_wm && er_wmn && er_wmc && er_wmcn; -} - -PyDoc_STRVAR( - read_doc, - "read(fd, bufsize[=65536]) -> list_of_events\n" - "\n" - "\nRead inotify events from a file descriptor.\n" - "\n" - " fd: file descriptor returned by init()\n" - " bufsize: size of buffer to read into, in bytes\n" - "\n" - "Return a list of event objects.\n" - "\n" - "If bufsize is > 0, block until events are available to be read.\n" - "Otherwise, immediately return all events that can be read without\n" - "blocking."); - -static PyMethodDef methods[] = { - {"init", init, METH_VARARGS, init_doc}, - {"add_watch", add_watch, METH_VARARGS, add_watch_doc}, - {"remove_watch", remove_watch, METH_VARARGS, remove_watch_doc}, - {"read", read_events, METH_VARARGS, read_doc}, - {"decode_mask", pydecode_mask, METH_VARARGS, decode_mask_doc}, - {NULL}, -}; - -#ifdef IS_PY3K -static struct PyModuleDef _inotify_module = { - PyModuleDef_HEAD_INIT, - "_inotify", - doc, - -1, - methods -}; - -PyMODINIT_FUNC PyInit__inotify(void) -{ - PyObject *mod, *dict; - - mod = PyModule_Create(&_inotify_module); - - if (mod == NULL) - return NULL; - - if (!init_globals()) - return; - - dict = PyModule_GetDict(mod); - - if (dict) - define_consts(dict); - - return mod; -} -#else -void init_inotify(void) -{ - PyObject *mod, *dict; - - 
if (PyType_Ready(&event_type) == -1) - return; - - if (!init_globals()) - return; - - mod = Py_InitModule3("_inotify", methods, doc); - - dict = PyModule_GetDict(mod); - - if (dict) - define_consts(dict); -} -#endif
--- a/hgext/inotify/linux/watcher.py Mon Mar 17 14:57:13 2014 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,335 +0,0 @@ -# watcher.py - high-level interfaces to the Linux inotify subsystem - -# Copyright 2006 Bryan O'Sullivan <bos@serpentine.com> - -# This library is free software; you can redistribute it and/or modify -# it under the terms of version 2.1 of the GNU Lesser General Public -# License, or any later version. - -'''High-level interfaces to the Linux inotify subsystem. - -The inotify subsystem provides an efficient mechanism for file status -monitoring and change notification. - -The watcher class hides the low-level details of the inotify -interface, and provides a Pythonic wrapper around it. It generates -events that provide somewhat more information than raw inotify makes -available. - -The autowatcher class is more useful, as it automatically watches -newly-created directories on your behalf.''' - -__author__ = "Bryan O'Sullivan <bos@serpentine.com>" - -import _inotify as inotify -import array -import errno -import fcntl -import os -import termios - - -class event(object): - '''Derived inotify event class. 
- - The following fields are available: - - mask: event mask, indicating what kind of event this is - - cookie: rename cookie, if a rename-related event - - path: path of the directory in which the event occurred - - name: name of the directory entry to which the event occurred - (may be None if the event happened to a watched directory) - - fullpath: complete path at which the event occurred - - wd: watch descriptor that triggered this event''' - - __slots__ = ( - 'cookie', - 'fullpath', - 'mask', - 'name', - 'path', - 'raw', - 'wd', - ) - - def __init__(self, raw, path): - self.path = path - self.raw = raw - if raw.name: - self.fullpath = path + '/' + raw.name - else: - self.fullpath = path - - self.wd = raw.wd - self.mask = raw.mask - self.cookie = raw.cookie - self.name = raw.name - - def __repr__(self): - r = repr(self.raw) - return 'event(path=' + repr(self.path) + ', ' + r[r.find('(') + 1:] - - -_event_props = { - 'access': 'File was accessed', - 'modify': 'File was modified', - 'attrib': 'Attribute of a directory entry was changed', - 'close_write': 'File was closed after being written to', - 'close_nowrite': 'File was closed without being written to', - 'open': 'File was opened', - 'moved_from': 'Directory entry was renamed from this name', - 'moved_to': 'Directory entry was renamed to this name', - 'create': 'Directory entry was created', - 'delete': 'Directory entry was deleted', - 'delete_self': 'The watched directory entry was deleted', - 'move_self': 'The watched directory entry was renamed', - 'unmount': 'Directory was unmounted, and can no longer be watched', - 'q_overflow': 'Kernel dropped events due to queue overflow', - 'ignored': 'Directory entry is no longer being watched', - 'isdir': 'Event occurred on a directory', - } - -for k, v in _event_props.iteritems(): - mask = getattr(inotify, 'IN_' + k.upper()) - def getter(self): - return self.mask & mask - getter.__name__ = k - getter.__doc__ = v - setattr(event, k, property(getter, doc=v)) - -del 
_event_props - - -class watcher(object): - '''Provide a Pythonic interface to the low-level inotify API. - - Also adds derived information to each event that is not available - through the normal inotify API, such as directory name.''' - - __slots__ = ( - 'fd', - '_paths', - '_wds', - ) - - def __init__(self): - '''Create a new inotify instance.''' - - self.fd = inotify.init() - self._paths = {} - self._wds = {} - - def fileno(self): - '''Return the file descriptor this watcher uses. - - Useful for passing to select and poll.''' - - return self.fd - - def add(self, path, mask): - '''Add or modify a watch. - - Return the watch descriptor added or modified.''' - - path = os.path.normpath(path) - wd = inotify.add_watch(self.fd, path, mask) - self._paths[path] = wd, mask - self._wds[wd] = path, mask - return wd - - def remove(self, wd): - '''Remove the given watch.''' - - inotify.remove_watch(self.fd, wd) - self._remove(wd) - - def _remove(self, wd): - path_mask = self._wds.pop(wd, None) - if path_mask is not None: - self._paths.pop(path_mask[0]) - - def path(self, path): - '''Return a (watch descriptor, event mask) pair for the given path. - - If the path is not being watched, return None.''' - - return self._paths.get(path) - - def wd(self, wd): - '''Return a (path, event mask) pair for the given watch descriptor. - - If the watch descriptor is not valid or not associated with - this watcher, return None.''' - - return self._wds.get(wd) - - def read(self, bufsize=None): - '''Read a list of queued inotify events. - - If bufsize is zero, only return those events that can be read - immediately without blocking. Otherwise, block until events are - available.''' - - events = [] - for evt in inotify.read(self.fd, bufsize): - events.append(event(evt, self._wds[evt.wd][0])) - if evt.mask & inotify.IN_IGNORED: - self._remove(evt.wd) - elif evt.mask & inotify.IN_UNMOUNT: - self.close() - return events - - def close(self): - '''Shut down this watcher. 
- - All subsequent method calls are likely to raise exceptions.''' - - os.close(self.fd) - self.fd = None - self._paths = None - self._wds = None - - def __len__(self): - '''Return the number of active watches.''' - - return len(self._paths) - - def __iter__(self): - '''Yield a (path, watch descriptor, event mask) tuple for each - entry being watched.''' - - for path, (wd, mask) in self._paths.iteritems(): - yield path, wd, mask - - def __del__(self): - if self.fd is not None: - os.close(self.fd) - - ignored_errors = [errno.ENOENT, errno.EPERM, errno.ENOTDIR] - - def add_iter(self, path, mask, onerror=None): - '''Add or modify watches over path and its subdirectories. - - Yield each added or modified watch descriptor. - - To ensure that this method runs to completion, you must - iterate over all of its results, even if you do not care what - they are. For example: - - for wd in w.add_iter(path, mask): - pass - - By default, errors are ignored. If optional arg "onerror" is - specified, it should be a function; it will be called with one - argument, an OSError instance. It can report the error to - continue with the walk, or raise the exception to abort the - walk.''' - - # Add the IN_ONLYDIR flag to the event mask, to avoid a possible - # race when adding a subdirectory. In the time between the - # event being queued by the kernel and us processing it, the - # directory may have been deleted, or replaced with a different - # kind of entry with the same name. - - submask = mask | inotify.IN_ONLYDIR - - try: - yield self.add(path, mask) - except OSError, err: - if onerror and err.errno not in self.ignored_errors: - onerror(err) - for root, dirs, names in os.walk(path, topdown=False, onerror=onerror): - for d in dirs: - try: - yield self.add(root + '/' + d, submask) - except OSError, err: - if onerror and err.errno not in self.ignored_errors: - onerror(err) - - def add_all(self, path, mask, onerror=None): - '''Add or modify watches over path and its subdirectories. 
- - Return a list of added or modified watch descriptors. - - By default, errors are ignored. If optional arg "onerror" is - specified, it should be a function; it will be called with one - argument, an OSError instance. It can report the error to - continue with the walk, or raise the exception to abort the - walk.''' - - return [w for w in self.add_iter(path, mask, onerror)] - - -class autowatcher(watcher): - '''watcher class that automatically watches newly created directories.''' - - __slots__ = ( - 'addfilter', - ) - - def __init__(self, addfilter=None): - '''Create a new inotify instance. - - This instance will automatically watch newly created - directories. - - If the optional addfilter parameter is not None, it must be a - callable that takes one parameter. It will be called each time - a directory is about to be automatically watched. If it returns - True, the directory will be watched if it still exists, - otherwise, it will be skipped.''' - - super(autowatcher, self).__init__() - self.addfilter = addfilter - - _dir_create_mask = inotify.IN_ISDIR | inotify.IN_CREATE - - def read(self, bufsize=None): - events = super(autowatcher, self).read(bufsize) - for evt in events: - if evt.mask & self._dir_create_mask == self._dir_create_mask: - if self.addfilter is None or self.addfilter(evt): - parentmask = self._wds[evt.wd][1] - # See note about race avoidance via IN_ONLYDIR above. - mask = parentmask | inotify.IN_ONLYDIR - try: - self.add_all(evt.fullpath, mask) - except OSError, err: - if err.errno not in self.ignored_errors: - raise - return events - - -class threshold(object): - '''Class that indicates whether a file descriptor has reached a - threshold of readable bytes available. 
- - This class is not thread-safe.''' - - __slots__ = ( - 'fd', - 'threshold', - '_iocbuf', - ) - - def __init__(self, fd, threshold=1024): - self.fd = fd - self.threshold = threshold - self._iocbuf = array.array('i', [0]) - - def readable(self): - '''Return the number of bytes readable on this file descriptor.''' - - fcntl.ioctl(self.fd, termios.FIONREAD, self._iocbuf, True) - return self._iocbuf[0] - - def __call__(self): - '''Indicate whether the number of readable bytes has met or - exceeded the threshold.''' - - return self.readable() >= self.threshold
--- a/hgext/inotify/linuxserver.py Mon Mar 17 14:57:13 2014 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,437 +0,0 @@ -# linuxserver.py - inotify status server for linux -# -# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com> -# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com> -# -# This software may be used and distributed according to the terms of the -# GNU General Public License version 2 or any later version. - -from mercurial.i18n import _ -from mercurial import osutil, util, error -import server -import errno, os, select, stat, sys, time - -try: - import linux as inotify - from linux import watcher -except ImportError: - raise - -def walkrepodirs(dirstate, absroot): - '''Iterate over all subdirectories of this repo. - Exclude the .hg directory, any nested repos, and ignored dirs.''' - def walkit(dirname, top): - fullpath = server.join(absroot, dirname) - try: - for name, kind in osutil.listdir(fullpath): - if kind == stat.S_IFDIR: - if name == '.hg': - if not top: - return - else: - d = server.join(dirname, name) - if dirstate._ignore(d): - continue - for subdir in walkit(d, False): - yield subdir - except OSError, err: - if err.errno not in server.walk_ignored_errors: - raise - yield fullpath - - return walkit('', True) - -def _explain_watch_limit(ui, dirstate, rootabs): - path = '/proc/sys/fs/inotify/max_user_watches' - try: - limit = int(util.readfile(path)) - except IOError, err: - if err.errno != errno.ENOENT: - raise - raise util.Abort(_('this system does not seem to ' - 'support inotify')) - ui.warn(_('*** the current per-user limit on the number ' - 'of inotify watches is %s\n') % limit) - ui.warn(_('*** this limit is too low to watch every ' - 'directory in this repository\n')) - ui.warn(_('*** counting directories: ')) - ndirs = len(list(walkrepodirs(dirstate, rootabs))) - ui.warn(_('found %d\n') % ndirs) - newlimit = min(limit, 1024) - while newlimit < ((limit + ndirs) * 1.1): - newlimit *= 2 - ui.warn(_('*** to 
raise the limit from %d to %d (run as root):\n') % - (limit, newlimit)) - ui.warn(_('*** echo %d > %s\n') % (newlimit, path)) - raise util.Abort(_('cannot watch %s until inotify watch limit is raised') - % rootabs) - -class pollable(object): - """ - Interface to support polling. - The file descriptor returned by fileno() is registered to a polling - object. - Usage: - Every tick, check if an event has happened since the last tick: - * If yes, call handle_events - * If no, call handle_timeout - """ - poll_events = select.POLLIN - instances = {} - poll = select.poll() - - def fileno(self): - raise NotImplementedError - - def handle_events(self, events): - raise NotImplementedError - - def handle_timeout(self): - raise NotImplementedError - - def shutdown(self): - raise NotImplementedError - - def register(self, timeout): - fd = self.fileno() - - pollable.poll.register(fd, pollable.poll_events) - pollable.instances[fd] = self - - self.registered = True - self.timeout = timeout - - def unregister(self): - pollable.poll.unregister(self) - self.registered = False - - @classmethod - def run(cls): - while True: - timeout = None - timeobj = None - for obj in cls.instances.itervalues(): - if obj.timeout is not None and (timeout is None - or obj.timeout < timeout): - timeout, timeobj = obj.timeout, obj - try: - events = cls.poll.poll(timeout) - except select.error, err: - if err.args[0] == errno.EINTR: - continue - raise - if events: - by_fd = {} - for fd, event in events: - by_fd.setdefault(fd, []).append(event) - - for fd, events in by_fd.iteritems(): - cls.instances[fd].handle_pollevents(events) - - elif timeobj: - timeobj.handle_timeout() - -def eventaction(code): - """ - Decorator to help handle events in repowatcher - """ - def decorator(f): - def wrapper(self, wpath): - if code == 'm' and wpath in self.lastevent and \ - self.lastevent[wpath] in 'cm': - return - self.lastevent[wpath] = code - self.timeout = 250 - - f(self, wpath) - - wrapper.func_name = f.func_name - 
return wrapper - return decorator - -class repowatcher(server.repowatcher, pollable): - """ - Watches inotify events - """ - mask = ( - inotify.IN_ATTRIB | - inotify.IN_CREATE | - inotify.IN_DELETE | - inotify.IN_DELETE_SELF | - inotify.IN_MODIFY | - inotify.IN_MOVED_FROM | - inotify.IN_MOVED_TO | - inotify.IN_MOVE_SELF | - inotify.IN_ONLYDIR | - inotify.IN_UNMOUNT | - 0) - - def __init__(self, ui, dirstate, root): - server.repowatcher.__init__(self, ui, dirstate, root) - - self.lastevent = {} - self.dirty = False - try: - self.watcher = watcher.watcher() - except OSError, err: - raise util.Abort(_('inotify service not available: %s') % - err.strerror) - self.threshold = watcher.threshold(self.watcher) - self.fileno = self.watcher.fileno - self.register(timeout=None) - - self.handle_timeout() - self.scan() - - def event_time(self): - last = self.last_event - now = time.time() - self.last_event = now - - if last is None: - return 'start' - delta = now - last - if delta < 5: - return '+%.3f' % delta - if delta < 50: - return '+%.2f' % delta - return '+%.1f' % delta - - def add_watch(self, path, mask): - if not path: - return - if self.watcher.path(path) is None: - if self.ui.debugflag: - self.ui.note(_('watching %r\n') % path[self.prefixlen:]) - try: - self.watcher.add(path, mask) - except OSError, err: - if err.errno in (errno.ENOENT, errno.ENOTDIR): - return - if err.errno != errno.ENOSPC: - raise - _explain_watch_limit(self.ui, self.dirstate, self.wprefix) - - def setup(self): - self.ui.note(_('watching directories under %r\n') % self.wprefix) - self.add_watch(self.wprefix + '.hg', inotify.IN_DELETE) - - def scan(self, topdir=''): - ds = self.dirstate._map.copy() - self.add_watch(server.join(self.wprefix, topdir), self.mask) - for root, dirs, files in server.walk(self.dirstate, self.wprefix, - topdir): - for d in dirs: - self.add_watch(server.join(root, d), self.mask) - wroot = root[self.prefixlen:] - for fn in files: - wfn = server.join(wroot, fn) - 
self.updatefile(wfn, self.getstat(wfn)) - ds.pop(wfn, None) - wtopdir = topdir - if wtopdir and wtopdir[-1] != '/': - wtopdir += '/' - for wfn, state in ds.iteritems(): - if not wfn.startswith(wtopdir): - continue - try: - st = self.stat(wfn) - except OSError: - status = state[0] - self.deletefile(wfn, status) - else: - self.updatefile(wfn, st) - self.check_deleted('!') - self.check_deleted('r') - - @eventaction('c') - def created(self, wpath): - if wpath == '.hgignore': - self.update_hgignore() - try: - st = self.stat(wpath) - if stat.S_ISREG(st[0]) or stat.S_ISLNK(st[0]): - self.updatefile(wpath, st) - except OSError: - pass - - @eventaction('m') - def modified(self, wpath): - if wpath == '.hgignore': - self.update_hgignore() - try: - st = self.stat(wpath) - if stat.S_ISREG(st[0]): - if self.dirstate[wpath] in 'lmn': - self.updatefile(wpath, st) - except OSError: - pass - - @eventaction('d') - def deleted(self, wpath): - if wpath == '.hgignore': - self.update_hgignore() - elif wpath.startswith('.hg/'): - return - - self.deletefile(wpath, self.dirstate[wpath]) - - def process_create(self, wpath, evt): - if self.ui.debugflag: - self.ui.note(_('%s event: created %s\n') % - (self.event_time(), wpath)) - - if evt.mask & inotify.IN_ISDIR: - self.scan(wpath) - else: - self.created(wpath) - - def process_delete(self, wpath, evt): - if self.ui.debugflag: - self.ui.note(_('%s event: deleted %s\n') % - (self.event_time(), wpath)) - - if evt.mask & inotify.IN_ISDIR: - tree = self.tree.dir(wpath) - todelete = [wfn for wfn, ignore in tree.walk('?')] - for fn in todelete: - self.deletefile(fn, '?') - self.scan(wpath) - else: - self.deleted(wpath) - - def process_modify(self, wpath, evt): - if self.ui.debugflag: - self.ui.note(_('%s event: modified %s\n') % - (self.event_time(), wpath)) - - if not (evt.mask & inotify.IN_ISDIR): - self.modified(wpath) - - def process_unmount(self, evt): - self.ui.warn(_('filesystem containing %s was unmounted\n') % - evt.fullpath) - sys.exit(0) - 
- def handle_pollevents(self, events): - if self.ui.debugflag: - self.ui.note(_('%s readable: %d bytes\n') % - (self.event_time(), self.threshold.readable())) - if not self.threshold(): - if self.registered: - if self.ui.debugflag: - self.ui.note(_('%s below threshold - unhooking\n') % - (self.event_time())) - self.unregister() - self.timeout = 250 - else: - self.read_events() - - def read_events(self, bufsize=None): - events = self.watcher.read(bufsize) - if self.ui.debugflag: - self.ui.note(_('%s reading %d events\n') % - (self.event_time(), len(events))) - for evt in events: - if evt.fullpath == self.wprefix[:-1]: - # events on the root of the repository - # itself, e.g. permission changes or repository move - continue - assert evt.fullpath.startswith(self.wprefix) - wpath = evt.fullpath[self.prefixlen:] - - # paths have been normalized, wpath never ends with a '/' - - if wpath.startswith('.hg/') and evt.mask & inotify.IN_ISDIR: - # ignore subdirectories of .hg/ (merge, patches...) - continue - if wpath == ".hg/wlock": - if evt.mask & inotify.IN_DELETE: - self.dirstate.invalidate() - self.dirty = False - self.scan() - elif evt.mask & inotify.IN_CREATE: - self.dirty = True - else: - if self.dirty: - continue - - if evt.mask & inotify.IN_UNMOUNT: - self.process_unmount(wpath, evt) - elif evt.mask & (inotify.IN_MODIFY | inotify.IN_ATTRIB): - self.process_modify(wpath, evt) - elif evt.mask & (inotify.IN_DELETE | inotify.IN_DELETE_SELF | - inotify.IN_MOVED_FROM): - self.process_delete(wpath, evt) - elif evt.mask & (inotify.IN_CREATE | inotify.IN_MOVED_TO): - self.process_create(wpath, evt) - - self.lastevent.clear() - - def handle_timeout(self): - if not self.registered: - if self.ui.debugflag: - self.ui.note(_('%s hooking back up with %d bytes readable\n') % - (self.event_time(), self.threshold.readable())) - self.read_events(0) - self.register(timeout=None) - - self.timeout = None - - def shutdown(self): - self.watcher.close() - - def debug(self): - """ - Returns a 
sorted list of relatives paths currently watched, - for debugging purposes. - """ - return sorted(tuple[0][self.prefixlen:] for tuple in self.watcher) - -class socketlistener(server.socketlistener, pollable): - """ - Listens for client queries on unix socket inotify.sock - """ - def __init__(self, ui, root, repowatcher, timeout): - server.socketlistener.__init__(self, ui, root, repowatcher, timeout) - self.register(timeout=timeout) - - def handle_timeout(self): - raise server.TimeoutException - - def handle_pollevents(self, events): - for e in events: - self.accept_connection() - - def shutdown(self): - self.sock.close() - self.sock.cleanup() - - def answer_stat_query(self, cs): - if self.repowatcher.timeout: - # We got a query while a rescan is pending. Make sure we - # rescan before responding, or we could give back a wrong - # answer. - self.repowatcher.handle_timeout() - return server.socketlistener.answer_stat_query(self, cs) - -class master(object): - def __init__(self, ui, dirstate, root, timeout=None): - self.ui = ui - self.repowatcher = repowatcher(ui, dirstate, root) - self.socketlistener = socketlistener(ui, root, self.repowatcher, - timeout) - - def shutdown(self): - for obj in pollable.instances.itervalues(): - try: - obj.shutdown() - except error.SignalInterrupt: - pass - - def run(self): - self.repowatcher.setup() - self.ui.note(_('finished setup\n')) - if os.getenv('TIME_STARTUP'): - sys.exit(0) - pollable.run()
--- a/hgext/inotify/server.py Mon Mar 17 14:57:13 2014 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,465 +0,0 @@ -# server.py - common entry point for inotify status server -# -# Copyright 2009 Nicolas Dumazet <nicdumz@gmail.com> -# -# This software may be used and distributed according to the terms of the -# GNU General Public License version 2 or any later version. - -from mercurial.i18n import _ -from mercurial import cmdutil, posix, osutil, util -import common - -import errno -import os -import socket -import stat -import struct -import sys - -class AlreadyStartedException(Exception): - pass -class TimeoutException(Exception): - pass - -def join(a, b): - if a: - if a[-1] == '/': - return a + b - return a + '/' + b - return b - -def split(path): - c = path.rfind('/') - if c == -1: - return '', path - return path[:c], path[c + 1:] - -walk_ignored_errors = (errno.ENOENT, errno.ENAMETOOLONG) - -def walk(dirstate, absroot, root): - '''Like os.walk, but only yields regular files.''' - - # This function is critical to performance during startup. - - def walkit(root, reporoot): - files, dirs = [], [] - - try: - fullpath = join(absroot, root) - for name, kind in osutil.listdir(fullpath): - if kind == stat.S_IFDIR: - if name == '.hg': - if not reporoot: - return - else: - dirs.append(name) - path = join(root, name) - if dirstate._ignore(path): - continue - for result in walkit(path, False): - yield result - elif kind in (stat.S_IFREG, stat.S_IFLNK): - files.append(name) - yield fullpath, dirs, files - - except OSError, err: - if err.errno == errno.ENOTDIR: - # fullpath was a directory, but has since been replaced - # by a file. 
- yield fullpath, dirs, files - elif err.errno not in walk_ignored_errors: - raise - - return walkit(root, root == '') - -class directory(object): - """ - Representing a directory - - * path is the relative path from repo root to this directory - * files is a dict listing the files in this directory - - keys are file names - - values are file status - * dirs is a dict listing the subdirectories - - key are subdirectories names - - values are directory objects - """ - def __init__(self, relpath=''): - self.path = relpath - self.files = {} - self.dirs = {} - - def dir(self, relpath): - """ - Returns the directory contained at the relative path relpath. - Creates the intermediate directories if necessary. - """ - if not relpath: - return self - l = relpath.split('/') - ret = self - while l: - next = l.pop(0) - try: - ret = ret.dirs[next] - except KeyError: - d = directory(join(ret.path, next)) - ret.dirs[next] = d - ret = d - return ret - - def walk(self, states, visited=None): - """ - yield (filename, status) pairs for items in the trees - that have status in states. 
- filenames are relative to the repo root - """ - for file, st in self.files.iteritems(): - if st in states: - yield join(self.path, file), st - for dir in self.dirs.itervalues(): - if visited is not None: - visited.add(dir.path) - for e in dir.walk(states): - yield e - - def lookup(self, states, path, visited): - """ - yield root-relative filenames that match path, and whose - status are in states: - * if path is a file, yield path - * if path is a directory, yield directory files - * if path is not tracked, yield nothing - """ - if path[-1] == '/': - path = path[:-1] - - paths = path.split('/') - - # we need to check separately for last node - last = paths.pop() - - tree = self - try: - for dir in paths: - tree = tree.dirs[dir] - except KeyError: - # path is not tracked - visited.add(tree.path) - return - - try: - # if path is a directory, walk it - target = tree.dirs[last] - visited.add(target.path) - for file, st in target.walk(states, visited): - yield file - except KeyError: - try: - if tree.files[last] in states: - # path is a file - visited.add(tree.path) - yield path - except KeyError: - # path is not tracked - pass - -class repowatcher(object): - """ - Watches inotify events - """ - statuskeys = 'almr!?' - - def __init__(self, ui, dirstate, root): - self.ui = ui - self.dirstate = dirstate - - self.wprefix = join(root, '') - self.prefixlen = len(self.wprefix) - - self.tree = directory() - self.statcache = {} - self.statustrees = dict([(s, directory()) for s in self.statuskeys]) - - self.ds_info = self.dirstate_info() - - self.last_event = None - - - def handle_timeout(self): - pass - - def dirstate_info(self): - try: - st = os.lstat(self.wprefix + '.hg/dirstate') - return st.st_mtime, st.st_ino - except OSError, err: - if err.errno != errno.ENOENT: - raise - return 0, 0 - - def filestatus(self, fn, st): - try: - type_, mode, size, time = self.dirstate._map[fn][:4] - except KeyError: - type_ = '?' 
- if type_ == 'n': - st_mode, st_size, st_mtime = st - if size == -1: - return 'l' - if size and (size != st_size or (mode ^ st_mode) & 0100): - return 'm' - if time != int(st_mtime): - return 'l' - return 'n' - if type_ == '?' and self.dirstate._dirignore(fn): - # we must check not only if the file is ignored, but if any part - # of its path match an ignore pattern - return 'i' - return type_ - - def updatefile(self, wfn, osstat): - ''' - update the file entry of an existing file. - - osstat: (mode, size, time) tuple, as returned by os.lstat(wfn) - ''' - - self._updatestatus(wfn, self.filestatus(wfn, osstat)) - - def deletefile(self, wfn, oldstatus): - ''' - update the entry of a file which has been deleted. - - oldstatus: char in statuskeys, status of the file before deletion - ''' - if oldstatus == 'r': - newstatus = 'r' - elif oldstatus in 'almn': - newstatus = '!' - else: - newstatus = None - - self.statcache.pop(wfn, None) - self._updatestatus(wfn, newstatus) - - def _updatestatus(self, wfn, newstatus): - ''' - Update the stored status of a file. - - newstatus: - char in (statuskeys + 'ni'), new status to apply. - - or None, to stop tracking wfn - ''' - root, fn = split(wfn) - d = self.tree.dir(root) - - oldstatus = d.files.get(fn) - # oldstatus can be either: - # - None : fn is new - # - a char in statuskeys: fn is a (tracked) file - - if self.ui.debugflag and oldstatus != newstatus: - self.ui.note(_('status: %r %s -> %s\n') % - (wfn, oldstatus, newstatus)) - - if oldstatus and oldstatus in self.statuskeys \ - and oldstatus != newstatus: - del self.statustrees[oldstatus].dir(root).files[fn] - - if newstatus in (None, 'i'): - d.files.pop(fn, None) - elif oldstatus != newstatus: - d.files[fn] = newstatus - if newstatus != 'n': - self.statustrees[newstatus].dir(root).files[fn] = newstatus - - def check_deleted(self, key): - # Files that had been deleted but were present in the dirstate - # may have vanished from the dirstate; we must clean them up. 
- nuke = [] - for wfn, ignore in self.statustrees[key].walk(key): - if wfn not in self.dirstate: - nuke.append(wfn) - for wfn in nuke: - root, fn = split(wfn) - del self.statustrees[key].dir(root).files[fn] - del self.tree.dir(root).files[fn] - - def update_hgignore(self): - # An update of the ignore file can potentially change the - # states of all unknown and ignored files. - - # XXX If the user has other ignore files outside the repo, or - # changes their list of ignore files at run time, we'll - # potentially never see changes to them. We could get the - # client to report to us what ignore data they're using. - # But it's easier to do nothing than to open that can of - # worms. - - if '_ignore' in self.dirstate.__dict__: - delattr(self.dirstate, '_ignore') - self.ui.note(_('rescanning due to .hgignore change\n')) - self.handle_timeout() - self.scan() - - def getstat(self, wpath): - try: - return self.statcache[wpath] - except KeyError: - try: - return self.stat(wpath) - except OSError, err: - if err.errno != errno.ENOENT: - raise - - def stat(self, wpath): - try: - st = os.lstat(join(self.wprefix, wpath)) - ret = st.st_mode, st.st_size, st.st_mtime - self.statcache[wpath] = ret - return ret - except OSError: - self.statcache.pop(wpath, None) - raise - -class socketlistener(object): - """ - Listens for client queries on unix socket inotify.sock - """ - def __init__(self, ui, root, repowatcher, timeout): - self.ui = ui - self.repowatcher = repowatcher - try: - self.sock = posix.unixdomainserver( - lambda p: os.path.join(root, '.hg', p), - 'inotify') - except (OSError, socket.error), err: - if err.args[0] == errno.EADDRINUSE: - raise AlreadyStartedException(_('cannot start: ' - 'socket is already bound')) - raise - self.fileno = self.sock.fileno - - def answer_stat_query(self, cs): - names = cs.read().split('\0') - - states = names.pop() - - self.ui.note(_('answering query for %r\n') % states) - - visited = set() - if not names: - def genresult(states, tree): - 
for fn, state in tree.walk(states): - yield fn - else: - def genresult(states, tree): - for fn in names: - for f in tree.lookup(states, fn, visited): - yield f - - return ['\0'.join(r) for r in [ - genresult('l', self.repowatcher.statustrees['l']), - genresult('m', self.repowatcher.statustrees['m']), - genresult('a', self.repowatcher.statustrees['a']), - genresult('r', self.repowatcher.statustrees['r']), - genresult('!', self.repowatcher.statustrees['!']), - '?' in states - and genresult('?', self.repowatcher.statustrees['?']) - or [], - [], - 'c' in states and genresult('n', self.repowatcher.tree) or [], - visited - ]] - - def answer_dbug_query(self): - return ['\0'.join(self.repowatcher.debug())] - - def accept_connection(self): - sock, addr = self.sock.accept() - - cs = common.recvcs(sock) - version = ord(cs.read(1)) - - if version != common.version: - self.ui.warn(_('received query from incompatible client ' - 'version %d\n') % version) - try: - # try to send back our version to the client - # this way, the client too is informed of the mismatch - sock.sendall(chr(common.version)) - except socket.error: - pass - return - - type = cs.read(4) - - if type == 'STAT': - results = self.answer_stat_query(cs) - elif type == 'DBUG': - results = self.answer_dbug_query() - else: - self.ui.warn(_('unrecognized query type: %s\n') % type) - return - - try: - try: - v = chr(common.version) - - sock.sendall(v + type + struct.pack(common.resphdrfmts[type], - *map(len, results))) - sock.sendall(''.join(results)) - finally: - sock.shutdown(socket.SHUT_WR) - except socket.error, err: - if err.args[0] != errno.EPIPE: - raise - -if sys.platform.startswith('linux'): - import linuxserver as _server -else: - raise ImportError - -master = _server.master - -def start(ui, dirstate, root, opts): - timeout = opts.get('idle_timeout') - if timeout: - timeout = float(timeout) * 60000 - else: - timeout = None - - class service(object): - def init(self): - try: - self.master = master(ui, 
dirstate, root, timeout) - except AlreadyStartedException, inst: - raise util.Abort("inotify-server: %s" % inst) - - def run(self): - try: - try: - self.master.run() - except TimeoutException: - pass - finally: - self.master.shutdown() - - if 'inserve' not in sys.argv: - runargs = util.hgcmd() + ['inserve', '-R', root] - else: - runargs = util.hgcmd() + sys.argv[1:] - - pidfile = ui.config('inotify', 'pidfile') - opts.setdefault('pid_file', '') - if opts['daemon'] and pidfile is not None and not opts['pid_file']: - opts['pid_file'] = pidfile - - service = service() - logfile = ui.config('inotify', 'log') - - appendpid = ui.configbool('inotify', 'appendpid', False) - - ui.debug('starting inotify server: %s\n' % ' '.join(runargs)) - cmdutil.service(opts, initfn=service.init, runfn=service.run, - logfile=logfile, runargs=runargs, appendpid=appendpid)
--- a/hgext/interhg.py Mon Mar 17 14:57:13 2014 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,83 +0,0 @@ -# interhg.py - interhg -# -# Copyright 2007 OHASHI Hideya <ohachige@gmail.com> -# -# Contributor(s): -# Edward Lee <edward.lee@engineering.uiuc.edu> -# -# This software may be used and distributed according to the terms of the -# GNU General Public License version 2 or any later version. - -'''expand expressions into changelog and summaries - -This extension allows the use of a special syntax in summaries, which -will be automatically expanded into links or any other arbitrary -expression, much like InterWiki does. - -A few example patterns (link to bug tracking, etc.) that may be used -in your hgrc:: - - [interhg] - issues = s!issue(\\d+)!<a href="http://bts/issue\\1">issue\\1</a>! - bugzilla = s!((?:bug|b=|(?=#?\\d{4,}))(?:\\s*#?)(\\d+))!<a..=\\2">\\1</a>!i - boldify = s!(^|\\s)#(\\d+)\\b! <b>#\\2</b>! -''' - -import re -from mercurial.hgweb import hgweb_mod -from mercurial import templatefilters, extensions -from mercurial.i18n import _ - -testedwith = 'internal' - -interhg_table = [] - -def uisetup(ui): - orig_escape = templatefilters.filters["escape"] - - def interhg_escape(x): - escstr = orig_escape(x) - for regexp, format in interhg_table: - escstr = regexp.sub(format, escstr) - return escstr - - templatefilters.filters["escape"] = interhg_escape - -def interhg_refresh(orig, self, *args, **kwargs): - interhg_table[:] = [] - for key, pattern in self.repo.ui.configitems('interhg'): - # grab the delimiter from the character after the "s" - unesc = pattern[1] - delim = re.escape(unesc) - - # identify portions of the pattern, taking care to avoid escaped - # delimiters. the replace format and flags are optional, but delimiters - # are required. 
- match = re.match(r'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$' - % (delim, delim, delim), pattern) - if not match: - self.repo.ui.warn(_("interhg: invalid pattern for %s: %s\n") - % (key, pattern)) - continue - - # we need to unescape the delimiter for regexp and format - delim_re = re.compile(r'(?<!\\)\\%s' % delim) - regexp = delim_re.sub(unesc, match.group(1)) - format = delim_re.sub(unesc, match.group(2)) - - # the pattern allows for 6 regexp flags, so set them if necessary - flagin = match.group(3) - flags = 0 - if flagin: - for flag in flagin.upper(): - flags |= re.__dict__[flag] - - try: - regexp = re.compile(regexp, flags) - interhg_table.append((regexp, format)) - except re.error: - self.repo.ui.warn(_("interhg: invalid regexp for %s: %s\n") - % (key, regexp)) - return orig(self, *args, **kwargs) - -extensions.wrapfunction(hgweb_mod.hgweb, 'refresh', interhg_refresh)
--- a/hgext/keyword.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/keyword.py Tue Mar 18 14:25:28 2014 -0500 @@ -218,9 +218,8 @@ '''Replaces keywords in data with expanded template.''' def kwsub(mobj): kw = mobj.group(1) - ct = cmdutil.changeset_templater(self.ui, self.repo, - False, None, '', False) - ct.use_template(self.templates[kw]) + ct = cmdutil.changeset_templater(self.ui, self.repo, False, None, + self.templates[kw], '', False) self.ui.pushbuffer() ct.show(ctx, root=self.repo.root, file=path) ekw = templatefilters.firstline(self.ui.popbuffer())
--- a/hgext/largefiles/overrides.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/largefiles/overrides.py Tue Mar 18 14:25:28 2014 -0500 @@ -12,7 +12,7 @@ import copy from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \ - node, archival, error, merge, discovery, pathutil + node, archival, error, merge, discovery, pathutil, revset from mercurial.i18n import _ from mercurial.node import hex from hgext import rebase @@ -365,8 +365,8 @@ # Finally, the merge.applyupdates function will then take care of # writing the files into the working copy and lfcommands.updatelfiles # will update the largefiles. -def overridemanifestmerge(origfn, repo, p1, p2, pa, branchmerge, force, - partial, acceptremote=False): +def overridecalculateupdates(origfn, repo, p1, p2, pa, branchmerge, force, + partial, acceptremote=False): overwrite = force and not branchmerge actions = origfn(repo, p1, p2, pa, branchmerge, force, partial, acceptremote) @@ -752,7 +752,7 @@ firstpulled = repo.firstpulled except AttributeError: raise util.Abort(_("pulled() only available in --lfrev")) - return [r for r in subset if r >= firstpulled] + return revset.baseset([r for r in subset if r >= firstpulled]) def overrideclone(orig, ui, source, dest=None, **opts): d = dest
--- a/hgext/largefiles/remotestore.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/largefiles/remotestore.py Tue Mar 18 14:25:28 2014 -0500 @@ -30,7 +30,8 @@ % (source, util.hidepassword(self.url))) def exists(self, hashes): - return dict((h, s == 0) for (h, s) in self._stat(hashes).iteritems()) + return dict((h, s == 0) for (h, s) in # dict-from-generator + self._stat(hashes).iteritems()) def sendfile(self, filename, hash): self.ui.debug('remotestore: sendfile(%s, %s)\n' % (filename, hash)) @@ -97,4 +98,3 @@ def batch(self): '''Support for remote batching.''' return remotebatch(self) -
--- a/hgext/largefiles/uisetup.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/largefiles/uisetup.py Tue Mar 18 14:25:28 2014 -0500 @@ -96,8 +96,8 @@ overrides.overridecat) entry = extensions.wrapfunction(merge, '_checkunknownfile', overrides.overridecheckunknownfile) - entry = extensions.wrapfunction(merge, 'manifestmerge', - overrides.overridemanifestmerge) + entry = extensions.wrapfunction(merge, 'calculateupdates', + overrides.overridecalculateupdates) entry = extensions.wrapfunction(filemerge, 'filemerge', overrides.overridefilemerge) entry = extensions.wrapfunction(cmdutil, 'copy',
--- a/hgext/mq.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/mq.py Tue Mar 18 14:25:28 2014 -0500 @@ -1987,9 +1987,11 @@ raise util.Abort(_('-e is incompatible with import from -')) filename = normname(filename) self.checkreservedname(filename) - originpath = self.join(filename) - if not os.path.isfile(originpath): - raise util.Abort(_("patch %s does not exist") % filename) + if util.url(filename).islocal(): + originpath = self.join(filename) + if not os.path.isfile(originpath): + raise util.Abort( + _("patch %s does not exist") % filename) if patchname: self.checkpatchname(patchname, force) @@ -3269,6 +3271,12 @@ def mq(self): return queue(self.ui, self.baseui, self.path) + def invalidateall(self): + super(mqrepo, self).invalidateall() + if localrepo.hasunfilteredcache(self, 'mq'): + # recreate mq in case queue path was changed + delattr(self.unfiltered(), 'mq') + def abortifwdirpatched(self, errmsg, force=False): if self.mq.applied and self.mq.checkapplied and not force: parents = self.dirstate.parents() @@ -3409,7 +3417,7 @@ """ revset.getargs(x, 0, 0, _("mq takes no arguments")) applied = set([repo[r.node].rev() for r in repo.mq.applied]) - return [r for r in subset if r in applied] + return revset.baseset([r for r in subset if r in applied]) # tell hggettext to extract docstrings from these functions: i18nfunctions = [revsetmq]
--- a/hgext/notify.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/notify.py Tue Mar 18 14:25:28 2014 -0500 @@ -188,13 +188,12 @@ mapfile = self.ui.config('notify', 'style') template = (self.ui.config('notify', hooktype) or self.ui.config('notify', 'template')) - self.t = cmdutil.changeset_templater(self.ui, self.repo, - False, None, mapfile, False) if not mapfile and not template: template = deftemplates.get(hooktype) or single_template if template: template = templater.parsestring(template, quoted=False) - self.t.use_template(template) + self.t = cmdutil.changeset_templater(self.ui, self.repo, False, None, + template, mapfile, False) def strip(self, path): '''strip leading slashes from local path, turn into web-safe path.'''
--- a/hgext/rebase.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/rebase.py Tue Mar 18 14:25:28 2014 -0500 @@ -516,6 +516,12 @@ if state.get(p.rev()) == repo[p1].rev(): base = p.node() break + else: # fallback when base not found + base = None + + # Raise because this function is called wrong (see issue 4106) + raise AssertionError('no base found to rebase on ' + '(rebasenode called wrong)') if base is not None: repo.ui.debug(" detach base %d:%s\n" % (repo[base].rev(), repo[base])) # When collapsing in-place, the parent is the common ancestor, we @@ -703,7 +709,8 @@ if new != nullrev and new in seen: skipped.add(old) seen.add(new) - repo.ui.debug('computed skipped revs: %s\n' % skipped) + repo.ui.debug('computed skipped revs: %s\n' % + (' '.join(str(r) for r in sorted(skipped)) or None)) repo.ui.debug('rebase status resumed\n') return (originalwd, target, state, skipped, collapse, keep, keepbranches, external, activebookmark) @@ -790,7 +797,7 @@ repo.ui.debug('source is a child of destination\n') return None - repo.ui.debug('rebase onto %d starting from %s\n' % (dest, roots)) + repo.ui.debug('rebase onto %d starting from %s\n' % (dest, root)) state.update(dict.fromkeys(rebaseset, nullrev)) # Rebase tries to turn <dest> into a parent of <root> while # preserving the number of parents of rebased changesets:
--- a/hgext/shelve.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/shelve.py Tue Mar 18 14:25:28 2014 -0500 @@ -22,8 +22,8 @@ """ from mercurial.i18n import _ -from mercurial.node import nullid, bin, hex -from mercurial import changegroup, cmdutil, scmutil, phases +from mercurial.node import nullid, nullrev, bin, hex +from mercurial import changegroup, cmdutil, scmutil, phases, commands from mercurial import error, hg, mdiff, merge, patch, repair, util from mercurial import templatefilters from mercurial import lock as lockmod @@ -122,22 +122,21 @@ """subcommand that creates a new shelve""" def publicancestors(ctx): - """Compute the heads of the public ancestors of a commit. + """Compute the public ancestors of a commit. - Much faster than the revset heads(ancestors(ctx) - draft())""" - seen = set() + Much faster than the revset ancestors(ctx) & draft()""" + seen = set([nullrev]) visit = util.deque() visit.append(ctx) while visit: ctx = visit.popleft() + yield ctx.node() for parent in ctx.parents(): rev = parent.rev() if rev not in seen: seen.add(rev) if parent.mutable(): visit.append(parent) - else: - yield parent.node() wctx = repo[None] parents = wctx.parents() @@ -173,9 +172,9 @@ repo.mq.checkapplied = saved if parent.node() != nullid: - desc = parent.description().split('\n', 1)[0] + desc = "changes to '%s'" % parent.description().split('\n', 1)[0] else: - desc = '(empty repository)' + desc = '(changes in empty repository)' if not opts['message']: opts['message'] = desc @@ -518,6 +517,7 @@ if not shelvedfile(repo, basename, 'files').exists(): raise util.Abort(_("shelved change '%s' not found") % basename) + oldquiet = ui.quiet wlock = lock = tr = None try: lock = repo.lock() @@ -537,6 +537,8 @@ # Store pending changes in a commit m, a, r, d = repo.status()[:4] if m or a or r or d: + ui.status(_("temporarily committing pending changes " + "(restore with 'hg unshelve --abort')\n")) def commitfunc(ui, repo, message, match, opts): hasmq = util.safehasattr(repo, 
'mq') if hasmq: @@ -551,15 +553,12 @@ tempopts = {} tempopts['message'] = "pending changes temporary commit" - oldquiet = ui.quiet - try: - ui.quiet = True - node = cmdutil.commit(ui, repo, commitfunc, [], tempopts) - finally: - ui.quiet = oldquiet + ui.quiet = True + node = cmdutil.commit(ui, repo, commitfunc, [], tempopts) tmpwctx = repo[node] try: + ui.quiet = True fp = shelvedfile(repo, basename, 'hg').opener() gen = changegroup.readbundle(fp, fp.name) repo.addchangegroup(gen, 'unshelve', 'bundle:' + fp.name) @@ -568,11 +567,14 @@ finally: fp.close() + ui.quiet = oldquiet + shelvectx = repo['tip'] # If the shelve is not immediately on top of the commit # we'll be merging with, rebase it to be on top. if tmpwctx.node() != shelvectx.parents()[0].node(): + ui.status(_('rebasing shelved changes\n')) try: rebase.rebase(ui, repo, **{ 'rev' : [shelvectx.rev()], @@ -610,6 +612,7 @@ unshelvecleanup(ui, repo, basename, opts) finally: + ui.quiet = oldquiet if tr: tr.release() lockmod.release(lock, wlock) @@ -632,8 +635,8 @@ ('p', 'patch', None, _('show patch')), ('', 'stat', None, - _('output diffstat-style summary of changes'))], - _('hg shelve')) + _('output diffstat-style summary of changes'))] + commands.walkopts, + _('hg shelve [OPTION]... [FILE]...')) def shelvecmd(ui, repo, *pats, **opts): '''save and set aside changes from the working directory
--- a/hgext/transplant.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/transplant.py Tue Mar 18 14:25:28 2014 -0500 @@ -670,7 +670,8 @@ s = revset.getset(repo, subset, x) else: s = subset - return [r for r in s if repo[r].extra().get('transplant_source')] + return revset.baseset([r for r in s if + repo[r].extra().get('transplant_source')]) def kwtransplanted(repo, ctx, **args): """:transplanted: String. The node identifier of the transplanted
--- a/hgext/win32text.py Mon Mar 17 14:57:13 2014 -0400 +++ b/hgext/win32text.py Tue Mar 18 14:25:28 2014 -0500 @@ -5,7 +5,7 @@ # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. -'''perform automatic newline conversion +'''perform automatic newline conversion (DEPRECATED) Deprecation: The win32text extension requires each user to configure the extension again and again for each clone since the configuration
--- a/i18n/check-translation.py Mon Mar 17 14:57:13 2014 -0400 +++ b/i18n/check-translation.py Tue Mar 18 14:25:28 2014 -0500 @@ -66,6 +66,46 @@ def warningchecker(msgidpat=None): return checker('warning', msgidpat) +@warningchecker() +def taildoublecolons(pe): + """Check equality of tail '::'-ness between msgid and msgstr + + >>> pe = polib.POEntry( + ... msgid ='ends with ::', + ... msgstr='ends with ::') + >>> for e in taildoublecolons(pe): print e + >>> pe = polib.POEntry( + ... msgid ='ends with ::', + ... msgstr='ends without double-colons') + >>> for e in taildoublecolons(pe): print e + tail '::'-ness differs between msgid and msgstr + >>> pe = polib.POEntry( + ... msgid ='ends without double-colons', + ... msgstr='ends with ::') + >>> for e in taildoublecolons(pe): print e + tail '::'-ness differs between msgid and msgstr + """ + if pe.msgid.endswith('::') != pe.msgstr.endswith('::'): + yield "tail '::'-ness differs between msgid and msgstr" + +@warningchecker() +def indentation(pe): + """Check equality of initial indentation between msgid and msgstr + + This may report unexpected warning, because this doesn't aware + the syntax of rst document and the context of msgstr. + + >>> pe = polib.POEntry( + ... msgid =' indented text', + ... msgstr=' narrowed indentation') + >>> for e in indentation(pe): print e + initial indentation width differs betweeen msgid and msgstr + """ + idindent = len(pe.msgid) - len(pe.msgid.lstrip()) + strindent = len(pe.msgstr) - len(pe.msgstr.lstrip()) + if idindent != strindent: + yield "initial indentation width differs betweeen msgid and msgstr" + #################### def check(pofile, fatal=True, warning=False):
--- a/i18n/de.po Mon Mar 17 14:57:13 2014 -0400 +++ b/i18n/de.po Tue Mar 18 14:25:28 2014 -0500 @@ -20,7 +20,7 @@ msgstr "" "Project-Id-Version: Mercurial\n" "Report-Msgid-Bugs-To: <mercurial-devel@selenic.com>\n" -"POT-Creation-Date: 2014-01-25 17:51+0100\n" +"POT-Creation-Date: 2014-01-29 16:47+0100\n" "PO-Revision-Date: 2013-09-30 20:52+0100\n" "Last-Translator: Simon Heimberg <simohe@besonet.ch>\n" "Language-Team: \n" @@ -2928,6 +2928,7 @@ " [repository]\n" " native = LF" +#. do not translate: .. note:: msgid ".. note::" msgstr "" @@ -5029,6 +5030,7 @@ " Siehe Hilfe zu 'paths' zu Pfad-Kurznamen und 'urls' für erlaubte\n" " Formate für die Quellangabe." +#. do not translate: .. container:: msgid " .. container:: verbose" msgstr "" @@ -6548,6 +6550,7 @@ " Ohne Argumente werden die aktuell aktiven Wächter ausgegeben.\n" " Mit einem Argument wird der aktuelle Wächter gesetzt." +#. do not translate: .. note:: msgid " .. note::" msgstr "" @@ -15694,6 +15697,7 @@ " order until one or more configuration files are detected." msgstr "" +#. do not translate: .. note:: msgid "" ".. note:: The registry key ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Wow6432Node" "\\Mercurial``\n" @@ -15873,6 +15877,7 @@ msgid " stable5 = latest -b stable" msgstr "" +#. do not translate: .. note:: msgid "" ".. note:: It is possible to create aliases with the same names as\n" " existing commands, which will then override the original\n" @@ -15918,6 +15923,7 @@ "echo foo`` call above, ``$HG_ARGS`` would expand to ``echo foo``." msgstr "" +#. do not translate: .. note:: msgid "" ".. note:: Some global configuration options such as ``-R`` are\n" " processed before shell aliases and will thus not be passed to\n" @@ -16101,6 +16107,7 @@ "the command." msgstr "" +#. do not translate: .. note:: msgid "" ".. note:: The tempfile mechanism is recommended for Windows systems,\n" " where the standard shell I/O redirection operators often have\n" @@ -16572,6 +16579,7 @@ " update failed (e.g. 
because conflicts not resolved), ``$HG_ERROR=1``." msgstr "" +#. do not translate: .. note:: msgid "" ".. note:: It is generally better to use standard hooks rather than the\n" " generic pre- and post- command hooks as they are guaranteed to be\n" @@ -16580,6 +16588,7 @@ " generate a commit (e.g. tag) and not just the commit command." msgstr "" +#. do not translate: .. note:: msgid "" ".. note:: Environment variables with empty values may not be passed to\n" " hooks on platforms such as Windows. As an example, ``$HG_PARENT2``\n" @@ -18967,6 +18976,7 @@ ":Manual group: Mercurial Manual" msgstr "" +#. do not translate: .. contents:: msgid "" ".. contents::\n" " :backlinks: top\n" @@ -19017,6 +19027,7 @@ " repository." msgstr "" +#. do not translate: .. include:: msgid ".. include:: hg.1.gendoc.txt" msgstr "" @@ -19121,6 +19132,7 @@ "Public License version 2 or any later version." msgstr "" +#. do not translate: .. include:: msgid ".. include:: common.txt\n" msgstr "" @@ -19143,6 +19155,7 @@ ":Manual group: Mercurial Manual" msgstr "" +#. do not translate: .. include:: msgid ".. include:: hgignore.5.gendoc.txt" msgstr "" @@ -19170,6 +19183,7 @@ "Public License version 2 or any later version." msgstr "" +#. do not translate: .. include:: msgid ".. include:: common.txt" msgstr "" @@ -19281,6 +19295,7 @@ "regexp pattern, start it with ``^``." msgstr "" +#. do not translate: .. note:: msgid "" ".. note::\n" " Patterns specified in other than ``.hgignore`` are always rooted.\n" @@ -19333,6 +19348,7 @@ ":Manual group: Mercurial Manual" msgstr "" +#. do not translate: .. contents:: msgid "" ".. contents::\n" " :backlinks: top\n" @@ -19348,6 +19364,7 @@ "Beschreibung\n" "============" +#. do not translate: .. include:: msgid ".. include:: hgrc.5.gendoc.txt" msgstr "" @@ -19564,6 +19581,7 @@ msgid "8. The merge of the file fails and must be resolved before commit." msgstr "" +#. do not translate: .. note:: msgid "" ".. 
note::\n" " After selecting a merge program, Mercurial will by default attempt\n" @@ -19633,6 +19651,7 @@ msgid "Alternate pattern notations must be specified explicitly." msgstr "Andere Schreibweisen von Mustern müssen explizit angegeben werden." +#. do not translate: .. note:: msgid "" ".. note::\n" " Patterns specified in ``.hgignore`` are not rooted.\n" @@ -19804,6 +19823,7 @@ msgid " - secret changesets are neither pushed, pulled, or cloned" msgstr "" +#. do not translate: .. note:: msgid "" ".. note::\n" " Pulling a draft changeset from a publishing server does not mark it\n" @@ -19823,12 +19843,14 @@ " [phases]\n" " publish = False" +#. do not translate: .. note:: msgid "" ".. note::\n" " Servers running older versions of Mercurial are treated as\n" " publishing." msgstr "" +#. do not translate: .. note:: msgid "" ".. note::\n" " Changesets in secret phase are not exchanged with the server. This\n" @@ -20216,6 +20238,7 @@ " repositories states when committing in the parent repository." msgstr "" +#. do not translate: .. note:: msgid "" " .. note::\n" " The ``.hgsubstate`` file should not be edited manually."
--- a/i18n/posplit Mon Mar 17 14:57:13 2014 -0400 +++ b/i18n/posplit Tue Mar 18 14:25:28 2014 -0500 @@ -5,6 +5,7 @@ # license: MIT/X11/Expat # +import re import sys import polib @@ -30,6 +31,7 @@ cache = {} entries = po[:] po[:] = [] + findd = re.compile(r' *\.\. (\w+)::') # for finding directives for entry in entries: msgids = entry.msgid.split(u'\n\n') if entry.msgstr: @@ -49,8 +51,27 @@ delta = 0 for msgid, msgstr in zip(msgids, msgstrs): - if msgid: + if msgid and msgid != '::': newentry = mkentry(entry, delta, msgid, msgstr) + mdirective = findd.match(msgid) + if mdirective: + if not msgid[mdirective.end():].rstrip(): + # only directive, nothing to translate here + continue + directive = mdirective.group(1) + if directive in ('container', 'include'): + if msgid.rstrip('\n').count('\n') == 0: + # only rst syntax, nothing to translate + continue + else: + # lines following directly, unexpected + print 'Warning: text follows line with directive' \ + ' %s' % directive + comment = 'do not translate: .. %s::' % directive + if not newentry.comment: + newentry.comment = comment + elif comment not in newentry.comment: + newentry.comment += '\n' + comment addentry(po, newentry, cache) delta += 2 + msgid.count('\n') po.save()
--- a/mercurial/ancestor.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/ancestor.py Tue Mar 18 14:25:28 2014 -0500 @@ -31,7 +31,7 @@ poison = 1 << (i + 1) gca = set() - interesting = left = len(nodes) + interesting = len(nodes) nv = len(seen) - 1 while nv >= 0 and interesting: v = nv @@ -45,10 +45,8 @@ gca.add(v) sv |= poison if v in nodes: - left -= 1 - if left <= 1: - # history is linear - return set([v]) + # history is linear + return set([v]) if sv < poison: for p in pfunc(v): sp = seen[p]
--- a/mercurial/bookmarks.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/bookmarks.py Tue Mar 18 14:25:28 2014 -0500 @@ -363,22 +363,6 @@ writer(msg) localmarks.write() -def updateremote(ui, repo, remote, revs): - ui.debug("checking for updated bookmarks\n") - revnums = map(repo.changelog.rev, revs or []) - ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)] - (addsrc, adddst, advsrc, advdst, diverge, differ, invalid - ) = compare(repo, repo._bookmarks, remote.listkeys('bookmarks'), - srchex=hex) - - for b, scid, dcid in advsrc: - if ancestors and repo[scid].rev() not in ancestors: - continue - if remote.pushkey('bookmarks', b, dcid, scid): - ui.status(_("updating bookmark %s\n") % b) - else: - ui.warn(_('updating bookmark %s failed!\n') % b) - def pushtoremote(ui, repo, remote, targets): (addsrc, adddst, advsrc, advdst, diverge, differ, invalid ) = compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
--- a/mercurial/changegroup.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/changegroup.py Tue Mar 18 14:25:28 2014 -0500 @@ -173,7 +173,7 @@ if not l: return {} fname = readexactly(self._stream, l) - return dict(filename=fname) + return {'filename': fname} def _deltaheader(self, headertuple, prevnode): node, p1, p2, cs = headertuple @@ -191,8 +191,8 @@ header = struct.unpack(self.deltaheader, headerdata) delta = readexactly(self._stream, l - self.deltaheadersize) node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode) - return dict(node=node, p1=p1, p2=p2, cs=cs, - deltabase=deltabase, delta=delta) + return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs, + 'deltabase': deltabase, 'delta': delta} class headerlessfixup(object): def __init__(self, fh, h):
--- a/mercurial/cmdutil.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/cmdutil.py Tue Mar 18 14:25:28 2014 -0500 @@ -10,7 +10,7 @@ import os, sys, errno, re, tempfile import util, scmutil, templater, patch, error, templatekw, revlog, copies import match as matchmod -import subrepo, context, repair, graphmod, revset, phases, obsolete, pathutil +import context, repair, graphmod, revset, phases, obsolete, pathutil import changelog import bookmarks import lock as lockmod @@ -542,6 +542,131 @@ if runfn: return runfn() +def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc): + """Utility function used by commands.import to import a single patch + + This function is explicitly defined here to help the evolve extension to + wrap this part of the import logic. + + The API is currently a bit ugly because it a simple code translation from + the import command. Feel free to make it better. + + :hunk: a patch (as a binary string) + :parents: nodes that will be parent of the created commit + :opts: the full dict of option passed to the import command + :msgs: list to save commit message to. 
+ (used in case we need to save it when failing) + :updatefunc: a function that update a repo to a given node + updatefunc(<repo>, <node>) + """ + tmpname, message, user, date, branch, nodeid, p1, p2 = \ + patch.extract(ui, hunk) + + editor = commiteditor + if opts.get('edit'): + editor = commitforceeditor + update = not opts.get('bypass') + strip = opts["strip"] + sim = float(opts.get('similarity') or 0) + if not tmpname: + return (None, None) + msg = _('applied to working directory') + + try: + cmdline_message = logmessage(ui, opts) + if cmdline_message: + # pickup the cmdline msg + message = cmdline_message + elif message: + # pickup the patch msg + message = message.strip() + else: + # launch the editor + message = None + ui.debug('message:\n%s\n' % message) + + if len(parents) == 1: + parents.append(repo[nullid]) + if opts.get('exact'): + if not nodeid or not p1: + raise util.Abort(_('not a Mercurial patch')) + p1 = repo[p1] + p2 = repo[p2 or nullid] + elif p2: + try: + p1 = repo[p1] + p2 = repo[p2] + # Without any options, consider p2 only if the + # patch is being applied on top of the recorded + # first parent. + if p1 != parents[0]: + p1 = parents[0] + p2 = repo[nullid] + except error.RepoError: + p1, p2 = parents + else: + p1, p2 = parents + + n = None + if update: + if p1 != parents[0]: + updatefunc(repo, p1.node()) + if p2 != parents[1]: + repo.setparents(p1.node(), p2.node()) + + if opts.get('exact') or opts.get('import_branch'): + repo.dirstate.setbranch(branch or 'default') + + files = set() + patch.patch(ui, repo, tmpname, strip=strip, files=files, + eolmode=None, similarity=sim / 100.0) + files = list(files) + if opts.get('no_commit'): + if message: + msgs.append(message) + else: + if opts.get('exact') or p2: + # If you got here, you either use --force and know what + # you are doing or used --exact or a merge patch while + # being updated to its first parent. 
+ m = None + else: + m = scmutil.matchfiles(repo, files or []) + n = repo.commit(message, opts.get('user') or user, + opts.get('date') or date, match=m, + editor=editor) + else: + if opts.get('exact') or opts.get('import_branch'): + branch = branch or 'default' + else: + branch = p1.branch() + store = patch.filestore() + try: + files = set() + try: + patch.patchrepo(ui, repo, p1, store, tmpname, strip, + files, eolmode=None) + except patch.PatchError, e: + raise util.Abort(str(e)) + memctx = context.makememctx(repo, (p1.node(), p2.node()), + message, + opts.get('user') or user, + opts.get('date') or date, + branch, files, store, + editor=commiteditor) + repo.savecommitmessage(memctx.description()) + n = memctx.commit() + finally: + store.close() + if opts.get('exact') and hex(n) != nodeid: + raise util.Abort(_('patch is damaged or loses information')) + if n: + # i18n: refers to a short changeset id + msg = _('created %s') % short(n) + return (msg, n) + finally: + os.unlink(tmpname) + def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False, opts=None): '''export changesets as hg patches.''' @@ -629,7 +754,7 @@ if listsubrepos: ctx1 = repo[node1] ctx2 = repo[node2] - for subpath, sub in subrepo.itersubrepos(ctx1, ctx2): + for subpath, sub in scmutil.itersubrepos(ctx1, ctx2): tempnode2 = node2 try: if node2 is not None: @@ -823,7 +948,7 @@ class changeset_templater(changeset_printer): '''format changeset information.''' - def __init__(self, ui, repo, patch, diffopts, mapfile, buffered): + def __init__(self, ui, repo, patch, diffopts, tmpl, mapfile, buffered): changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered) formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12]) defaulttempl = { @@ -836,11 +961,10 @@ defaulttempl['filecopy'] = defaulttempl['file_copy'] self.t = templater.templater(mapfile, {'formatnode': formatnode}, cache=defaulttempl) - self.cache = {} + if tmpl: + self.t.cache['changeset'] = tmpl - def 
use_template(self, t): - '''set template string to use''' - self.t.cache['changeset'] = t + self.cache = {} def _meaningful_parentrevs(self, ctx): """Return list of meaningful (or all if debug) parentrevs for rev. @@ -922,6 +1046,66 @@ except SyntaxError, inst: raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0])) +def gettemplate(ui, tmpl, style): + """ + Find the template matching the given template spec or style. + """ + + # ui settings + if not tmpl and not style: + tmpl = ui.config('ui', 'logtemplate') + if tmpl: + try: + tmpl = templater.parsestring(tmpl) + except SyntaxError: + tmpl = templater.parsestring(tmpl, quoted=False) + return tmpl, None + else: + style = util.expandpath(ui.config('ui', 'style', '')) + + if style: + mapfile = style + if not os.path.split(mapfile)[0]: + mapname = (templater.templatepath('map-cmdline.' + mapfile) + or templater.templatepath(mapfile)) + if mapname: + mapfile = mapname + return None, mapfile + + if not tmpl: + return None, None + + # looks like a literal template? + if '{' in tmpl: + return tmpl, None + + # perhaps a stock style? + if not os.path.split(tmpl)[0]: + mapname = (templater.templatepath('map-cmdline.' + tmpl) + or templater.templatepath(tmpl)) + if mapname and os.path.isfile(mapname): + return None, mapname + + # perhaps it's a reference to [templates] + t = ui.config('templates', tmpl) + if t: + try: + tmpl = templater.parsestring(t) + except SyntaxError: + tmpl = templater.parsestring(t, quoted=False) + return tmpl, None + + # perhaps it's a path to a map or a template + if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl): + # is it a mapfile for a style? + if os.path.basename(tmpl).startswith("map-"): + return None, os.path.realpath(tmpl) + tmpl = open(tmpl).read() + return tmpl, None + + # constant string? + return tmpl, None + def show_changeset(ui, repo, opts, buffered=False): """show one changeset using template or regular display. 
@@ -938,42 +1122,30 @@ if opts.get('patch') or opts.get('stat'): patch = scmutil.matchall(repo) - tmpl = opts.get('template') - style = None - if not tmpl: - style = opts.get('style') + tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style')) - # ui settings - if not (tmpl or style): - tmpl = ui.config('ui', 'logtemplate') - if tmpl: - try: - tmpl = templater.parsestring(tmpl) - except SyntaxError: - tmpl = templater.parsestring(tmpl, quoted=False) - else: - style = util.expandpath(ui.config('ui', 'style', '')) - - if not (tmpl or style): + if not tmpl and not mapfile: return changeset_printer(ui, repo, patch, opts, buffered) - mapfile = None - if style and not tmpl: - mapfile = style - if not os.path.split(mapfile)[0]: - mapname = (templater.templatepath('map-cmdline.' + mapfile) - or templater.templatepath(mapfile)) - if mapname: - mapfile = mapname - try: - t = changeset_templater(ui, repo, patch, opts, mapfile, buffered) + t = changeset_templater(ui, repo, patch, opts, tmpl, mapfile, buffered) except SyntaxError, inst: raise util.Abort(inst.args[0]) - if tmpl: - t.use_template(tmpl) return t +def showmarker(ui, marker): + """utility function to display obsolescence marker in a readable way + + To be used by debug function.""" + ui.write(hex(marker.precnode())) + for repl in marker.succnodes(): + ui.write(' ') + ui.write(hex(repl)) + ui.write(' %X ' % marker._data[2]) + ui.write('{%s}' % (', '.join('%r: %r' % t for t in + sorted(marker.metadata().items())))) + ui.write('\n') + def finddate(ui, repo, date): """Find the tipmost changeset that matches the given date spec""" @@ -995,19 +1167,11 @@ raise util.Abort(_("revision matching date not found")) -def increasingwindows(start, end, windowsize=8, sizelimit=512): - if start < end: - while start < end: - yield start, min(windowsize, end - start) - start += windowsize - if windowsize < sizelimit: - windowsize *= 2 - else: - while start > end: - yield start, min(windowsize, start - end - 1) - start 
-= windowsize - if windowsize < sizelimit: - windowsize *= 2 +def increasingwindows(windowsize=8, sizelimit=512): + while True: + yield windowsize + if windowsize < sizelimit: + windowsize *= 2 class FileWalkError(Exception): pass @@ -1132,7 +1296,7 @@ elif follow: revs = repo.revs('reverse(:.)') else: - revs = list(repo) + revs = revset.spanset(repo) revs.reverse() if not revs: return [] @@ -1148,7 +1312,7 @@ if not slowpath and not match.files(): # No files, no patterns. Display all revs. - wanted = set(revs) + wanted = revs if not slowpath and match.files(): # We only have to read through the filelog to find wanted revisions @@ -1250,14 +1414,7 @@ stop = min(revs[0], revs[-1]) for x in xrange(rev, stop - 1, -1): if ff.match(x): - wanted.discard(x) - - # Choose a small initial window if we will probably only visit a - # few commits. - limit = loglimit(opts) - windowsize = 8 - if limit: - windowsize = min(limit, windowsize) + wanted = wanted - [x] # Now that wanted is correctly initialized, we can iterate over the # revision range, yielding only revisions in wanted. @@ -1270,8 +1427,18 @@ def want(rev): return rev in wanted - for i, window in increasingwindows(0, len(revs), windowsize): - nrevs = [rev for rev in revs[i:i + window] if want(rev)] + it = iter(revs) + stopiteration = False + for windowsize in increasingwindows(): + nrevs = [] + for i in xrange(windowsize): + try: + rev = it.next() + if want(rev): + nrevs.append(rev) + except (StopIteration): + stopiteration = True + break for rev in sorted(nrevs): fns = fncache.get(rev) ctx = change(rev) @@ -1284,6 +1451,10 @@ prepare(ctx, fns) for rev in nrevs: yield change(rev) + + if stopiteration: + break + return iterate() def _makegraphfilematcher(repo, pats, followfirst): @@ -1344,8 +1515,12 @@ follow = opts.get('follow') or opts.get('follow_first') followfirst = opts.get('follow_first') and 1 or 0 # --follow with FILE behaviour depends on revs... 
- startrev = revs[0] - followdescendants = (len(revs) > 1 and revs[0] < revs[1]) and 1 or 0 + it = iter(revs) + startrev = it.next() + try: + followdescendants = startrev < it.next() + except (StopIteration): + followdescendants = False # branch and only_branch are really aliases and must be handled at # the same time @@ -1471,10 +1646,10 @@ if follow and len(repo) > 0: revs = repo.revs('reverse(:.)') else: - revs = list(repo.changelog) + revs = revset.spanset(repo) revs.reverse() if not revs: - return [], None, None + return revset.baseset(), None, None expr, filematcher = _makegraphlogrevset(repo, pats, opts, revs) if possiblyunsorted: revs.sort(reverse=True) @@ -1489,7 +1664,12 @@ revs = matcher(repo, revs) revs.sort(reverse=True) if limit is not None: - revs = revs[:limit] + limitedrevs = revset.baseset() + for idx, rev in enumerate(revs): + if idx >= limit: + break + limitedrevs.append(rev) + revs = limitedrevs return revs, expr, filematcher @@ -1531,7 +1711,7 @@ if opts.get('copies'): endrev = None if opts.get('rev'): - endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1 + endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev) displayer = show_changeset(ui, repo, opts, buffered=True) showparents = [ctx.node() for ctx in repo[None].parents()] @@ -1875,7 +2055,7 @@ # run editor in the repository root olddir = os.getcwd() os.chdir(repo.root) - text = repo.ui.edit("\n".join(edittext), ctx.user()) + text = repo.ui.edit("\n".join(edittext), ctx.user(), ctx.extra()) text = re.sub("(?m)^HG:.*(\n|$)", "", text) os.chdir(olddir) @@ -2062,54 +2242,8 @@ handle(revert, False) else: handle(remove, False) - if not opts.get('dry_run'): - def checkout(f): - fc = ctx[f] - repo.wwrite(f, fc.data(), fc.flags()) - - audit_path = pathutil.pathauditor(repo.root) - for f in remove[0]: - if repo.dirstate[f] == 'a': - repo.dirstate.drop(f) - continue - audit_path(f) - try: - util.unlinkpath(repo.wjoin(f)) - except 
OSError: - pass - repo.dirstate.remove(f) - - normal = None - if node == parent: - # We're reverting to our parent. If possible, we'd like status - # to report the file as clean. We have to use normallookup for - # merges to avoid losing information about merged/dirty files. - if p2 != nullid: - normal = repo.dirstate.normallookup - else: - normal = repo.dirstate.normal - for f in revert[0]: - checkout(f) - if normal: - normal(f) - - for f in add[0]: - checkout(f) - repo.dirstate.add(f) - - normal = repo.dirstate.normallookup - if node == parent and p2 == nullid: - normal = repo.dirstate.normal - for f in undelete[0]: - checkout(f) - normal(f) - - copied = copies.pathcopies(repo[parent], ctx) - - for f in add[0] + undelete[0] + revert[0]: - if f in copied: - repo.dirstate.copy(copied[f], f) + _performrevert(repo, parents, ctx, revert, add, remove, undelete) if targetsubs: # Revert the subrepos on the revert list @@ -2118,6 +2252,63 @@ finally: wlock.release() +def _performrevert(repo, parents, ctx, revert, add, remove, undelete): + """function that actually perform all the action computed for revert + + This is an independent function to let extension to plug in and react to + the imminent revert. + + Make sure you have the working directory locked when caling this function. + """ + parent, p2 = parents + node = ctx.node() + def checkout(f): + fc = ctx[f] + repo.wwrite(f, fc.data(), fc.flags()) + + audit_path = pathutil.pathauditor(repo.root) + for f in remove[0]: + if repo.dirstate[f] == 'a': + repo.dirstate.drop(f) + continue + audit_path(f) + try: + util.unlinkpath(repo.wjoin(f)) + except OSError: + pass + repo.dirstate.remove(f) + + normal = None + if node == parent: + # We're reverting to our parent. If possible, we'd like status + # to report the file as clean. We have to use normallookup for + # merges to avoid losing information about merged/dirty files. 
+ if p2 != nullid: + normal = repo.dirstate.normallookup + else: + normal = repo.dirstate.normal + for f in revert[0]: + checkout(f) + if normal: + normal(f) + + for f in add[0]: + checkout(f) + repo.dirstate.add(f) + + normal = repo.dirstate.normallookup + if node == parent and p2 == nullid: + normal = repo.dirstate.normal + for f in undelete[0]: + checkout(f) + normal(f) + + copied = copies.pathcopies(repo[parent], ctx) + + for f in add[0] + undelete[0] + revert[0]: + if f in copied: + repo.dirstate.copy(copied[f], f) + def command(table): '''returns a function object bound to table which can be used as a decorator for populating table as a command table'''
--- a/mercurial/commands.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/commands.py Tue Mar 18 14:25:28 2014 -0500 @@ -9,6 +9,7 @@ from lock import release from i18n import _ import os, re, difflib, time, tempfile, errno +import sys import hg, scmutil, util, revlog, copies, error, bookmarks import patch, help, encoding, templatekw, discovery import archival, changegroup, cmdutil, hbisect @@ -89,8 +90,8 @@ templateopts = [ ('', 'style', '', - _('display using template map file'), _('STYLE')), - ('', 'template', '', + _('display using template map file (DEPRECATED)'), _('STYLE')), + ('T', 'template', '', _('display with template'), _('TEMPLATE')), ] @@ -1459,6 +1460,83 @@ cmdutil.commitstatus(repo, node, branch, bheads, opts) +@command('config|showconfig|debugconfig', + [('u', 'untrusted', None, _('show untrusted configuration options')), + ('e', 'edit', None, _('start editor'))], + _('[-u] [NAME]...')) +def config(ui, repo, *values, **opts): + """show combined config settings from all hgrc files + + With no arguments, print names and values of all config items. + + With one argument of the form section.name, print just the value + of that config item. + + With multiple arguments, print names and values of all config + items with matching section names. + + With --debug, the source (filename and line number) is printed + for each config item. + + Returns 0 on success. 
+ """ + + if opts.get('edit'): + paths = scmutil.userrcpath() + for f in paths: + if os.path.exists(f): + break + else: + f = paths[0] + fp = open(f, "w") + fp.write( + '# example config (see "hg help config" for more info)\n' + '\n' + '[ui]\n' + '# name and email, e.g.\n' + '# username = Jane Doe <jdoe@example.com>\n' + 'username =\n' + '\n' + '[extensions]\n' + '# uncomment these lines to enable some popular extensions\n' + '# (see "hg help extensions" for more info)\n' + '# pager =\n' + '# progress =\n' + '# color =\n') + fp.close() + + editor = ui.geteditor() + util.system("%s \"%s\"" % (editor, f), + onerr=util.Abort, errprefix=_("edit failed"), + out=ui.fout) + return + + for f in scmutil.rcpath(): + ui.debug('read config from: %s\n' % f) + untrusted = bool(opts.get('untrusted')) + if values: + sections = [v for v in values if '.' not in v] + items = [v for v in values if '.' in v] + if len(items) > 1 or items and sections: + raise util.Abort(_('only one config item permitted')) + for section, name, value in ui.walkconfig(untrusted=untrusted): + value = str(value).replace('\n', '\\n') + sectname = section + '.' 
+ name + if values: + for v in values: + if v == section: + ui.debug('%s: ' % + ui.configsource(section, name, untrusted)) + ui.write('%s=%s\n' % (sectname, value)) + elif v == sectname: + ui.debug('%s: ' % + ui.configsource(section, name, untrusted)) + ui.write(value, '\n') + else: + ui.debug('%s: ' % + ui.configsource(section, name, untrusted)) + ui.write('%s=%s\n' % (sectname, value)) + @command('copy|cp', [('A', 'after', None, _('record a copy that has already occurred')), ('f', 'force', None, _('forcibly copy over an existing managed file')), @@ -1941,7 +2019,7 @@ tree = fileset.parse(expr)[0] ui.note(tree, "\n") - for f in fileset.getfileset(ctx, expr): + for f in ctx.getfileset(expr): ui.write("%s\n" % f) @command('debugfsinfo', [], _('[PATH]')) @@ -2085,7 +2163,10 @@ ui.write(_(" (check that your locale is properly set)\n")) problems += 1 - # Python lib + # Python + ui.status(_("checking Python executable (%s)\n") % sys.executable) + ui.status(_("checking Python version (%s)\n") + % ("%s.%s.%s" % sys.version_info[:3])) ui.status(_("checking Python lib (%s)...\n") % os.path.dirname(os.__file__)) @@ -2105,10 +2186,21 @@ import templater p = templater.templatepath() ui.status(_("checking templates (%s)...\n") % ' '.join(p)) - try: - templater.templater(templater.templatepath("map-cmdline.default")) - except Exception, inst: - ui.write(" %s\n" % inst) + if p: + m = templater.templatepath("map-cmdline.default") + if m: + # template found, check if it is working + try: + templater.templater(m) + except Exception, inst: + ui.write(" %s\n" % inst) + p = None + else: + ui.write(_(" template 'default' not found\n")) + p = None + else: + ui.write(_(" no template directories found\n")) + if not p: ui.write(_(" (templates seem to have been installed incorrectly)\n")) problems += 1 @@ -2214,14 +2306,7 @@ l.release() else: for m in obsolete.allmarkers(repo): - ui.write(hex(m.precnode())) - for repl in m.succnodes(): - ui.write(' ') - ui.write(hex(repl)) - ui.write(' %X ' 
% m._data[2]) - ui.write('{%s}' % (', '.join('%r: %r' % t for t in - sorted(m.metadata().items())))) - ui.write('\n') + cmdutil.showmarker(ui, m) @command('debugpathcomplete', [('f', 'full', None, _('complete an entire path')), @@ -2542,8 +2627,10 @@ ui.write(('deltas against other : ') + fmt % pcfmt(numother, numdeltas)) -@command('debugrevspec', [], ('REVSPEC')) -def debugrevspec(ui, repo, expr): +@command('debugrevspec', + [('', 'optimize', None, _('print parsed tree after optimizing'))], + ('REVSPEC')) +def debugrevspec(ui, repo, expr, **opts): """parse and apply a revision specification Use --verbose to print the parsed tree before and after aliases @@ -2555,8 +2642,11 @@ newtree = revset.findaliases(ui, tree) if newtree != tree: ui.note(revset.prettyformat(newtree), "\n") + if opts["optimize"]: + weight, optimizedtree = revset.optimize(newtree, True) + ui.note("* optimized:\n", revset.prettyformat(optimizedtree), "\n") func = revset.match(ui, expr) - for c in func(repo, range(len(repo))): + for c in func(repo, revset.spanset(repo)): ui.write("%s\n" % c) @command('debugsetparents', [], _('REV1 [REV2]')) @@ -3667,10 +3757,6 @@ if date: opts['date'] = util.parsedate(date) - editor = cmdutil.commiteditor - if opts.get('edit'): - editor = cmdutil.commitforceeditor - update = not opts.get('bypass') if not update and opts.get('no_commit'): raise util.Abort(_('cannot use --no-commit with --bypass')) @@ -3689,112 +3775,9 @@ cmdutil.bailifchanged(repo) base = opts["base"] - strip = opts["strip"] wlock = lock = tr = None msgs = [] - def tryone(ui, hunk, parents): - tmpname, message, user, date, branch, nodeid, p1, p2 = \ - patch.extract(ui, hunk) - - if not tmpname: - return (None, None) - msg = _('applied to working directory') - - try: - cmdline_message = cmdutil.logmessage(ui, opts) - if cmdline_message: - # pickup the cmdline msg - message = cmdline_message - elif message: - # pickup the patch msg - message = message.strip() - else: - # launch the editor - message = 
None - ui.debug('message:\n%s\n' % message) - - if len(parents) == 1: - parents.append(repo[nullid]) - if opts.get('exact'): - if not nodeid or not p1: - raise util.Abort(_('not a Mercurial patch')) - p1 = repo[p1] - p2 = repo[p2 or nullid] - elif p2: - try: - p1 = repo[p1] - p2 = repo[p2] - # Without any options, consider p2 only if the - # patch is being applied on top of the recorded - # first parent. - if p1 != parents[0]: - p1 = parents[0] - p2 = repo[nullid] - except error.RepoError: - p1, p2 = parents - else: - p1, p2 = parents - - n = None - if update: - if p1 != parents[0]: - hg.clean(repo, p1.node()) - if p2 != parents[1]: - repo.setparents(p1.node(), p2.node()) - - if opts.get('exact') or opts.get('import_branch'): - repo.dirstate.setbranch(branch or 'default') - - files = set() - patch.patch(ui, repo, tmpname, strip=strip, files=files, - eolmode=None, similarity=sim / 100.0) - files = list(files) - if opts.get('no_commit'): - if message: - msgs.append(message) - else: - if opts.get('exact') or p2: - # If you got here, you either use --force and know what - # you are doing or used --exact or a merge patch while - # being updated to its first parent. 
- m = None - else: - m = scmutil.matchfiles(repo, files or []) - n = repo.commit(message, opts.get('user') or user, - opts.get('date') or date, match=m, - editor=editor) - else: - if opts.get('exact') or opts.get('import_branch'): - branch = branch or 'default' - else: - branch = p1.branch() - store = patch.filestore() - try: - files = set() - try: - patch.patchrepo(ui, repo, p1, store, tmpname, strip, - files, eolmode=None) - except patch.PatchError, e: - raise util.Abort(str(e)) - memctx = context.makememctx(repo, (p1.node(), p2.node()), - message, - opts.get('user') or user, - opts.get('date') or date, - branch, files, store, - editor=cmdutil.commiteditor) - repo.savecommitmessage(memctx.description()) - n = memctx.commit() - finally: - store.close() - if opts.get('exact') and hex(n) != nodeid: - raise util.Abort(_('patch is damaged or loses information')) - if n: - # i18n: refers to a short changeset id - msg = _('created %s') % short(n) - return (msg, n) - finally: - os.unlink(tmpname) try: try: @@ -3815,7 +3798,8 @@ haspatch = False for hunk in patch.split(patchfile): - (msg, node) = tryone(ui, hunk, parents) + (msg, node) = cmdutil.tryimportone(ui, repo, hunk, parents, + opts, msgs, hg.clean) if msg: haspatch = True ui.note(msg + '\n') @@ -4000,6 +3984,12 @@ each commit. When the -v/--verbose switch is used, the list of changed files and full commit message are shown. + With --graph the revisions are shown as an ASCII art DAG with the most + recent changeset at the top. + 'o' is a changeset, '@' is a working directory parent, 'x' is obsolete, + and '+' represents a fork where the changeset from the lines below is a + parent of the 'o' merge on the same same line. + .. 
note:: log -p/--patch may generate unexpected diff output for merge @@ -4712,7 +4702,15 @@ dest, branches = hg.parseurl(dest, opts.get('branch')) ui.status(_('pushing to %s\n') % util.hidepassword(dest)) revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev')) - other = hg.peer(repo, opts, dest) + try: + other = hg.peer(repo, opts, dest) + except error.RepoError: + if dest == "default-push": + raise util.Abort(_("default repository not configured!"), + hint=_('see the "path" section in "hg help config"')) + else: + raise + if revs: revs = [repo.lookup(r) for r in scmutil.revrange(repo, revs)] @@ -5173,7 +5171,6 @@ s.serve_forever() if opts["cmdserver"]: - checkrepo() s = commandserver.server(ui, repo, opts["cmdserver"]) return s.serve() @@ -5245,52 +5242,6 @@ self.httpd.serve_forever() -@command('showconfig|debugconfig', - [('u', 'untrusted', None, _('show untrusted configuration options'))], - _('[-u] [NAME]...')) -def showconfig(ui, repo, *values, **opts): - """show combined config settings from all hgrc files - - With no arguments, print names and values of all config items. - - With one argument of the form section.name, print just the value - of that config item. - - With multiple arguments, print names and values of all config - items with matching section names. - - With --debug, the source (filename and line number) is printed - for each config item. - - Returns 0 on success. - """ - - for f in scmutil.rcpath(): - ui.debug('read config from: %s\n' % f) - untrusted = bool(opts.get('untrusted')) - if values: - sections = [v for v in values if '.' not in v] - items = [v for v in values if '.' in v] - if len(items) > 1 or items and sections: - raise util.Abort(_('only one config item permitted')) - for section, name, value in ui.walkconfig(untrusted=untrusted): - value = str(value).replace('\n', '\\n') - sectname = section + '.' 
+ name - if values: - for v in values: - if v == section: - ui.debug('%s: ' % - ui.configsource(section, name, untrusted)) - ui.write('%s=%s\n' % (sectname, value)) - elif v == sectname: - ui.debug('%s: ' % - ui.configsource(section, name, untrusted)) - ui.write(value, '\n') - else: - ui.debug('%s: ' % - ui.configsource(section, name, untrusted)) - ui.write('%s=%s\n' % (sectname, value)) - @command('^status|st', [('A', 'all', None, _('show status of all files')), ('m', 'modified', None, _('show only modified files')), @@ -5341,7 +5292,7 @@ ! = missing (deleted by non-hg command, but still tracked) ? = not tracked I = ignored - = origin of the previous file listed as A (added) + = origin of the previous file (with --copies) .. container:: verbose @@ -5929,7 +5880,7 @@ norepo = ("clone init version help debugcommands debugcomplete" " debugdate debuginstall debugfsinfo debugpushkey debugwireargs" " debugknown debuggetbundle debugbundle") -optionalrepo = ("identify paths serve showconfig debugancestor debugdag" +optionalrepo = ("identify paths serve config showconfig debugancestor debugdag" " debugdata debugindex debugindexdot debugrevlog") inferrepo = ("add addremove annotate cat commit diff grep forget log parents" " remove resolve status debugwalk")
--- a/mercurial/commandserver.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/commandserver.py Tue Mar 18 14:25:28 2014 -0500 @@ -142,11 +142,15 @@ else: logfile = open(logpath, 'a') - # the ui here is really the repo ui so take its baseui so we don't end - # up with its local configuration - self.ui = repo.baseui - self.repo = repo - self.repoui = repo.ui + if repo: + # the ui here is really the repo ui so take its baseui so we don't + # end up with its local configuration + self.ui = repo.baseui + self.repo = repo + self.repoui = repo.ui + else: + self.ui = ui + self.repo = self.repoui = None if mode == 'pipe': self.cerr = channeledoutput(sys.stderr, sys.stdout, 'e') @@ -183,18 +187,18 @@ # copy the uis so changes (e.g. --config or --verbose) don't # persist between requests copiedui = self.ui.copy() - self.repo.baseui = copiedui - # clone ui without using ui.copy because this is protected - repoui = self.repoui.__class__(self.repoui) - repoui.copy = copiedui.copy # redo copy protection - self.repo.ui = self.repo.dirstate._ui = repoui - self.repo.invalidate() - self.repo.invalidatedirstate() + if self.repo: + self.repo.baseui = copiedui + # clone ui without using ui.copy because this is protected + repoui = self.repoui.__class__(self.repoui) + repoui.copy = copiedui.copy # redo copy protection + self.repo.ui = self.repo.dirstate._ui = repoui + self.repo.invalidateall() req = dispatch.request(args[:], copiedui, self.repo, self.cin, self.cout, self.cerr) - ret = dispatch.dispatch(req) or 0 # might return None + ret = (dispatch.dispatch(req) or 0) & 255 # might return None # restore old cwd if '--cwd' in args:
--- a/mercurial/context.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/context.py Tue Mar 18 14:25:28 2014 -0500 @@ -12,6 +12,7 @@ import os, errno, stat import obsolete as obsmod import repoview +import fileset propertycache = util.propertycache @@ -79,6 +80,9 @@ def mutable(self): return self.phase() > phases.public + def getfileset(self, expr): + return fileset.getfileset(self, expr) + def obsolete(self): """True if the changeset is obsolete""" return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
--- a/mercurial/copies.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/copies.py Tue Mar 18 14:25:28 2014 -0500 @@ -262,6 +262,18 @@ else: diverge2.update(fl) # reverse map for below + bothnew = sorted([d for d in m1 if d in m2 and d not in ma]) + if bothnew: + repo.ui.debug(" unmatched files new in both:\n %s\n" + % "\n ".join(bothnew)) + bothdiverge, _copy, _fullcopy = {}, {}, {} + for f in bothnew: + checkcopies(ctx, f, m1, m2, ca, limit, bothdiverge, _copy, _fullcopy) + checkcopies(ctx, f, m2, m1, ca, limit, bothdiverge, _copy, _fullcopy) + for of, fl in bothdiverge.items(): + if len(fl) == 2 and fl[0] == fl[1]: + copy[fl[0]] = of # not actually divergent, just matching renames + if fullcopy: repo.ui.debug(" all copies found (* = to merge, ! = divergent, " "% = renamed and deleted):\n")
--- a/mercurial/dirstate.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/dirstate.py Tue Mar 18 14:25:28 2014 -0500 @@ -4,7 +4,6 @@ # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. -import errno from node import nullid from i18n import _
--- a/mercurial/discovery.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/discovery.py Tue Mar 18 14:25:28 2014 -0500 @@ -154,7 +154,7 @@ - branch: the branch name - remoteheads: the list of remote heads known locally - None is the branch is new + None if the branch is new - newheads: the new remote heads (known locally) with outgoing pushed - unsyncedheads: the list of remote heads unknown locally. """ @@ -250,8 +250,7 @@ hint=_("use 'hg push --new-branch' to create" " new remote branches")) - # 2 compute newly pushed bookmarks. We - # we don't warned about bookmarked heads. + # 2. Compute newly pushed bookmarks. We don't warn about bookmarked heads. localbookmarks = repo._bookmarks remotebookmarks = remote.listkeys('bookmarks') bookmarkedheads = set() @@ -269,23 +268,23 @@ # If there are more heads after the push than before, a suitable # error message, depending on unsynced status, is displayed. error = None - unsynced = False allmissing = set(outgoing.missing) allfuturecommon = set(c.node() for c in repo.set('%ld', outgoing.common)) allfuturecommon.update(allmissing) for branch, heads in sorted(headssum.iteritems()): - candidate_newhs = set(heads[1]) + remoteheads, newheads, unsyncedheads = heads + candidate_newhs = set(newheads) # add unsynced data - if heads[0] is None: + if remoteheads is None: oldhs = set() else: - oldhs = set(heads[0]) - oldhs.update(heads[2]) - candidate_newhs.update(heads[2]) - dhs = None + oldhs = set(remoteheads) + oldhs.update(unsyncedheads) + candidate_newhs.update(unsyncedheads) + dhs = None # delta heads, the new heads on branch discardedheads = set() if repo.obsstore: - # remove future heads which are actually obsolete by another + # remove future heads which are actually obsoleted by another # pushed element: # # XXX as above, There are several cases this case does not handle @@ -297,8 +296,8 @@ # (2) if the new heads have ancestors which are not obsolete and # not ancestors of any other heads we will have a new head too. 
# - # This two case will be easy to handle for know changeset but much - # more tricky for unsynced changes. + # These two cases will be easy to handle for known changeset but + # much more tricky for unsynced changes. newhs = set() for nh in candidate_newhs: if nh in repo and repo[nh].phase() <= phases.public: @@ -312,10 +311,17 @@ newhs.add(nh) else: newhs = candidate_newhs - if [h for h in heads[2] if h not in discardedheads]: - unsynced = True - if heads[0] is None: - if 1 < len(newhs): + unsynced = sorted(h for h in unsyncedheads if h not in discardedheads) + if unsynced: + heads = ' '.join(short(h) for h in unsynced) + if branch is None: + repo.ui.status(_("remote has heads that are " + "not known locally: %s\n") % heads) + else: + repo.ui.status(_("remote has heads on branch '%s' that are " + "not known locally: %s\n") % (branch, heads)) + if remoteheads is None: + if len(newhs) > 1: dhs = list(newhs) if error is None: error = (_("push creates new branch '%s' " @@ -324,7 +330,7 @@ " see \"hg help push\" for details about" " pushing new heads") elif len(newhs) > len(oldhs): - # strip updates to existing remote heads from the new heads list + # remove bookmarked or existing remote heads from the new heads list dhs = sorted(newhs - bookmarkedheads - oldhs) if dhs: if error is None: @@ -334,7 +340,7 @@ else: error = _("push creates new remote head %s!" ) % short(dhs[0]) - if heads[2]: # unsynced + if unsyncedheads: hint = _("pull and merge or" " see \"hg help push\" for details about" " pushing new heads") @@ -350,7 +356,3 @@ repo.ui.note((" %s\n") % short(h)) if error: raise util.Abort(error, hint=hint) - - # 6. Check for unsynced changes on involved branches. - if unsynced: - repo.ui.warn(_("note: unsynced remote changes!\n"))
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/exchange.py Tue Mar 18 14:25:28 2014 -0500 @@ -0,0 +1,530 @@ +# exchange.py - utily to exchange data between repo. +# +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from i18n import _ +from node import hex, nullid +import errno +import util, scmutil, changegroup, base85 +import discovery, phases, obsolete, bookmarks + + +class pushoperation(object): + """A object that represent a single push operation + + It purpose is to carry push related state and very common operation. + + A new should be created at the begining of each push and discarded + afterward. + """ + + def __init__(self, repo, remote, force=False, revs=None, newbranch=False): + # repo we push from + self.repo = repo + self.ui = repo.ui + # repo we push to + self.remote = remote + # force option provided + self.force = force + # revs to be pushed (None is "all") + self.revs = revs + # allow push of new branch + self.newbranch = newbranch + # did a local lock get acquired? + self.locallocked = None + # Integer version of the push result + # - None means nothing to push + # - 0 means HTTP error + # - 1 means we pushed and remote head count is unchanged *or* + # we have outgoing changesets but refused to push + # - other values as described by addchangegroup() + self.ret = None + # discover.outgoing object (contains common and outgoin data) + self.outgoing = None + # all remote heads before the push + self.remoteheads = None + # testable as a boolean indicating if any nodes are missing locally. + self.incoming = None + # set of all heads common after changeset bundle push + self.commonheads = None + +def push(repo, remote, force=False, revs=None, newbranch=False): + '''Push outgoing changesets (limited by revs) from a local + repository to remote. 
Return an integer: + - None means nothing to push + - 0 means HTTP error + - 1 means we pushed and remote head count is unchanged *or* + we have outgoing changesets but refused to push + - other values as described by addchangegroup() + ''' + pushop = pushoperation(repo, remote, force, revs, newbranch) + if pushop.remote.local(): + missing = (set(pushop.repo.requirements) + - pushop.remote.local().supported) + if missing: + msg = _("required features are not" + " supported in the destination:" + " %s") % (', '.join(sorted(missing))) + raise util.Abort(msg) + + # there are two ways to push to remote repo: + # + # addchangegroup assumes local user can lock remote + # repo (local filesystem, old ssh servers). + # + # unbundle assumes local user cannot lock remote repo (new ssh + # servers, http servers). + + if not pushop.remote.canpush(): + raise util.Abort(_("destination does not support push")) + # get local lock as we might write phase data + locallock = None + try: + locallock = pushop.repo.lock() + pushop.locallocked = True + except IOError, err: + pushop.locallocked = False + if err.errno != errno.EACCES: + raise + # source repo cannot be locked. + # We do not abort the push, but just disable the local phase + # synchronisation. 
+ msg = 'cannot lock source repository: %s\n' % err + pushop.ui.debug(msg) + try: + pushop.repo.checkpush(pushop.force, pushop.revs) + lock = None + unbundle = pushop.remote.capable('unbundle') + if not unbundle: + lock = pushop.remote.lock() + try: + _pushdiscovery(pushop) + if _pushcheckoutgoing(pushop): + _pushchangeset(pushop) + _pushcomputecommonheads(pushop) + _pushsyncphase(pushop) + _pushobsolete(pushop) + finally: + if lock is not None: + lock.release() + finally: + if locallock is not None: + locallock.release() + + _pushbookmark(pushop) + return pushop.ret + +def _pushdiscovery(pushop): + # discovery + unfi = pushop.repo.unfiltered() + fci = discovery.findcommonincoming + commoninc = fci(unfi, pushop.remote, force=pushop.force) + common, inc, remoteheads = commoninc + fco = discovery.findcommonoutgoing + outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs, + commoninc=commoninc, force=pushop.force) + pushop.outgoing = outgoing + pushop.remoteheads = remoteheads + pushop.incoming = inc + +def _pushcheckoutgoing(pushop): + outgoing = pushop.outgoing + unfi = pushop.repo.unfiltered() + if not outgoing.missing: + # nothing to push + scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded) + return False + # something to push + if not pushop.force: + # if repo.obsstore == False --> no obsolete + # then, save the iteration + if unfi.obsstore: + # this message are here for 80 char limit reason + mso = _("push includes obsolete changeset: %s!") + mst = "push includes %s changeset: %s!" + # plain versions for i18n tool to detect them + _("push includes unstable changeset: %s!") + _("push includes bumped changeset: %s!") + _("push includes divergent changeset: %s!") + # If we are to push if there is at least one + # obsolete or unstable changeset in missing, at + # least one of the missinghead will be obsolete or + # unstable. 
So checking heads only is ok + for node in outgoing.missingheads: + ctx = unfi[node] + if ctx.obsolete(): + raise util.Abort(mso % ctx) + elif ctx.troubled(): + raise util.Abort(_(mst) + % (ctx.troubles()[0], + ctx)) + newbm = pushop.ui.configlist('bookmarks', 'pushing') + discovery.checkheads(unfi, pushop.remote, outgoing, + pushop.remoteheads, + pushop.newbranch, + bool(pushop.incoming), + newbm) + return True + +def _pushchangeset(pushop): + """Make the actual push of changeset bundle to remote repo""" + outgoing = pushop.outgoing + unbundle = pushop.remote.capable('unbundle') + # TODO: get bundlecaps from remote + bundlecaps = None + # create a changegroup from local + if pushop.revs is None and not (outgoing.excluded + or pushop.repo.changelog.filteredrevs): + # push everything, + # use the fast path, no race possible on push + bundler = changegroup.bundle10(pushop.repo, bundlecaps) + cg = pushop.repo._changegroupsubset(outgoing, + bundler, + 'push', + fastpath=True) + else: + cg = pushop.repo.getlocalbundle('push', outgoing, bundlecaps) + + # apply changegroup to remote + if unbundle: + # local repo finds heads on server, finds out what + # revs it must push. once revs transferred, if server + # finds it has different heads (someone else won + # commit/push race), server aborts. + if pushop.force: + remoteheads = ['force'] + else: + remoteheads = pushop.remoteheads + # ssh: return remote's addchangegroup() + # http: return remote's addchangegroup() or 0 for error + pushop.ret = pushop.remote.unbundle(cg, remoteheads, + 'push') + else: + # we return an integer indicating remote head count + # change + pushop.ret = pushop.remote.addchangegroup(cg, 'push', + pushop.repo.url()) + +def _pushcomputecommonheads(pushop): + unfi = pushop.repo.unfiltered() + if pushop.ret: + # push succeed, synchronize target of the push + cheads = pushop.outgoing.missingheads + elif pushop.revs is None: + # All out push fails. 
synchronize all common + cheads = pushop.outgoing.commonheads + else: + # I want cheads = heads(::missingheads and ::commonheads) + # (missingheads is revs with secret changeset filtered out) + # + # This can be expressed as: + # cheads = ( (missingheads and ::commonheads) + # + (commonheads and ::missingheads))" + # ) + # + # while trying to push we already computed the following: + # common = (::commonheads) + # missing = ((commonheads::missingheads) - commonheads) + # + # We can pick: + # * missingheads part of common (::commonheads) + common = set(pushop.outgoing.common) + nm = pushop.repo.changelog.nodemap + cheads = [node for node in pushop.revs if nm[node] in common] + # and + # * commonheads parents on missing + revset = unfi.set('%ln and parents(roots(%ln))', + pushop.outgoing.commonheads, + pushop.outgoing.missing) + cheads.extend(c.node() for c in revset) + pushop.commonheads = cheads + +def _pushsyncphase(pushop): + """synchronise phase information locally and remotly""" + unfi = pushop.repo.unfiltered() + cheads = pushop.commonheads + if pushop.ret: + # push succeed, synchronize target of the push + cheads = pushop.outgoing.missingheads + elif pushop.revs is None: + # All out push fails. 
synchronize all common + cheads = pushop.outgoing.commonheads + else: + # I want cheads = heads(::missingheads and ::commonheads) + # (missingheads is revs with secret changeset filtered out) + # + # This can be expressed as: + # cheads = ( (missingheads and ::commonheads) + # + (commonheads and ::missingheads))" + # ) + # + # while trying to push we already computed the following: + # common = (::commonheads) + # missing = ((commonheads::missingheads) - commonheads) + # + # We can pick: + # * missingheads part of common (::commonheads) + common = set(pushop.outgoing.common) + nm = pushop.repo.changelog.nodemap + cheads = [node for node in pushop.revs if nm[node] in common] + # and + # * commonheads parents on missing + revset = unfi.set('%ln and parents(roots(%ln))', + pushop.outgoing.commonheads, + pushop.outgoing.missing) + cheads.extend(c.node() for c in revset) + pushop.commonheads = cheads + # even when we don't push, exchanging phase data is useful + remotephases = pushop.remote.listkeys('phases') + if (pushop.ui.configbool('ui', '_usedassubrepo', False) + and remotephases # server supports phases + and pushop.ret is None # nothing was pushed + and remotephases.get('publishing', False)): + # When: + # - this is a subrepo push + # - and remote support phase + # - and no changeset was pushed + # - and remote is publishing + # We may be in issue 3871 case! + # We drop the possible phase synchronisation done by + # courtesy to publish changesets possibly locally draft + # on the remote. 
+ remotephases = {'publishing': 'True'} + if not remotephases: # old server or public only rer + _localphasemove(pushop, cheads) + # don't push any phase data as there is nothing to push + else: + ana = phases.analyzeremotephases(pushop.repo, cheads, + remotephases) + pheads, droots = ana + ### Apply remote phase on local + if remotephases.get('publishing', False): + _localphasemove(pushop, cheads) + else: # publish = False + _localphasemove(pushop, pheads) + _localphasemove(pushop, cheads, phases.draft) + ### Apply local phase on remote + + # Get the list of all revs draft on remote by public here. + # XXX Beware that revset break if droots is not strictly + # XXX root we may want to ensure it is but it is costly + outdated = unfi.set('heads((%ln::%ln) and public())', + droots, cheads) + for newremotehead in outdated: + r = pushop.remote.pushkey('phases', + newremotehead.hex(), + str(phases.draft), + str(phases.public)) + if not r: + pushop.ui.warn(_('updating %s to public failed!\n') + % newremotehead) + +def _localphasemove(pushop, nodes, phase=phases.public): + """move <nodes> to <phase> in the local source repo""" + if pushop.locallocked: + phases.advanceboundary(pushop.repo, phase, nodes) + else: + # repo is not locked, do not change any phases! + # Informs the user that phases should have been moved when + # applicable. 
+ actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()] + phasestr = phases.phasenames[phase] + if actualmoves: + pushop.ui.status(_('cannot lock source repo, skipping ' + 'local %s phase update\n') % phasestr) + +def _pushobsolete(pushop): + """utility function to push obsolete markers to a remote""" + pushop.ui.debug('try to push obsolete markers to remote\n') + repo = pushop.repo + remote = pushop.remote + if (obsolete._enabled and repo.obsstore and + 'obsolete' in remote.listkeys('namespaces')): + rslts = [] + remotedata = repo.listkeys('obsolete') + for key in sorted(remotedata, reverse=True): + # reverse sort to ensure we end with dump0 + data = remotedata[key] + rslts.append(remote.pushkey('obsolete', key, '', data)) + if [r for r in rslts if not r]: + msg = _('failed to push some obsolete markers!\n') + repo.ui.warn(msg) + +def _pushbookmark(pushop): + """Update bookmark position on remote""" + ui = pushop.ui + repo = pushop.repo.unfiltered() + remote = pushop.remote + ui.debug("checking for updated bookmarks\n") + revnums = map(repo.changelog.rev, pushop.revs or []) + ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)] + (addsrc, adddst, advsrc, advdst, diverge, differ, invalid + ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'), + srchex=hex) + + for b, scid, dcid in advsrc: + if ancestors and repo[scid].rev() not in ancestors: + continue + if remote.pushkey('bookmarks', b, dcid, scid): + ui.status(_("updating bookmark %s\n") % b) + else: + ui.warn(_('updating bookmark %s failed!\n') % b) + +class pulloperation(object): + """A object that represent a single pull operation + + It purpose is to carry push related state and very common operation. + + A new should be created at the begining of each pull and discarded + afterward. 
+ """ + + def __init__(self, repo, remote, heads=None, force=False): + # repo we pull into + self.repo = repo + # repo we pull from + self.remote = remote + # revision we try to pull (None is "all") + self.heads = heads + # do we force pull? + self.force = force + # the name the pull transaction + self._trname = 'pull\n' + util.hidepassword(remote.url()) + # hold the transaction once created + self._tr = None + # set of common changeset between local and remote before pull + self.common = None + # set of pulled head + self.rheads = None + # list of missing changeset to fetch remotly + self.fetch = None + + @util.propertycache + def pulledsubset(self): + """heads of the set of changeset target by the pull""" + # compute target subset + if self.heads is None: + # We pulled every thing possible + # sync on everything common + return self.common + self.rheads + else: + # We pulled a specific subset + # sync on this subset + return self.heads + + def gettransaction(self): + """get appropriate pull transaction, creating it if needed""" + if self._tr is None: + self._tr = self.repo.transaction(self._trname) + return self._tr + + def closetransaction(self): + """close transaction if created""" + if self._tr is not None: + self._tr.close() + + def releasetransaction(self): + """release transaction if created""" + if self._tr is not None: + self._tr.release() + +def pull(repo, remote, heads=None, force=False): + pullop = pulloperation(repo, remote, heads, force) + if pullop.remote.local(): + missing = set(pullop.remote.requirements) - pullop.repo.supported + if missing: + msg = _("required features are not" + " supported in the destination:" + " %s") % (', '.join(sorted(missing))) + raise util.Abort(msg) + + lock = pullop.repo.lock() + try: + tmp = discovery.findcommonincoming(pullop.repo.unfiltered(), + pullop.remote, + heads=pullop.heads, + force=force) + pullop.common, pullop.fetch, pullop.rheads = tmp + if not pullop.fetch: + pullop.repo.ui.status(_("no changes 
found\n")) + result = 0 + else: + result = _pullchangeset(pullop) + + _pullphase(pullop) + _pullobsolete(pullop) + pullop.closetransaction() + finally: + pullop.releasetransaction() + lock.release() + + return result + +def _pullchangeset(pullop): + """pull changeset from unbundle into the local repo""" + # We delay the open of the transaction as late as possible so we + # don't open transaction for nothing or you break future useful + # rollback call + pullop.gettransaction() + if pullop.heads is None and list(pullop.common) == [nullid]: + pullop.repo.ui.status(_("requesting all changes\n")) + elif pullop.heads is None and pullop.remote.capable('changegroupsubset'): + # issue1320, avoid a race if remote changed after discovery + pullop.heads = pullop.rheads + + if pullop.remote.capable('getbundle'): + # TODO: get bundlecaps from remote + cg = pullop.remote.getbundle('pull', common=pullop.common, + heads=pullop.heads or pullop.rheads) + elif pullop.heads is None: + cg = pullop.remote.changegroup(pullop.fetch, 'pull') + elif not pullop.remote.capable('changegroupsubset'): + raise util.Abort(_("partial pull cannot be done because " + "other repository doesn't support " + "changegroupsubset.")) + else: + cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull') + return pullop.repo.addchangegroup(cg, 'pull', pullop.remote.url()) + +def _pullphase(pullop): + # Get remote phases data from remote + remotephases = pullop.remote.listkeys('phases') + publishing = bool(remotephases.get('publishing', False)) + if remotephases and not publishing: + # remote is new and unpublishing + pheads, _dr = phases.analyzeremotephases(pullop.repo, + pullop.pulledsubset, + remotephases) + phases.advanceboundary(pullop.repo, phases.public, pheads) + phases.advanceboundary(pullop.repo, phases.draft, + pullop.pulledsubset) + else: + # Remote is old or publishing all common changesets + # should be seen as public + phases.advanceboundary(pullop.repo, phases.public, + 
pullop.pulledsubset) + +def _pullobsolete(pullop): + """utility function to pull obsolete markers from a remote + + The `gettransaction` is function that return the pull transaction, creating + one if necessary. We return the transaction to inform the calling code that + a new transaction have been created (when applicable). + + Exists mostly to allow overriding for experimentation purpose""" + tr = None + if obsolete._enabled: + pullop.repo.ui.debug('fetching remote obsolete markers\n') + remoteobs = pullop.remote.listkeys('obsolete') + if 'dump0' in remoteobs: + tr = pullop.gettransaction() + for key in sorted(remoteobs, reverse=True): + if key.startswith('dump'): + data = base85.b85decode(remoteobs[key]) + pullop.repo.obsstore.mergemarkers(tr, data) + pullop.repo.invalidatevolatilesets() + return tr +
--- a/mercurial/extensions.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/extensions.py Tue Mar 18 14:25:28 2014 -0500 @@ -11,7 +11,7 @@ _extensions = {} _order = [] -_ignore = ['hbisect', 'bookmarks', 'parentrevspec', 'interhg'] +_ignore = ['hbisect', 'bookmarks', 'parentrevspec', 'interhg', 'inotify'] def extensions(ui=None): if ui: @@ -43,10 +43,10 @@ def loadpath(path, module_name): module_name = module_name.replace('.', '_') - path = util.expandpath(path) + path = util.normpath(util.expandpath(path)) if os.path.isdir(path): # module/__init__.py style - d, f = os.path.split(path.rstrip('/')) + d, f = os.path.split(path) fd, fpath, desc = imp.find_module(f, [d]) return imp.load_module(module_name, fd, fpath, desc) else:
--- a/mercurial/filemerge.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/filemerge.py Tue Mar 18 14:25:28 2014 -0500 @@ -248,20 +248,21 @@ tool, toolpath, binary, symlink = toolconf a, b, c, back = files out = "" - env = dict(HG_FILE=fcd.path(), - HG_MY_NODE=short(mynode), - HG_OTHER_NODE=str(fco.changectx()), - HG_BASE_NODE=str(fca.changectx()), - HG_MY_ISLINK='l' in fcd.flags(), - HG_OTHER_ISLINK='l' in fco.flags(), - HG_BASE_ISLINK='l' in fca.flags()) + env = {'HG_FILE': fcd.path(), + 'HG_MY_NODE': short(mynode), + 'HG_OTHER_NODE': str(fco.changectx()), + 'HG_BASE_NODE': str(fca.changectx()), + 'HG_MY_ISLINK': 'l' in fcd.flags(), + 'HG_OTHER_ISLINK': 'l' in fco.flags(), + 'HG_BASE_ISLINK': 'l' in fca.flags(), + } ui = repo.ui args = _toolstr(ui, tool, "args", '$local $base $other') if "$output" in args: out, a = a, back # read input from backup, write to original - replace = dict(local=a, base=b, other=c, output=out) + replace = {'local': a, 'base': b, 'other': c, 'output': out} args = util.interpolate(r'\$', replace, args, lambda s: util.shellquote(util.localpath(s))) r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env,
--- a/mercurial/graphmod.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/graphmod.py Tue Mar 18 14:25:28 2014 -0500 @@ -34,10 +34,10 @@ return cl = repo.changelog - lowestrev = min(revs) + lowestrev = revs.min() gpcache = {} - knownrevs = set(revs) + knownrevs = revs.set() for rev in revs: ctx = repo[rev] parents = sorted(set([p.rev() for p in ctx.parents()
--- a/mercurial/help.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/help.py Tue Mar 18 14:25:28 2014 -0500 @@ -12,18 +12,21 @@ import encoding, util, minirst import cmdutil -def listexts(header, exts, indent=1): +def listexts(header, exts, indent=1, showdeprecated=False): '''return a text listing of the given extensions''' rst = [] if exts: rst.append('\n%s\n\n' % header) for name, desc in sorted(exts.iteritems()): + if '(DEPRECATED)' in desc and not showdeprecated: + continue rst.append('%s:%s: %s\n' % (' ' * indent, name, desc)) return rst def extshelp(): rst = loaddoc('extensions')().splitlines(True) - rst.extend(listexts(_('enabled extensions:'), extensions.enabled())) + rst.extend(listexts( + _('enabled extensions:'), extensions.enabled(), showdeprecated=True)) rst.extend(listexts(_('disabled extensions:'), extensions.disabled())) doc = ''.join(rst) return doc @@ -38,7 +41,7 @@ shortopt, longopt, default, desc = option optlabel = _("VALUE") # default label - if _("DEPRECATED") in desc and not verbose: + if not verbose and ("DEPRECATED" in desc or _("DEPRECATED") in desc): continue so = ''
--- a/mercurial/help/config.txt Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/help/config.txt Tue Mar 18 14:25:28 2014 -0500 @@ -85,7 +85,9 @@ be read. Mercurial checks each of these locations in the specified order until one or more configuration files are detected. -.. note:: The registry key ``HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Mercurial`` +.. note:: + + The registry key ``HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Mercurial`` is used when running 32-bit Python on 64-bit Windows. Syntax @@ -204,7 +206,9 @@ stable5 = latest -b stable -.. note:: It is possible to create aliases with the same names as +.. note:: + + It is possible to create aliases with the same names as existing commands, which will then override the original definitions. This is almost always a bad idea! @@ -235,7 +239,9 @@ ``$HG_ARGS`` expands to the arguments given to Mercurial. In the ``hg echo foo`` call above, ``$HG_ARGS`` would expand to ``echo foo``. -.. note:: Some global configuration options such as ``-R`` are +.. note:: + + Some global configuration options such as ``-R`` are processed before shell aliases and will thus not be passed to aliases. @@ -362,7 +368,9 @@ of an empty temporary file, where the filtered data must be written by the command. -.. note:: The tempfile mechanism is recommended for Windows systems, +.. note:: + + The tempfile mechanism is recommended for Windows systems, where the standard shell I/O redirection operators often have strange effects and may corrupt the contents of your files. @@ -708,13 +716,17 @@ in ``$HG_PARENT2``. If the update succeeded, ``$HG_ERROR=0``. If the update failed (e.g. because conflicts not resolved), ``$HG_ERROR=1``. -.. note:: It is generally better to use standard hooks rather than the +.. note:: + + It is generally better to use standard hooks rather than the generic pre- and post- command hooks as they are guaranteed to be called in the appropriate contexts for influencing transactions. 
Also, hooks like "commit" will be called in all contexts that generate a commit (e.g. tag) and not just the commit command. -.. note:: Environment variables with empty values may not be passed to +.. note:: + + Environment variables with empty values may not be passed to hooks on platforms such as Windows. As an example, ``$HG_PARENT2`` will have an empty value under Unix-like platforms for non-merge changesets, while it will not be available at all under Windows.
--- a/mercurial/help/hgignore.txt Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/help/hgignore.txt Tue Mar 18 14:25:28 2014 -0500 @@ -69,6 +69,7 @@ regexp pattern, start it with ``^``. .. note:: + Patterns specified in other than ``.hgignore`` are always rooted. Please see :hg:`help patterns` for details.
--- a/mercurial/help/merge-tools.txt Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/help/merge-tools.txt Tue Mar 18 14:25:28 2014 -0500 @@ -73,6 +73,7 @@ 8. The merge of the file fails and must be resolved before commit. .. note:: + After selecting a merge program, Mercurial will by default attempt to merge the files using a simple merge algorithm first. Only if it doesn't succeed because of conflicting changes Mercurial will actually execute the
--- a/mercurial/help/patterns.txt Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/help/patterns.txt Tue Mar 18 14:25:28 2014 -0500 @@ -7,6 +7,7 @@ Alternate pattern notations must be specified explicitly. .. note:: + Patterns specified in ``.hgignore`` are not rooted. Please see :hg:`help hgignore` for details.
--- a/mercurial/help/phases.txt Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/help/phases.txt Tue Mar 18 14:25:28 2014 -0500 @@ -42,6 +42,7 @@ - secret changesets are neither pushed, pulled, or cloned .. note:: + Pulling a draft changeset from a publishing server does not mark it as public on the server side due to the read-only nature of pull. @@ -55,10 +56,12 @@ See :hg:`help config` for more information on configuration files. .. note:: + Servers running older versions of Mercurial are treated as publishing. .. note:: + Changesets in secret phase are not exchanged with the server. This applies to their content: file names, file contents, and changeset metadata. For technical reasons, the identifier (e.g. d825e4025e39)
--- a/mercurial/help/subrepos.txt Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/help/subrepos.txt Tue Mar 18 14:25:28 2014 -0500 @@ -39,6 +39,7 @@ repositories states when committing in the parent repository. .. note:: + The ``.hgsubstate`` file should not be edited manually.
--- a/mercurial/help/templates.txt Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/help/templates.txt Tue Mar 18 14:25:28 2014 -0500 @@ -52,14 +52,20 @@ - if(expr, then[, else]) +- ifcontains(expr, expr, then[, else]) + - ifeq(expr, expr, then[, else]) - join(list, sep) - label(label, expr) +- revset(query[, formatargs]) + - rstdoc(text, style) +- shortest(node) + - strip(text[, chars]) - sub(pat, repl, expr) @@ -106,3 +112,11 @@ - Display the contents of the 'extra' field, one per line:: $ hg log -r 0 --template "{join(extras, '\n')}\n" + +- Mark the current bookmark with '*':: + + $ hg log --template "{bookmarks % '{bookmark}{ifeq(bookmark, current, \"*\")} '}\n" + +- Mark the working copy parent with '@':: + + $ hg log --template "{ifcontains(rev, revset('.'), '@')}\n"
--- a/mercurial/hgweb/hgwebdir_mod.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/hgweb/hgwebdir_mod.py Tue Mar 18 14:25:28 2014 -0500 @@ -308,17 +308,17 @@ # add '/' to the name to make it obvious that # the entry is a directory, not a regular repository - row = dict(contact="", - contact_sort="", - name=name + '/', - name_sort=name, - url=url, - description="", - description_sort="", - lastchange=d, - lastchange_sort=d[1]-d[0], - archives=[], - isdirectory=True) + row = {'contact': "", + 'contact_sort': "", + 'name': name + '/', + 'name_sort': name, + 'url': url, + 'description': "", + 'description_sort': "", + 'lastchange': d, + 'lastchange_sort': d[1]-d[0], + 'archives': [], + 'isdirectory': True} seendirs.add(name) yield row @@ -356,17 +356,18 @@ contact = get_contact(get) description = get("web", "description", "") name = get("web", "name", name) - row = dict(contact=contact or "unknown", - contact_sort=contact.upper() or "unknown", - name=name, - name_sort=name, - url=url, - description=description or "unknown", - description_sort=description.upper() or "unknown", - lastchange=d, - lastchange_sort=d[1]-d[0], - archives=archivelist(u, "tip", url), - isdirectory=None) + row = {'contact': contact or "unknown", + 'contact_sort': contact.upper() or "unknown", + 'name': name, + 'name_sort': name, + 'url': url, + 'description': description or "unknown", + 'description_sort': description.upper() or "unknown", + 'lastchange': d, + 'lastchange_sort': d[1]-d[0], + 'archives': archivelist(u, "tip", url), + 'isdirectory': None, + } seenrepos.add(name) yield row
--- a/mercurial/hgweb/webcommands.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/hgweb/webcommands.py Tue Mar 18 14:25:28 2014 -0500 @@ -187,7 +187,7 @@ mfunc = revset.match(web.repo.ui, revdef) try: - revs = mfunc(web.repo, list(web.repo)) + revs = mfunc(web.repo, revset.baseset(web.repo)) return MODE_REVSET, revs # ParseError: wrongly placed tokens, wrongs arguments, etc # RepoLookupError: no such revision, e.g. in 'revision:' @@ -982,7 +982,11 @@ if len(revs) >= revcount: break - dag = graphmod.dagwalker(web.repo, revs) + # We have to feed a baseset to dagwalker as it is expecting smartset + # object. This does not have a big impact on hgweb performance itself + # since hgweb graphing code is not itself lazy yet. + dag = graphmod.dagwalker(web.repo, revset.baseset(revs)) + # As we said one line above... not lazy. tree = list(graphmod.colored(dag, web.repo)) def getcolumns(tree): @@ -1018,26 +1022,26 @@ [cgi.escape(x) for x in ctx.tags()], [cgi.escape(x) for x in ctx.bookmarks()])) else: - edgedata = [dict(col=edge[0], nextcol=edge[1], - color=(edge[2] - 1) % 6 + 1, - width=edge[3], bcolor=edge[4]) + edgedata = [{'col': edge[0], 'nextcol': edge[1], + 'color': (edge[2] - 1) % 6 + 1, + 'width': edge[3], 'bcolor': edge[4]} for edge in edges] data.append( - dict(node=node, - col=vtx[0], - color=(vtx[1] - 1) % 6 + 1, - edges=edgedata, - row=row, - nextrow=row + 1, - desc=desc, - user=user, - age=age, - bookmarks=webutil.nodebookmarksdict( - web.repo, ctx.node()), - branches=webutil.nodebranchdict(web.repo, ctx), - inbranch=webutil.nodeinbranch(web.repo, ctx), - tags=webutil.nodetagsdict(web.repo, ctx.node()))) + {'node': node, + 'col': vtx[0], + 'color': (vtx[1] - 1) % 6 + 1, + 'edges': edgedata, + 'row': row, + 'nextrow': row + 1, + 'desc': desc, + 'user': user, + 'age': age, + 'bookmarks': webutil.nodebookmarksdict( + web.repo, ctx.node()), + 'branches': webutil.nodebranchdict(web.repo, ctx), + 'inbranch': webutil.nodeinbranch(web.repo, ctx), + 'tags': 
webutil.nodetagsdict(web.repo, ctx.node())}) row += 1
--- a/mercurial/hgweb/webutil.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/hgweb/webutil.py Tue Mar 18 14:25:28 2014 -0500 @@ -146,7 +146,7 @@ def renamelink(fctx): r = fctx.renamed() if r: - return [dict(file=r[0], node=hex(r[1]))] + return [{'file': r[0], 'node': hex(r[1])}] return [] def nodetagsdict(repo, node):
--- a/mercurial/hook.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/hook.py Tue Mar 18 14:25:28 2014 -0500 @@ -6,7 +6,7 @@ # GNU General Public License version 2 or any later version. from i18n import _ -import os, sys, time, types +import os, sys, time import extensions, util, demandimport def _pythonhook(ui, repo, name, hname, funcname, args, throw): @@ -19,11 +19,10 @@ unmodified commands (e.g. mercurial.commands.update) can be run as hooks without wrappers to convert return values.''' - ui.note(_("calling hook %s: %s\n") % (hname, funcname)) - starttime = time.time() - - obj = funcname - if not util.safehasattr(obj, '__call__'): + if util.safehasattr(funcname, '__call__'): + obj = funcname + funcname = obj.__module__ + "." + obj.__name__ + else: d = funcname.rfind('.') if d == -1: raise util.Abort(_('%s hook is invalid ("%s" not in ' @@ -75,6 +74,10 @@ raise util.Abort(_('%s hook is invalid ' '("%s" is not callable)') % (hname, funcname)) + + ui.note(_("calling hook %s: %s\n") % (hname, funcname)) + starttime = time.time() + try: try: # redirect IO descriptors to the ui descriptors so hooks @@ -100,11 +103,8 @@ finally: sys.stdout, sys.stderr, sys.stdin = old duration = time.time() - starttime - readablefunc = funcname - if isinstance(funcname, types.FunctionType): - readablefunc = funcname.__module__ + "." + funcname.__name__ ui.log('pythonhook', 'pythonhook-%s: %s finished in %0.2f seconds\n', - name, readablefunc, duration) + name, funcname, duration) if r: if throw: raise util.Abort(_('%s hook failed') % hname)
--- a/mercurial/localrepo.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/localrepo.py Tue Mar 18 14:25:28 2014 -0500 @@ -9,7 +9,7 @@ import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview import changelog, dirstate, filelog, manifest, context, bookmarks, phases import lock as lockmod -import transaction, store, encoding +import transaction, store, encoding, exchange import scmutil, util, extensions, hook, error, revset import match as matchmod import merge as mergemod @@ -428,7 +428,7 @@ '''Return a list of revisions matching the given revset''' expr = revset.formatspec(expr, *args) m = revset.match(None, expr) - return [r for r in m(self, list(self))] + return m(self, revset.spanset(self)) def set(self, expr, *args): ''' @@ -993,6 +993,13 @@ pass self.invalidatecaches() + def invalidateall(self): + '''Fully invalidates both store and non-store parts, causing the + subsequent operation to reread any outside changes.''' + # extension should hook this to invalidate its caches + self.invalidate() + self.invalidatedirstate() + def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc): try: l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc) @@ -1005,6 +1012,7 @@ l = lockmod.lock(vfs, lockname, int(self.ui.config("ui", "timeout", "600")), releasefn, desc=desc) + self.ui.warn(_("got lock after %s seconds\n") % l.delay) if acquirefn: acquirefn() return l @@ -1122,6 +1130,8 @@ self.ui.warn(_("warning: can't find ancestor for '%s' " "copied from '%s'!\n") % (fname, cfname)) + elif fparent1 == nullid: + fparent1, fparent2 = fparent2, nullid elif fparent2 != nullid: # is one parent an ancestor of the other? 
fparentancestor = flog.ancestor(fparent1, fparent2) @@ -1578,7 +1588,7 @@ r = modified, added, removed, deleted, unknown, ignored, clean if listsubrepos: - for subpath, sub in subrepo.itersubrepos(ctx1, ctx2): + for subpath, sub in scmutil.itersubrepos(ctx1, ctx2): if working: rev2 = None else: @@ -1658,89 +1668,7 @@ return r def pull(self, remote, heads=None, force=False): - if remote.local(): - missing = set(remote.requirements) - self.supported - if missing: - msg = _("required features are not" - " supported in the destination:" - " %s") % (', '.join(sorted(missing))) - raise util.Abort(msg) - - # don't open transaction for nothing or you break future useful - # rollback call - tr = None - trname = 'pull\n' + util.hidepassword(remote.url()) - lock = self.lock() - try: - tmp = discovery.findcommonincoming(self.unfiltered(), remote, - heads=heads, force=force) - common, fetch, rheads = tmp - if not fetch: - self.ui.status(_("no changes found\n")) - result = 0 - else: - tr = self.transaction(trname) - if heads is None and list(common) == [nullid]: - self.ui.status(_("requesting all changes\n")) - elif heads is None and remote.capable('changegroupsubset'): - # issue1320, avoid a race if remote changed after discovery - heads = rheads - - if remote.capable('getbundle'): - # TODO: get bundlecaps from remote - cg = remote.getbundle('pull', common=common, - heads=heads or rheads) - elif heads is None: - cg = remote.changegroup(fetch, 'pull') - elif not remote.capable('changegroupsubset'): - raise util.Abort(_("partial pull cannot be done because " - "other repository doesn't support " - "changegroupsubset.")) - else: - cg = remote.changegroupsubset(fetch, heads, 'pull') - result = self.addchangegroup(cg, 'pull', remote.url()) - - # compute target subset - if heads is None: - # We pulled every thing possible - # sync on everything common - subset = common + rheads - else: - # We pulled a specific subset - # sync on this subset - subset = heads - - # Get remote phases 
data from remote - remotephases = remote.listkeys('phases') - publishing = bool(remotephases.get('publishing', False)) - if remotephases and not publishing: - # remote is new and unpublishing - pheads, _dr = phases.analyzeremotephases(self, subset, - remotephases) - phases.advanceboundary(self, phases.public, pheads) - phases.advanceboundary(self, phases.draft, subset) - else: - # Remote is old or publishing all common changesets - # should be seen as public - phases.advanceboundary(self, phases.public, subset) - - def gettransaction(): - if tr is None: - return self.transaction(trname) - return tr - - obstr = obsolete.syncpull(self, remote, gettransaction) - if obstr is not None: - tr = obstr - - if tr is not None: - tr.close() - finally: - if tr is not None: - tr.release() - lock.release() - - return result + return exchange.pull (self, remote, heads, force) def checkpush(self, force, revs): """Extensions can override this function if additional checks have @@ -1750,223 +1678,7 @@ pass def push(self, remote, force=False, revs=None, newbranch=False): - '''Push outgoing changesets (limited by revs) from the current - repository to remote. Return an integer: - - None means nothing to push - - 0 means HTTP error - - 1 means we pushed and remote head count is unchanged *or* - we have outgoing changesets but refused to push - - other values as described by addchangegroup() - ''' - if remote.local(): - missing = set(self.requirements) - remote.local().supported - if missing: - msg = _("required features are not" - " supported in the destination:" - " %s") % (', '.join(sorted(missing))) - raise util.Abort(msg) - - # there are two ways to push to remote repo: - # - # addchangegroup assumes local user can lock remote - # repo (local filesystem, old ssh servers). - # - # unbundle assumes local user cannot lock remote repo (new ssh - # servers, http servers). 
- - if not remote.canpush(): - raise util.Abort(_("destination does not support push")) - unfi = self.unfiltered() - def localphasemove(nodes, phase=phases.public): - """move <nodes> to <phase> in the local source repo""" - if locallock is not None: - phases.advanceboundary(self, phase, nodes) - else: - # repo is not locked, do not change any phases! - # Informs the user that phases should have been moved when - # applicable. - actualmoves = [n for n in nodes if phase < self[n].phase()] - phasestr = phases.phasenames[phase] - if actualmoves: - self.ui.status(_('cannot lock source repo, skipping local' - ' %s phase update\n') % phasestr) - # get local lock as we might write phase data - locallock = None - try: - locallock = self.lock() - except IOError, err: - if err.errno != errno.EACCES: - raise - # source repo cannot be locked. - # We do not abort the push, but just disable the local phase - # synchronisation. - msg = 'cannot lock source repository: %s\n' % err - self.ui.debug(msg) - try: - self.checkpush(force, revs) - lock = None - unbundle = remote.capable('unbundle') - if not unbundle: - lock = remote.lock() - try: - # discovery - fci = discovery.findcommonincoming - commoninc = fci(unfi, remote, force=force) - common, inc, remoteheads = commoninc - fco = discovery.findcommonoutgoing - outgoing = fco(unfi, remote, onlyheads=revs, - commoninc=commoninc, force=force) - - - if not outgoing.missing: - # nothing to push - scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded) - ret = None - else: - # something to push - if not force: - # if self.obsstore == False --> no obsolete - # then, save the iteration - if unfi.obsstore: - # this message are here for 80 char limit reason - mso = _("push includes obsolete changeset: %s!") - mst = "push includes %s changeset: %s!" 
- # plain versions for i18n tool to detect them - _("push includes unstable changeset: %s!") - _("push includes bumped changeset: %s!") - _("push includes divergent changeset: %s!") - # If we are to push if there is at least one - # obsolete or unstable changeset in missing, at - # least one of the missinghead will be obsolete or - # unstable. So checking heads only is ok - for node in outgoing.missingheads: - ctx = unfi[node] - if ctx.obsolete(): - raise util.Abort(mso % ctx) - elif ctx.troubled(): - raise util.Abort(_(mst) - % (ctx.troubles()[0], - ctx)) - newbm = self.ui.configlist('bookmarks', 'pushing') - discovery.checkheads(unfi, remote, outgoing, - remoteheads, newbranch, - bool(inc), newbm) - - # TODO: get bundlecaps from remote - bundlecaps = None - # create a changegroup from local - if revs is None and not (outgoing.excluded - or self.changelog.filteredrevs): - # push everything, - # use the fast path, no race possible on push - bundler = changegroup.bundle10(self, bundlecaps) - cg = self._changegroupsubset(outgoing, - bundler, - 'push', - fastpath=True) - else: - cg = self.getlocalbundle('push', outgoing, bundlecaps) - - # apply changegroup to remote - if unbundle: - # local repo finds heads on server, finds out what - # revs it must push. once revs transferred, if server - # finds it has different heads (someone else won - # commit/push race), server aborts. - if force: - remoteheads = ['force'] - # ssh: return remote's addchangegroup() - # http: return remote's addchangegroup() or 0 for error - ret = remote.unbundle(cg, remoteheads, 'push') - else: - # we return an integer indicating remote head count - # change - ret = remote.addchangegroup(cg, 'push', self.url()) - - if ret: - # push succeed, synchronize target of the push - cheads = outgoing.missingheads - elif revs is None: - # All out push fails. 
synchronize all common - cheads = outgoing.commonheads - else: - # I want cheads = heads(::missingheads and ::commonheads) - # (missingheads is revs with secret changeset filtered out) - # - # This can be expressed as: - # cheads = ( (missingheads and ::commonheads) - # + (commonheads and ::missingheads))" - # ) - # - # while trying to push we already computed the following: - # common = (::commonheads) - # missing = ((commonheads::missingheads) - commonheads) - # - # We can pick: - # * missingheads part of common (::commonheads) - common = set(outgoing.common) - nm = self.changelog.nodemap - cheads = [node for node in revs if nm[node] in common] - # and - # * commonheads parents on missing - revset = unfi.set('%ln and parents(roots(%ln))', - outgoing.commonheads, - outgoing.missing) - cheads.extend(c.node() for c in revset) - # even when we don't push, exchanging phase data is useful - remotephases = remote.listkeys('phases') - if (self.ui.configbool('ui', '_usedassubrepo', False) - and remotephases # server supports phases - and ret is None # nothing was pushed - and remotephases.get('publishing', False)): - # When: - # - this is a subrepo push - # - and remote support phase - # - and no changeset was pushed - # - and remote is publishing - # We may be in issue 3871 case! - # We drop the possible phase synchronisation done by - # courtesy to publish changesets possibly locally draft - # on the remote. - remotephases = {'publishing': 'True'} - if not remotephases: # old server or public only repo - localphasemove(cheads) - # don't push any phase data as there is nothing to push - else: - ana = phases.analyzeremotephases(self, cheads, remotephases) - pheads, droots = ana - ### Apply remote phase on local - if remotephases.get('publishing', False): - localphasemove(cheads) - else: # publish = False - localphasemove(pheads) - localphasemove(cheads, phases.draft) - ### Apply local phase on remote - - # Get the list of all revs draft on remote by public here. 
- # XXX Beware that revset break if droots is not strictly - # XXX root we may want to ensure it is but it is costly - outdated = unfi.set('heads((%ln::%ln) and public())', - droots, cheads) - for newremotehead in outdated: - r = remote.pushkey('phases', - newremotehead.hex(), - str(phases.draft), - str(phases.public)) - if not r: - self.ui.warn(_('updating %s to public failed!\n') - % newremotehead) - self.ui.debug('try to push obsolete markers to remote\n') - obsolete.syncpush(self, remote) - finally: - if lock is not None: - lock.release() - finally: - if locallock is not None: - locallock.release() - - bookmarks.updateremote(self.ui, unfi, remote, revs) - return ret + return exchange.push(self, remote, force, revs, newbranch) def changegroupinfo(self, nodes, source): if self.ui.verbose or source == 'bundle': @@ -1976,9 +1688,9 @@ for node in nodes: self.ui.debug("%s\n" % hex(node)) - def changegroupsubset(self, bases, heads, source): + def changegroupsubset(self, roots, heads, source): """Compute a changegroup consisting of all the nodes that are - descendants of any of the bases and ancestors of any of the heads. + descendants of any of the roots and ancestors of any of the heads. Return a chunkbuffer object whose read() method will return successive changegroup chunks. @@ -1990,12 +1702,12 @@ the changegroup a particular filenode or manifestnode belongs to. """ cl = self.changelog - if not bases: - bases = [nullid] + if not roots: + roots = [nullid] # TODO: remove call to nodesbetween. 
- csets, bases, heads = cl.nodesbetween(bases, heads) + csets, roots, heads = cl.nodesbetween(roots, heads) discbases = [] - for n in bases: + for n in roots: discbases.extend([p for p in cl.parents(n) if p != nullid]) outgoing = discovery.outgoing(cl, discbases, heads) bundler = changegroup.bundle10(self) @@ -2176,9 +1888,9 @@ added = [cl.node(r) for r in xrange(clstart, clend)] publishing = self.ui.configbool('phases', 'publish', True) if srctype == 'push': - # Old server can not push the boundary themself. - # New server won't push the boundary if changeset already - # existed locally as secrete + # Old servers can not push the boundary themselves. + # New servers won't push the boundary if changeset already + # exists locally as secret # # We should not use added here but the list of all change in # the bundle
--- a/mercurial/lock.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/lock.py Tue Mar 18 14:25:28 2014 -0500 @@ -38,7 +38,7 @@ self.desc = desc self.postrelease = [] self.pid = os.getpid() - self.lock() + self.delay = self.lock() def __del__(self): if self.held: @@ -57,7 +57,7 @@ while True: try: self.trylock() - return 1 + return self.timeout - timeout except error.LockHeld, inst: if timeout != 0: time.sleep(1)
--- a/mercurial/match.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/match.py Tue Mar 18 14:25:28 2014 -0500 @@ -6,7 +6,7 @@ # GNU General Public License version 2 or any later version. import re -import util, fileset, pathutil +import util, pathutil from i18n import _ def _rematcher(pat): @@ -26,7 +26,7 @@ if kind == 'set': if not ctx: raise util.Abort("fileset expression with no context") - s = fileset.getfileset(ctx, expr) + s = ctx.getfileset(expr) fset.update(s) continue other.append((kind, expr))
--- a/mercurial/merge.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/merge.py Tue Mar 18 14:25:28 2014 -0500 @@ -34,7 +34,7 @@ [type][length][content] Type is a single character, length is a 4 bytes integer, content is an - arbitrary suites of bytes of lenght `length`. + arbitrary suites of bytes of length `length`. Type should be a letter. Capital letter are mandatory record, Mercurial should abort if they are unknown. lower case record can be safely ignored. @@ -47,10 +47,12 @@ ''' statepathv1 = "merge/state" statepathv2 = "merge/state2" + def __init__(self, repo): self._repo = repo self._dirty = False self._read() + def reset(self, node=None, other=None): self._state = {} if node: @@ -58,7 +60,13 @@ self._other = other shutil.rmtree(self._repo.join("merge"), True) self._dirty = False + def _read(self): + """Analyse each record content to restore a serialized state from disk + + This function process "record" entry produced by the de-serialization + of on disk file. + """ self._state = {} records = self._readrecords() for rtype, record in records: @@ -73,7 +81,21 @@ raise util.Abort(_('unsupported merge state record:' % rtype)) self._dirty = False + def _readrecords(self): + """Read merge state from disk and return a list of record (TYPE, data) + + We read data from both V1 and Ve files decide which on to use. + + V1 have been used by version prior to 2.9.1 and contains less data than + v2. We read both version and check if no data in v2 contradict one in + v1. If there is not contradiction we can safely assume that both v1 + and v2 were written at the same time and use the extract data in v2. If + there is contradiction we ignore v2 content as we assume an old version + of Mercurial have over written the mergstate file and left an old v2 + file around. 
+ + returns list of record [(TYPE, data), ...]""" v1records = self._readrecordsv1() v2records = self._readrecordsv2() oldv2 = set() # old format version of v2 record @@ -101,7 +123,15 @@ return v1records else: return v2records + def _readrecordsv1(self): + """read on disk merge state for version 1 file + + returns list of record [(TYPE, data), ...] + + Note: the "F" data from this file are one entry short + (no "other file node" entry) + """ records = [] try: f = self._repo.opener(self.statepathv1) @@ -115,7 +145,12 @@ if err.errno != errno.ENOENT: raise return records + def _readrecordsv2(self): + """read on disk merge state for version 2 file + + returns list of record [(TYPE, data), ...] + """ records = [] try: f = self._repo.opener(self.statepathv2) @@ -125,17 +160,19 @@ while off < end: rtype = data[off] off += 1 - lenght = _unpack('>I', data[off:(off + 4)])[0] + length = _unpack('>I', data[off:(off + 4)])[0] off += 4 - record = data[off:(off + lenght)] - off += lenght + record = data[off:(off + length)] + off += length records.append((rtype, record)) f.close() except IOError, err: if err.errno != errno.ENOENT: raise return records + def commit(self): + """Write current state on disk (if necessary)""" if self._dirty: records = [] records.append(("L", hex(self._local))) @@ -144,10 +181,14 @@ records.append(("F", "\0".join([d] + v))) self._writerecords(records) self._dirty = False + def _writerecords(self, records): + """Write current state on disk (both v1 and v2)""" self._writerecordsv1(records) self._writerecordsv2(records) + def _writerecordsv1(self, records): + """Write current state on disk in a version 1 file""" f = self._repo.opener(self.statepathv1, "w") irecords = iter(records) lrecords = irecords.next() @@ -157,14 +198,25 @@ if rtype == "F": f.write("%s\n" % _droponode(data)) f.close() + def _writerecordsv2(self, records): + """Write current state on disk in a version 2 file""" f = self._repo.opener(self.statepathv2, "w") for key, data in records: 
assert len(key) == 1 format = ">sI%is" % len(data) f.write(_pack(format, key, len(data), data)) f.close() + def add(self, fcl, fco, fca, fd): + """add a new (potentially?) conflicting file the merge state + fcl: file context for local, + fco: file context for remote, + fca: file context for ancestors, + fd: file path of the resulting merge. + + note: also write the local version to the `.hg/merge` directory. + """ hash = util.sha1(fcl.path()).hexdigest() self._repo.opener.write("merge/" + hash, fcl.data()) self._state[fd] = ['u', hash, fcl.path(), @@ -172,21 +224,28 @@ fco.path(), hex(fco.filenode()), fcl.flags()] self._dirty = True + def __contains__(self, dfile): return dfile in self._state + def __getitem__(self, dfile): return self._state[dfile][0] + def __iter__(self): l = self._state.keys() l.sort() for f in l: yield f + def files(self): return self._state.keys() + def mark(self, dfile, state): self._state[dfile][0] = state self._dirty = True + def resolve(self, dfile, wctx): + """rerun merge process for file path `dfile`""" if self[dfile] == 'r': return 0 stateentry = self._state[dfile] @@ -263,7 +322,7 @@ return actions -def _checkcollision(repo, wmf, actions, prompts): +def _checkcollision(repo, wmf, actions): # build provisional merged manifest up pmmf = set(wmf) @@ -295,21 +354,14 @@ "m": mergeop, "r": removeop, "rd": nop, + "cd": addop, + "dc": addop, } for f, m, args, msg in actions: op = opmap.get(m) assert op, m op(f, args) - opmap = { - "cd": addop, - "dc": addop, - } - for f, m in prompts: - op = opmap.get(m) - assert op, m - op(f, None) - # check case-folding collision in provisional merged manifest foldmap = {} for f in sorted(pmmf): @@ -370,7 +422,7 @@ m1['.hgsubstate'] += "+" break - aborts, prompts = [], [] + aborts = [] # Compare manifests fdiff = dicthelpers.diff(m1, m2) flagsdiff = m1.flagsdiff(m2) @@ -395,9 +447,14 @@ if partial and not partial(f): continue if n1 and n2: - fla = ma.flags(f) + fa = f + a = ma.get(f, nullid) + if a == 
nullid: + fa = copy.get(f, f) + # Note: f as default is wrong - we can't really make a 3-way + # merge without an ancestor file. + fla = ma.flags(fa) nol = 'l' not in fl1 + fl2 + fla - a = ma.get(f, nullid) if n2 == a and fl2 == fla: pass # remote unchanged - keep local elif n1 == a and fl1 == fla: # local unchanged - use remote @@ -423,7 +480,10 @@ "local copied/moved to " + f2)) elif n1 and f in ma: # clean, a different, no remote if n1 != ma[f]: - prompts.append((f, "cd")) # prompt changed/deleted + if acceptremote: + actions.append((f, "r", None, "remote delete")) + else: + actions.append((f, "cd", None, "prompt changed/deleted")) elif n1[20:] == "a": # added, no remote actions.append((f, "f", None, "remote deleted")) else: @@ -470,7 +530,12 @@ aborts.append((f, "ud")) else: # if different: old untracked f may be overwritten and lost - prompts.append((f, "dc")) # prompt deleted/changed + if acceptremote: + actions.append((f, "g", (m2.flags(f),), + "remote recreating")) + else: + actions.append((f, "dc", (m2.flags(f),), + "prompt deleted/changed")) for f, m in sorted(aborts): if m == "ud": @@ -484,30 +549,10 @@ # check collision between files only in p2 for clean update if (not branchmerge and (force or not wctx.dirty(missing=True, branch=False))): - _checkcollision(repo, m2, [], []) + _checkcollision(repo, m2, []) else: - _checkcollision(repo, m1, actions, prompts) + _checkcollision(repo, m1, actions) - for f, m in sorted(prompts): - if m == "cd": - if acceptremote: - actions.append((f, "r", None, "remote delete")) - elif repo.ui.promptchoice( - _("local changed %s which remote deleted\n" - "use (c)hanged version or (d)elete?" 
- "$$ &Changed $$ &Delete") % f, 0): - actions.append((f, "r", None, "prompt delete")) - else: - actions.append((f, "a", None, "prompt keep")) - elif m == "dc": - if acceptremote: - actions.append((f, "g", (m2.flags(f),), "remote recreating")) - elif repo.ui.promptchoice( - _("remote changed %s which local deleted\n" - "use (c)hanged version or leave (d)eleted?" - "$$ &Changed $$ &Deleted") % f, 0) == 0: - actions.append((f, "g", (m2.flags(f),), "prompt recreating")) - else: assert False, m return actions def actionkey(a): @@ -691,9 +736,37 @@ ancestor, branchmerge, force, partial, acceptremote) + + # Filter out prompts. + newactions, prompts = [], [] + for a in actions: + if a[1] in ("cd", "dc"): + prompts.append(a) + else: + newactions.append(a) + # Prompt and create actions. TODO: Move this towards resolve phase. + for f, m, args, msg in sorted(prompts): + if m == "cd": + if repo.ui.promptchoice( + _("local changed %s which remote deleted\n" + "use (c)hanged version or (d)elete?" + "$$ &Changed $$ &Delete") % f, 0): + newactions.append((f, "r", None, "prompt delete")) + else: + newactions.append((f, "a", None, "prompt keep")) + elif m == "dc": + flags, = args + if repo.ui.promptchoice( + _("remote changed %s which local deleted\n" + "use (c)hanged version or leave (d)eleted?" + "$$ &Changed $$ &Deleted") % f, 0) == 0: + newactions.append((f, "g", (flags,), "prompt recreating")) + else: assert False, m + if tctx.rev() is None: - actions += _forgetremoved(tctx, mctx, branchmerge) - return actions + newactions += _forgetremoved(tctx, mctx, branchmerge) + + return newactions def recordupdates(repo, actions, branchmerge): "record merge actions to the dirstate"
--- a/mercurial/minirst.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/minirst.py Tue Mar 18 14:25:28 2014 -0500 @@ -73,7 +73,7 @@ if lines: indent = min((len(l) - len(l.lstrip())) for l in lines) lines = [l[indent:] for l in lines] - blocks.append(dict(indent=indent, lines=lines)) + blocks.append({'indent': indent, 'lines': lines}) return blocks def findliteralblocks(blocks): @@ -109,7 +109,7 @@ elif len(blocks[i]['lines']) == 1 and \ blocks[i]['lines'][0].lstrip(' ').startswith('.. ') and \ blocks[i]['lines'][0].find(' ', 3) == -1: - # directive on its onw line, not a literal block + # directive on its own line, not a literal block i += 1 continue else: @@ -174,8 +174,8 @@ items = [] for j, line in enumerate(lines): if match(lines, j, itemre, singleline): - items.append(dict(type=type, lines=[], - indent=blocks[i]['indent'])) + items.append({'type': type, 'lines': [], + 'indent': blocks[i]['indent']}) items[-1]['lines'].append(line) blocks[i:i + 1] = items break @@ -382,10 +382,10 @@ blocks[i]['type'] in ('bullet', 'option', 'field')): i += 1 elif not blocks[i - 1]['lines']: - # no lines in previous block, do not seperate + # no lines in previous block, do not separate i += 1 else: - blocks.insert(i, dict(lines=[''], indent=0, type='margin')) + blocks.insert(i, {'lines': [''], 'indent': 0, 'type': 'margin'}) i += 2 return blocks @@ -697,6 +697,10 @@ for row in data: l = [] for w, v in zip(widths, row): + if '\n' in v: + # only remove line breaks and indentation, long lines are + # handled by the next tool + v = ' '.join(e.lstrip() for e in v.split('\n')) pad = ' ' * (w - encoding.colwidth(v)) l.append(v + pad) out.append(indent + ' '.join(l) + "\n")
--- a/mercurial/obsolete.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/obsolete.py Tue Mar 18 14:25:28 2014 -0500 @@ -247,6 +247,9 @@ def __iter__(self): return iter(self._all) + def __len__(self): + return len(self._all) + def __nonzero__(self): return bool(self._all) @@ -256,6 +259,12 @@ * ensuring it is hashable * check mandatory metadata * encode metadata + + If you are a human writing code creating marker you want to use the + `createmarkers` function in this module instead. + + return True if a new marker have been added, False if the markers + already existed (no op). """ if metadata is None: metadata = {} @@ -267,7 +276,7 @@ if len(succ) != 20: raise ValueError(succ) marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata)) - self.add(transaction, [marker]) + return bool(self.add(transaction, [marker])) def add(self, transaction, markers): """Add new markers to the store @@ -343,14 +352,15 @@ # - the base85 encoding _maxpayload = 5300 -def listmarkers(repo): - """List markers over pushkey""" - if not repo.obsstore: - return {} +def _pushkeyescape(markers): + """encode markers into a dict suitable for pushkey exchange + + - binary data is base86 encoded + - splitted in chunks less than 5300 bytes""" keys = {} parts = [] currentlen = _maxpayload * 2 # ensure we create a new part - for marker in repo.obsstore: + for marker in markers: nextdata = _encodeonemarker(marker) if (len(nextdata) + currentlen > _maxpayload): currentpart = [] @@ -363,6 +373,12 @@ keys['dump%i' % idx] = base85.b85encode(data) return keys +def listmarkers(repo): + """List markers over pushkey""" + if not repo.obsstore: + return {} + return _pushkeyescape(repo.obsstore) + def pushmarker(repo, key, old, new): """Push markers over pushkey""" if not key.startswith('dump'): @@ -384,43 +400,6 @@ finally: lock.release() -def syncpush(repo, remote): - """utility function to push obsolete markers to a remote - - Exist mostly to allow overriding for experimentation purpose""" - if 
(_enabled and repo.obsstore and - 'obsolete' in remote.listkeys('namespaces')): - rslts = [] - remotedata = repo.listkeys('obsolete') - for key in sorted(remotedata, reverse=True): - # reverse sort to ensure we end with dump0 - data = remotedata[key] - rslts.append(remote.pushkey('obsolete', key, '', data)) - if [r for r in rslts if not r]: - msg = _('failed to push some obsolete markers!\n') - repo.ui.warn(msg) - -def syncpull(repo, remote, gettransaction): - """utility function to pull obsolete markers from a remote - - The `gettransaction` is function that return the pull transaction, creating - one if necessary. We return the transaction to inform the calling code that - a new transaction have been created (when applicable). - - Exists mostly to allow overriding for experimentation purpose""" - tr = None - if _enabled: - repo.ui.debug('fetching remote obsolete markers\n') - remoteobs = remote.listkeys('obsolete') - if 'dump0' in remoteobs: - tr = gettransaction() - for key in sorted(remoteobs, reverse=True): - if key.startswith('dump'): - data = base85.b85decode(remoteobs[key]) - repo.obsstore.mergemarkers(tr, data) - repo.invalidatevolatilesets() - return tr - def allmarkers(repo): """all obsolete markers known in a repository""" for markerdata in repo.obsstore: @@ -800,7 +779,7 @@ def _computebumpedset(repo): """the set of revs trying to obsolete public revisions""" bumped = set() - # utils function (avoid attribut lookup in the loop) + # utils function (avoid attribute lookup in the loop) phase = repo._phasecache.phase # would be faster to grab the full list public = phases.public cl = repo.changelog @@ -845,8 +824,10 @@ def createmarkers(repo, relations, flag=0, metadata=None): """Add obsolete markers between changesets in a repo - <relations> must be an iterable of (<old>, (<new>, ...)) tuple. - `old` and `news` are changectx. + <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}]) + tuple. `old` and `news` are changectx. 
metadata is an optional dictionnary + containing metadata for this marker only. It is merged with the global + metadata specified through the `metadata` argument of this function, Trying to obsolete a public changeset will raise an exception. @@ -865,7 +846,13 @@ metadata['user'] = repo.ui.username() tr = repo.transaction('add-obsolescence-marker') try: - for prec, sucs in relations: + for rel in relations: + prec = rel[0] + sucs = rel[1] + localmetadata = metadata.copy() + if 2 < len(rel): + localmetadata.update(rel[2]) + if not prec.mutable(): raise util.Abort("cannot obsolete immutable changeset: %s" % prec) @@ -873,7 +860,7 @@ nsucs = tuple(s.node() for s in sucs) if nprec in nsucs: raise util.Abort("changeset %s cannot obsolete itself" % prec) - repo.obsstore.create(tr, nprec, nsucs, flag, metadata) + repo.obsstore.create(tr, nprec, nsucs, flag, localmetadata) repo.filteredrevcache.clear() tr.close() finally:
--- a/mercurial/parsers.c Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/parsers.c Tue Mar 18 14:25:28 2014 -0500 @@ -14,6 +14,8 @@ #include "util.h" +static char *versionerrortext = "Python minor version mismatch"; + static int8_t hextable[256] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -1208,7 +1210,7 @@ const bitmask allseen = (1ull << revcount) - 1; const bitmask poison = 1ull << revcount; PyObject *gca = PyList_New(0); - int i, v, interesting, left; + int i, v, interesting; int maxrev = -1; long sp; bitmask *seen; @@ -1230,7 +1232,7 @@ for (i = 0; i < revcount; i++) seen[revs[i]] = 1ull << i; - interesting = left = revcount; + interesting = revcount; for (v = maxrev; v >= 0 && interesting; v--) { long sv = seen[v]; @@ -1251,11 +1253,8 @@ } sv |= poison; for (i = 0; i < revcount; i++) { - if (revs[i] == v) { - if (--left <= 1) - goto done; - break; - } + if (revs[i] == v) + goto done; } } } @@ -1529,10 +1528,6 @@ ret = gca; Py_INCREF(gca); } - else if (PyList_GET_SIZE(gca) == 1) { - ret = PyList_GET_ITEM(gca, 0); - Py_INCREF(ret); - } else ret = find_deepest(self, gca); done: @@ -1918,6 +1913,16 @@ static void module_init(PyObject *mod) { + /* This module constant has two purposes. First, it lets us unit test + * the ImportError raised without hard-coding any error text. This + * means we can change the text in the future without breaking tests, + * even across changesets without a recompile. Second, its presence + * can be used to determine whether the version-checking logic is + * present, which also helps in testing across changesets without a + * recompile. Note that this means the pure-Python version of parsers + * should not have this module constant. 
*/ + PyModule_AddStringConstant(mod, "versionerrortext", versionerrortext); + dirs_module_init(mod); indexType.tp_new = PyType_GenericNew; @@ -1935,6 +1940,24 @@ dirstate_unset = Py_BuildValue("ciii", 'n', 0, -1, -1); } +static int check_python_version(void) +{ + PyObject *sys = PyImport_ImportModule("sys"); + long hexversion = PyInt_AsLong(PyObject_GetAttrString(sys, "hexversion")); + /* sys.hexversion is a 32-bit number by default, so the -1 case + * should only occur in unusual circumstances (e.g. if sys.hexversion + * is manually set to an invalid value). */ + if ((hexversion == -1) || (hexversion >> 16 != PY_VERSION_HEX >> 16)) { + PyErr_Format(PyExc_ImportError, "%s: The Mercurial extension " + "modules were compiled with Python " PY_VERSION ", but " + "Mercurial is currently using Python with sys.hexversion=%ld: " + "Python %s\n at: %s", versionerrortext, hexversion, + Py_GetVersion(), Py_GetProgramFullPath()); + return -1; + } + return 0; +} + #ifdef IS_PY3K static struct PyModuleDef parsers_module = { PyModuleDef_HEAD_INIT, @@ -1946,6 +1969,8 @@ PyMODINIT_FUNC PyInit_parsers(void) { + if (check_python_version() == -1) + return; PyObject *mod = PyModule_Create(&parsers_module); module_init(mod); return mod; @@ -1953,6 +1978,8 @@ #else PyMODINIT_FUNC initparsers(void) { + if (check_python_version() == -1) + return; PyObject *mod = Py_InitModule3("parsers", methods, parsers_doc); module_init(mod); }
--- a/mercurial/phases.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/phases.py Tue Mar 18 14:25:28 2014 -0500 @@ -258,7 +258,7 @@ filtered = False nodemap = repo.changelog.nodemap # to filter unknown nodes for phase, nodes in enumerate(self.phaseroots): - missing = [node for node in nodes if node not in nodemap] + missing = sorted(node for node in nodes if node not in nodemap) if missing: for mnode in missing: repo.ui.debug(
--- a/mercurial/repoview.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/repoview.py Tue Mar 18 14:25:28 2014 -0500 @@ -9,7 +9,7 @@ import copy import phases import util -import obsolete, revset +import obsolete def hideablerevs(repo): @@ -28,8 +28,9 @@ cl = repo.changelog firsthideable = min(hideable) revs = cl.revs(start=firsthideable) - blockers = [r for r in revset._children(repo, revs, hideable) - if r not in hideable] + tofilter = repo.revs( + '(%ld) and children(%ld)', list(revs), list(hideable)) + blockers = [r for r in tofilter if r not in hideable] for par in repo[None].parents(): blockers.append(par.rev()) for bm in repo._bookmarks.values(): @@ -95,7 +96,7 @@ # function to compute filtered set # -# When addding a new filter you MUST update the table at: +# When adding a new filter you MUST update the table at: # mercurial.branchmap.subsettable # Otherwise your filter will have to recompute all its branches cache # from scratch (very slow).
--- a/mercurial/revlog.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/revlog.py Tue Mar 18 14:25:28 2014 -0500 @@ -734,17 +734,21 @@ break return False - def ancestor(self, a, b): - """calculate the least common ancestor of nodes a and b""" - + def commonancestors(self, a, b): + """calculate the least common ancestors of nodes a and b""" a, b = self.rev(a), self.rev(b) try: ancs = self.index.ancestors(a, b) - except (AttributeError, OverflowError): + except (AttributeError, OverflowError): # C implementation failed ancs = ancestor.ancestors(self.parentrevs, a, b) + return map(self.node, ancs) + + def ancestor(self, a, b): + """calculate a least common ancestor of nodes a and b""" + ancs = self.commonancestors(a, b) if ancs: # choose a consistent winner when there's a tie - return min(map(self.node, ancs)) + return min(ancs) return nullid def _match(self, id):
--- a/mercurial/revset.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/revset.py Tue Mar 18 14:25:28 2014 -0500 @@ -8,7 +8,9 @@ import re import parser, util, error, discovery, hbisect, phases import node +import heapq import match as matchmod +import ancestor as ancestormod from i18n import _ import encoding import obsolete as obsmod @@ -19,43 +21,64 @@ """Like revlog.ancestors(), but supports followfirst.""" cut = followfirst and 1 or None cl = repo.changelog - visit = util.deque(revs) - seen = set([node.nullrev]) - while visit: - for parent in cl.parentrevs(visit.popleft())[:cut]: - if parent not in seen: - visit.append(parent) - seen.add(parent) - yield parent + + def iterate(): + revqueue, revsnode = None, None + h = [] + + revs.descending() + revqueue = util.deque(revs) + if revqueue: + revsnode = revqueue.popleft() + heapq.heappush(h, -revsnode) + + seen = set([node.nullrev]) + while h: + current = -heapq.heappop(h) + if current not in seen: + if revsnode and current == revsnode: + if revqueue: + revsnode = revqueue.popleft() + heapq.heappush(h, -revsnode) + seen.add(current) + yield current + for parent in cl.parentrevs(current)[:cut]: + if parent != node.nullrev: + heapq.heappush(h, -parent) + + return _descgeneratorset(iterate()) def _revdescendants(repo, revs, followfirst): """Like revlog.descendants() but supports followfirst.""" cut = followfirst and 1 or None - cl = repo.changelog - first = min(revs) - nullrev = node.nullrev - if first == nullrev: - # Are there nodes with a null first parent and a non-null - # second one? Maybe. Do we care? Probably not. - for i in cl: - yield i - return - - seen = set(revs) - for i in cl.revs(first + 1): - for x in cl.parentrevs(i)[:cut]: - if x != nullrev and x in seen: - seen.add(i) + + def iterate(): + cl = repo.changelog + first = min(revs) + nullrev = node.nullrev + if first == nullrev: + # Are there nodes with a null first parent and a non-null + # second one? Maybe. Do we care? Probably not. 
+ for i in cl: yield i - break + else: + seen = set(revs) + for i in cl.revs(first + 1): + for x in cl.parentrevs(i)[:cut]: + if x != nullrev and x in seen: + seen.add(i) + yield i + break + + return _ascgeneratorset(iterate()) def _revsbetween(repo, roots, heads): """Return all paths between roots and heads, inclusive of both endpoint sets.""" if not roots: - return [] + return baseset([]) parentrevs = repo.changelog.parentrevs - visit = heads[:] + visit = baseset(heads) reachable = set() seen = {} minroot = min(roots) @@ -72,12 +95,12 @@ if parent >= minroot and parent not in seen: visit.append(parent) if not reachable: - return [] + return baseset([]) for rev in sorted(seen): for parent in seen[rev]: if parent in reachable: reachable.add(rev) - return sorted(reachable) + return baseset(sorted(reachable)) elements = { "(": (20, ("group", 1, ")"), ("func", 1, ")")), @@ -195,7 +218,10 @@ def getset(repo, subset, x): if not x: raise error.ParseError(_("missing argument")) - return methods[x[0]](repo, subset, *x[1:]) + s = methods[x[0]](repo, subset, *x[1:]) + if util.safehasattr(s, 'set'): + return s + return baseset(s) def _getrevsource(repo, r): extra = repo[r].extra() @@ -212,10 +238,10 @@ def stringset(repo, subset, x): x = repo[x].rev() if x == -1 and len(subset) == len(repo): - return [-1] + return baseset([-1]) if len(subset) == len(repo) or x in subset: - return [x] - return [] + return baseset([x]) + return baseset([]) def symbolset(repo, subset, x): if x in symbols: @@ -223,39 +249,36 @@ return stringset(repo, subset, x) def rangeset(repo, subset, x, y): - cl = repo.changelog + cl = baseset(repo.changelog) m = getset(repo, cl, x) n = getset(repo, cl, y) if not m or not n: - return [] + return baseset([]) m, n = m[0], n[-1] if m < n: - r = range(m, n + 1) + r = spanset(repo, m, n + 1) else: - r = range(m, n - 1, -1) - s = set(subset) - return [x for x in r if x in s] + r = spanset(repo, m, n - 1) + return r & subset def dagrange(repo, subset, x, y): - r = 
list(repo) + r = spanset(repo) xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y)) - s = set(subset) - return [r for r in xs if r in s] + s = subset.set() + return xs.filter(lambda r: r in s) def andset(repo, subset, x, y): return getset(repo, getset(repo, subset, x), y) def orset(repo, subset, x, y): xl = getset(repo, subset, x) - s = set(xl) - yl = getset(repo, [r for r in subset if r not in s], y) + yl = getset(repo, subset - xl, y) return xl + yl def notset(repo, subset, x): - s = set(getset(repo, subset, x)) - return [r for r in subset if r not in s] + return subset - getset(repo, subset, x) def listset(repo, subset, a, b): raise error.ParseError(_("can't use a list in this context")) @@ -289,7 +312,7 @@ """ # i18n: "ancestor" is a keyword l = getlist(x) - rl = list(repo) + rl = spanset(repo) anc = None # (getset(repo, rl, i) for i in l) generates a list of lists @@ -304,15 +327,15 @@ anc = rev(ancestor(node(anc), node(r))) if anc is not None and anc in subset: - return [anc] - return [] + return baseset([anc]) + return baseset([]) def _ancestors(repo, subset, x, followfirst=False): - args = getset(repo, list(repo), x) + args = getset(repo, spanset(repo), x) if not args: - return [] - s = set(_revancestors(repo, args, followfirst)) | set(args) - return [r for r in subset if r in s] + return baseset([]) + s = _revancestors(repo, args, followfirst) + return subset.filter(lambda r: r in s) def ancestors(repo, subset, x): """``ancestors(set)`` @@ -336,11 +359,11 @@ raise error.ParseError(_("~ expects a number")) ps = set() cl = repo.changelog - for r in getset(repo, cl, x): + for r in getset(repo, baseset(cl), x): for i in range(n): r = cl.parentrevs(r)[0] ps.add(r) - return [r for r in subset if r in ps] + return subset.filter(lambda r: r in ps) def author(repo, subset, x): """``author(string)`` @@ -349,7 +372,27 @@ # i18n: "author" is a keyword n = encoding.lower(getstring(x, _("author requires a string"))) kind, pattern, matcher = 
_substringmatcher(n) - return [r for r in subset if matcher(encoding.lower(repo[r].user()))] + return subset.filter(lambda x: matcher(encoding.lower(repo[x].user()))) + +def only(repo, subset, x): + """``only(set, [set])`` + Changesets that are ancestors of the first set that are not ancestors + of any other head in the repo. If a second set is specified, the result + is ancestors of the first set that are not ancestors of the second set + (i.e. ::<set1> - ::<set2>). + """ + cl = repo.changelog + args = getargs(x, 1, 2, _('only takes one or two arguments')) + include = getset(repo, spanset(repo), args[0]).set() + if len(args) == 1: + descendants = set(_revdescendants(repo, include, False)) + exclude = [rev for rev in cl.headrevs() + if not rev in descendants and not rev in include] + else: + exclude = getset(repo, spanset(repo), args[1]) + + results = set(ancestormod.missingancestors(include, exclude, cl.parentrevs)) + return lazyset(subset, lambda x: x in results) def bisect(repo, subset, x): """``bisect(string)`` @@ -366,7 +409,7 @@ # i18n: "bisect" is a keyword status = getstring(x, _("bisect requires a string")).lower() state = set(hbisect.get(repo, status)) - return [r for r in subset if r in state] + return subset.filter(lambda r: r in state) # Backward-compatibility # - no help entry so that we do not advertise it any more @@ -393,7 +436,7 @@ if not bmrev: raise util.Abort(_("bookmark '%s' does not exist") % bm) bmrev = repo[bmrev].rev() - return [r for r in subset if r == bmrev] + return subset.filter(lambda r: r == bmrev) else: matchrevs = set() for name, bmrev in repo._bookmarks.iteritems(): @@ -405,11 +448,11 @@ bmrevs = set() for bmrev in matchrevs: bmrevs.add(repo[bmrev].rev()) - return [r for r in subset if r in bmrevs] + return subset & bmrevs bms = set([repo[r].rev() for r in repo._bookmarks.values()]) - return [r for r in subset if r in bms] + return subset.filter(lambda r: r in bms) def branch(repo, subset, x): """``branch(string or set)`` @@ 
-431,16 +474,16 @@ # note: falls through to the revspec case if no branch with # this name exists if pattern in repo.branchmap(): - return [r for r in subset if matcher(repo[r].branch())] + return subset.filter(lambda r: matcher(repo[r].branch())) else: - return [r for r in subset if matcher(repo[r].branch())] - - s = getset(repo, list(repo), x) + return subset.filter(lambda r: matcher(repo[r].branch())) + + s = getset(repo, spanset(repo), x) b = set() for r in s: b.add(repo[r].branch()) - s = set(s) - return [r for r in subset if r in s or repo[r].branch() in b] + s = s.set() + return subset.filter(lambda r: r in s or repo[r].branch() in b) def bumped(repo, subset, x): """``bumped()`` @@ -451,7 +494,7 @@ # i18n: "bumped" is a keyword getargs(x, 0, 0, _("bumped takes no arguments")) bumped = obsmod.getrevs(repo, 'bumped') - return [r for r in subset if r in bumped] + return subset & bumped def bundle(repo, subset, x): """``bundle()`` @@ -463,43 +506,43 @@ bundlerevs = repo.changelog.bundlerevs except AttributeError: raise util.Abort(_("no bundle provided - specify with -R")) - return [r for r in subset if r in bundlerevs] + return subset & bundlerevs def checkstatus(repo, subset, pat, field): - m = None - s = [] hasset = matchmod.patkind(pat) == 'set' - fname = None - for r in subset: - c = repo[r] + + def matches(x): + m = None + fname = None + c = repo[x] if not m or hasset: m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c) if not m.anypats() and len(m.files()) == 1: fname = m.files()[0] if fname is not None: if fname not in c.files(): - continue + return False else: for f in c.files(): if m(f): break else: - continue + return False files = repo.status(c.p1().node(), c.node())[field] if fname is not None: if fname in files: - s.append(r) + return True else: for f in files: if m(f): - s.append(r) - break - return s + return True + + return subset.filter(matches) def _children(repo, narrow, parentset): cs = set() if not parentset: - return cs + return 
baseset(cs) pr = repo.changelog.parentrevs minrev = min(parentset) for r in narrow: @@ -508,15 +551,15 @@ for p in pr(r): if p in parentset: cs.add(r) - return cs + return baseset(cs) def children(repo, subset, x): """``children(set)`` Child changesets of changesets in set. """ - s = set(getset(repo, list(repo), x)) + s = getset(repo, baseset(repo), x).set() cs = _children(repo, subset, s) - return [r for r in subset if r in cs] + return subset & cs def closed(repo, subset, x): """``closed()`` @@ -524,7 +567,7 @@ """ # i18n: "closed" is a keyword getargs(x, 0, 0, _("closed takes no arguments")) - return [r for r in subset if repo[r].closesbranch()] + return subset.filter(lambda r: repo[r].closesbranch()) def contains(repo, subset, x): """``contains(pattern)`` @@ -537,23 +580,21 @@ """ # i18n: "contains" is a keyword pat = getstring(x, _("contains requires a pattern")) - s = [] - if not matchmod.patkind(pat): - pat = pathutil.canonpath(repo.root, repo.getcwd(), pat) - for r in subset: - if pat in repo[r]: - s.append(r) - else: - m = None - for r in subset: - c = repo[r] - if not m or matchmod.patkind(pat) == 'set': - m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c) + + def matches(x): + if not matchmod.patkind(pat): + pats = pathutil.canonpath(repo.root, repo.getcwd(), pat) + if pats in repo[x]: + return True + else: + c = repo[x] + m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c) for f in c.manifest(): if m(f): - s.append(r) - break - return s + return True + return False + + return subset.filter(matches) def converted(repo, subset, x): """``converted([id])`` @@ -575,7 +616,7 @@ source = repo[r].extra().get('convert_revision', None) return source is not None and (rev is None or source.startswith(rev)) - return [r for r in subset if _matchvalue(r)] + return subset.filter(lambda r: _matchvalue(r)) def date(repo, subset, x): """``date(interval)`` @@ -584,7 +625,7 @@ # i18n: "date" is a keyword ds = getstring(x, _("date requires a string")) dm = 
util.matchdate(ds) - return [r for r in subset if dm(repo[r].date()[0])] + return subset.filter(lambda x: dm(repo[x].date()[0])) def desc(repo, subset, x): """``desc(string)`` @@ -592,19 +633,20 @@ """ # i18n: "desc" is a keyword ds = encoding.lower(getstring(x, _("desc requires a string"))) - l = [] - for r in subset: - c = repo[r] - if ds in encoding.lower(c.description()): - l.append(r) - return l + + def matches(x): + c = repo[x] + return ds in encoding.lower(c.description()) + + return subset.filter(matches) def _descendants(repo, subset, x, followfirst=False): - args = getset(repo, list(repo), x) + args = getset(repo, spanset(repo), x) if not args: - return [] - s = set(_revdescendants(repo, args, followfirst)) | set(args) - return [r for r in subset if r in s] + return baseset([]) + s = _revdescendants(repo, args, followfirst) + a = set(args) + return subset.filter(lambda r: r in s or r in a) def descendants(repo, subset, x): """``descendants(set)`` @@ -624,9 +666,9 @@ is the same as passing all(). 
""" if x is not None: - args = set(getset(repo, list(repo), x)) + args = getset(repo, spanset(repo), x).set() else: - args = set(getall(repo, list(repo), x)) + args = getall(repo, spanset(repo), x).set() dests = set() @@ -659,7 +701,7 @@ r = src src = _getrevsource(repo, r) - return [r for r in subset if r in dests] + return subset.filter(lambda r: r in dests) def divergent(repo, subset, x): """``divergent()`` @@ -668,7 +710,7 @@ # i18n: "divergent" is a keyword getargs(x, 0, 0, _("divergent takes no arguments")) divergent = obsmod.getrevs(repo, 'divergent') - return [r for r in subset if r in divergent] + return subset.filter(lambda r: r in divergent) def draft(repo, subset, x): """``draft()`` @@ -676,7 +718,7 @@ # i18n: "draft" is a keyword getargs(x, 0, 0, _("draft takes no arguments")) pc = repo._phasecache - return [r for r in subset if pc.phase(repo, r) == phases.draft] + return subset.filter(lambda r: pc.phase(repo, r) == phases.draft) def extinct(repo, subset, x): """``extinct()`` @@ -685,7 +727,7 @@ # i18n: "extinct" is a keyword getargs(x, 0, 0, _("extinct takes no arguments")) extincts = obsmod.getrevs(repo, 'extinct') - return [r for r in subset if r in extincts] + return subset & extincts def extra(repo, subset, x): """``extra(label, [value])`` @@ -712,7 +754,7 @@ extra = repo[r].extra() return label in extra and (value is None or matcher(extra[label])) - return [r for r in subset if _matchvalue(r)] + return subset.filter(lambda r: _matchvalue(r)) def filelog(repo, subset, x): """``filelog(pattern)`` @@ -744,7 +786,7 @@ for fr in fl: s.add(fl.linkrev(fr)) - return [r for r in subset if r in s] + return subset.filter(lambda r: r in s) def first(repo, subset, x): """``first(set, [n])`` @@ -763,11 +805,11 @@ # include the revision responsible for the most recent version s.add(cx.linkrev()) else: - return [] + return baseset([]) else: - s = set(_revancestors(repo, [c.rev()], followfirst)) | set([c.rev()]) - - return [r for r in subset if r in s] + s = 
_revancestors(repo, baseset([c.rev()]), followfirst) + + return subset.filter(lambda r: r in s) def follow(repo, subset, x): """``follow([file])`` @@ -802,14 +844,15 @@ gr = re.compile(getstring(x, _("grep requires a string"))) except re.error, e: raise error.ParseError(_('invalid match pattern: %s') % e) - l = [] - for r in subset: - c = repo[r] + + def matches(x): + c = repo[x] for e in c.files() + [c.user(), c.description()]: if gr.search(e): - l.append(r) - break - return l + return True + return False + + return subset.filter(matches) def _matchfiles(repo, subset, x): # _matchfiles takes a revset list of prefixed arguments: @@ -858,10 +901,10 @@ hasset = True if not default: default = 'glob' - m = None - s = [] - for r in subset: - c = repo[r] + + def matches(x): + m = None + c = repo[x] if not m or (hasset and rev is None): ctx = c if rev is not None: @@ -870,9 +913,10 @@ exclude=exc, ctx=ctx, default=default) for f in c.files(): if m(f): - s.append(r) - break - return s + return True + return False + + return subset.filter(matches) def hasfile(repo, subset, x): """``file(pattern)`` @@ -896,15 +940,15 @@ hs = set() for b, ls in repo.branchmap().iteritems(): hs.update(repo[h].rev() for h in ls) - return [r for r in subset if r in hs] + return baseset(hs).filter(subset.__contains__) def heads(repo, subset, x): """``heads(set)`` Members of set with no children in set. 
""" s = getset(repo, subset, x) - ps = set(parents(repo, subset, x)) - return [r for r in s if r not in ps] + ps = parents(repo, subset, x) + return s - ps def hidden(repo, subset, x): """``hidden()`` @@ -913,7 +957,7 @@ # i18n: "hidden" is a keyword getargs(x, 0, 0, _("hidden takes no arguments")) hiddenrevs = repoview.filterrevs(repo, 'visible') - return [r for r in subset if r in hiddenrevs] + return subset & hiddenrevs def keyword(repo, subset, x): """``keyword(string)`` @@ -922,13 +966,13 @@ """ # i18n: "keyword" is a keyword kw = encoding.lower(getstring(x, _("keyword requires a string"))) - l = [] - for r in subset: + + def matches(r): c = repo[r] - if util.any(kw in encoding.lower(t) - for t in c.files() + [c.user(), c.description()]): - l.append(r) - return l + return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(), + c.description()]) + + return subset.filter(matches) def limit(repo, subset, x): """``limit(set, [n])`` @@ -944,9 +988,18 @@ except (TypeError, ValueError): # i18n: "limit" is a keyword raise error.ParseError(_("limit expects a number")) - ss = set(subset) - os = getset(repo, list(repo), l[0])[:lim] - return [r for r in os if r in ss] + ss = subset.set() + os = getset(repo, spanset(repo), l[0]) + bs = baseset([]) + it = iter(os) + for x in xrange(lim): + try: + y = it.next() + if y in ss: + bs.append(y) + except (StopIteration): + break + return bs def last(repo, subset, x): """``last(set, [n])`` @@ -962,20 +1015,30 @@ except (TypeError, ValueError): # i18n: "last" is a keyword raise error.ParseError(_("last expects a number")) - ss = set(subset) - os = getset(repo, list(repo), l[0])[-lim:] - return [r for r in os if r in ss] + ss = subset.set() + os = getset(repo, spanset(repo), l[0]) + os.reverse() + bs = baseset([]) + it = iter(os) + for x in xrange(lim): + try: + y = it.next() + if y in ss: + bs.append(y) + except (StopIteration): + break + return bs def maxrev(repo, subset, x): """``max(set)`` Changeset with highest 
revision number in set. """ - os = getset(repo, list(repo), x) + os = getset(repo, spanset(repo), x) if os: - m = max(os) + m = os.max() if m in subset: - return [m] - return [] + return baseset([m]) + return baseset([]) def merge(repo, subset, x): """``merge()`` @@ -984,7 +1047,7 @@ # i18n: "merge" is a keyword getargs(x, 0, 0, _("merge takes no arguments")) cl = repo.changelog - return [r for r in subset if cl.parentrevs(r)[1] != -1] + return subset.filter(lambda r: cl.parentrevs(r)[1] != -1) def branchpoint(repo, subset, x): """``branchpoint()`` @@ -994,25 +1057,35 @@ getargs(x, 0, 0, _("branchpoint takes no arguments")) cl = repo.changelog if not subset: - return [] + return baseset([]) baserev = min(subset) parentscount = [0]*(len(repo) - baserev) for r in cl.revs(start=baserev + 1): for p in cl.parentrevs(r): if p >= baserev: parentscount[p - baserev] += 1 - return [r for r in subset if (parentscount[r - baserev] > 1)] + return subset.filter(lambda r: parentscount[r - baserev] > 1) def minrev(repo, subset, x): """``min(set)`` Changeset with lowest revision number in set. 
""" - os = getset(repo, list(repo), x) + os = getset(repo, spanset(repo), x) if os: - m = min(os) + m = os.min() if m in subset: - return [m] - return [] + return baseset([m]) + return baseset([]) + +def _missingancestors(repo, subset, x): + # i18n: "_missingancestors" is a keyword + revs, bases = getargs(x, 2, 2, + _("_missingancestors requires two arguments")) + rs = baseset(repo) + revs = getset(repo, rs, revs) + bases = getset(repo, rs, bases) + missing = set(repo.changelog.findmissingrevs(bases, revs)) + return baseset([r for r in subset if r in missing]) def modifies(repo, subset, x): """``modifies(pattern)`` @@ -1042,7 +1115,7 @@ if pm is not None: rn = repo.changelog.rev(pm) - return [r for r in subset if r == rn] + return subset.filter(lambda r: r == rn) def obsolete(repo, subset, x): """``obsolete()`` @@ -1050,7 +1123,7 @@ # i18n: "obsolete" is a keyword getargs(x, 0, 0, _("obsolete takes no arguments")) obsoletes = obsmod.getrevs(repo, 'obsolete') - return [r for r in subset if r in obsoletes] + return subset & obsoletes def origin(repo, subset, x): """``origin([set])`` @@ -1061,9 +1134,9 @@ for the first operation is selected. 
""" if x is not None: - args = set(getset(repo, list(repo), x)) + args = getset(repo, spanset(repo), x).set() else: - args = set(getall(repo, list(repo), x)) + args = getall(repo, spanset(repo), x).set() def _firstsrc(rev): src = _getrevsource(repo, rev) @@ -1078,7 +1151,7 @@ src = prev o = set([_firstsrc(r) for r in args]) - return [r for r in subset if r in o] + return subset.filter(lambda r: r in o) def outgoing(repo, subset, x): """``outgoing([path])`` @@ -1101,7 +1174,7 @@ repo.ui.popbuffer() cl = repo.changelog o = set([cl.rev(r) for r in outgoing.missing]) - return [r for r in subset if r in o] + return subset.filter(lambda r: r in o) def p1(repo, subset, x): """``p1([set])`` @@ -1109,13 +1182,13 @@ """ if x is None: p = repo[x].p1().rev() - return [r for r in subset if r == p] + return subset.filter(lambda r: r == p) ps = set() cl = repo.changelog - for r in getset(repo, list(repo), x): + for r in getset(repo, spanset(repo), x): ps.add(cl.parentrevs(r)[0]) - return [r for r in subset if r in ps] + return subset & ps def p2(repo, subset, x): """``p2([set])`` @@ -1125,15 +1198,15 @@ ps = repo[x].parents() try: p = ps[1].rev() - return [r for r in subset if r == p] + return subset.filter(lambda r: r == p) except IndexError: - return [] + return baseset([]) ps = set() cl = repo.changelog - for r in getset(repo, list(repo), x): + for r in getset(repo, spanset(repo), x): ps.add(cl.parentrevs(r)[1]) - return [r for r in subset if r in ps] + return subset & ps def parents(repo, subset, x): """``parents([set])`` @@ -1141,13 +1214,13 @@ """ if x is None: ps = tuple(p.rev() for p in repo[x].parents()) - return [r for r in subset if r in ps] + return subset & ps ps = set() cl = repo.changelog - for r in getset(repo, list(repo), x): + for r in getset(repo, spanset(repo), x): ps.update(cl.parentrevs(r)) - return [r for r in subset if r in ps] + return subset & ps def parentspec(repo, subset, x, n): """``set^0`` @@ -1163,7 +1236,7 @@ raise error.ParseError(_("^ expects a 
number 0, 1, or 2")) ps = set() cl = repo.changelog - for r in getset(repo, cl, x): + for r in getset(repo, baseset(cl), x): if n == 0: ps.add(r) elif n == 1: @@ -1172,7 +1245,7 @@ parents = cl.parentrevs(r) if len(parents) > 1: ps.add(parents[1]) - return [r for r in subset if r in ps] + return subset & ps def present(repo, subset, x): """``present(set)`` @@ -1186,7 +1259,7 @@ try: return getset(repo, subset, x) except error.RepoLookupError: - return [] + return baseset([]) def public(repo, subset, x): """``public()`` @@ -1194,7 +1267,7 @@ # i18n: "public" is a keyword getargs(x, 0, 0, _("public takes no arguments")) pc = repo._phasecache - return [r for r in subset if pc.phase(repo, r) == phases.public] + return subset.filter(lambda r: pc.phase(repo, r) == phases.public) def remote(repo, subset, x): """``remote([id [,path]])`` @@ -1228,8 +1301,8 @@ if n in repo: r = repo[n].rev() if r in subset: - return [r] - return [] + return baseset([r]) + return baseset([]) def removes(repo, subset, x): """``removes(pattern)`` @@ -1255,7 +1328,7 @@ except (TypeError, ValueError): # i18n: "rev" is a keyword raise error.ParseError(_("rev expects a number")) - return [r for r in subset if r == l] + return subset.filter(lambda r: r == l) def matching(repo, subset, x): """``matching(revision [, field])`` @@ -1285,7 +1358,7 @@ # i18n: "matching" is a keyword l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments")) - revs = getset(repo, repo.changelog, l[0]) + revs = getset(repo, baseset(repo.changelog), l[0]) fieldlist = ['metadata'] if len(l) > 1: @@ -1356,26 +1429,24 @@ # is only one field to match) getinfo = lambda r: [f(r) for f in getfieldfuncs] - matches = set() - for rev in revs: - target = getinfo(rev) - for r in subset: + def matches(x): + for rev in revs: + target = getinfo(rev) match = True for n, f in enumerate(getfieldfuncs): - if target[n] != f(r): + if target[n] != f(x): match = False - break if match: - matches.add(r) - return [r for r in subset if r in matches] 
+ return True + return False + + return subset.filter(matches) def reverse(repo, subset, x): """``reverse(set)`` Reverse order of set. """ l = getset(repo, subset, x) - if not isinstance(l, list): - l = list(l) l.reverse() return l @@ -1383,10 +1454,10 @@ """``roots(set)`` Changesets in set with no parent changeset in set. """ - s = set(getset(repo, repo.changelog, x)) - subset = [r for r in subset if r in s] + s = getset(repo, baseset(repo.changelog), x).set() + subset = baseset([r for r in subset if r in s]) cs = _children(repo, subset, s) - return [r for r in subset if r not in cs] + return subset - cs def secret(repo, subset, x): """``secret()`` @@ -1394,7 +1465,7 @@ # i18n: "secret" is a keyword getargs(x, 0, 0, _("secret takes no arguments")) pc = repo._phasecache - return [r for r in subset if pc.phase(repo, r) == phases.secret] + return subset.filter(lambda x: pc.phase(repo, x) == phases.secret) def sort(repo, subset, x): """``sort(set[, [-]key...])`` @@ -1421,7 +1492,14 @@ l = [] def invert(s): return "".join(chr(255 - ord(c)) for c in s) - for r in getset(repo, subset, s): + revs = getset(repo, subset, s) + if keys == ["rev"]: + revs.sort() + return revs + elif keys == ["-rev"]: + revs.sort(reverse=True) + return revs + for r in revs: c = repo[r] e = [] for k in keys: @@ -1450,7 +1528,7 @@ e.append(r) l.append(e) l.sort() - return [e[-1] for e in l] + return baseset([e[-1] for e in l]) def _stringmatcher(pattern): """ @@ -1519,7 +1597,7 @@ s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)]) else: s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip']) - return [r for r in subset if r in s] + return subset & s def tagged(repo, subset, x): return tag(repo, subset, x) @@ -1531,7 +1609,7 @@ # i18n: "unstable" is a keyword getargs(x, 0, 0, _("unstable takes no arguments")) unstables = obsmod.getrevs(repo, 'unstable') - return [r for r in subset if r in unstables] + return subset & unstables def user(repo, subset, x): @@ -1548,11 +1626,29 @@ 
def _list(repo, subset, x): s = getstring(x, "internal error") if not s: - return [] - if not isinstance(subset, set): - subset = set(subset) + return baseset([]) ls = [repo[r].rev() for r in s.split('\0')] - return [r for r in ls if r in subset] + s = subset.set() + return baseset([r for r in ls if r in s]) + +# for internal use +def _intlist(repo, subset, x): + s = getstring(x, "internal error") + if not s: + return baseset([]) + ls = [int(r) for r in s.split('\0')] + s = subset.set() + return baseset([r for r in ls if r in s]) + +# for internal use +def _hexlist(repo, subset, x): + s = getstring(x, "internal error") + if not s: + return baseset([]) + cl = repo.changelog + ls = [cl.rev(node.bin(r)) for r in s.split('\0')] + s = subset.set() + return baseset([r for r in ls if r in s]) symbols = { "adds": adds, @@ -1561,6 +1657,7 @@ "ancestors": ancestors, "_firstancestors": _firstancestors, "author": author, + "only": only, "bisect": bisect, "bisected": bisected, "bookmark": bookmark, @@ -1598,6 +1695,7 @@ "max": maxrev, "merge": merge, "min": minrev, + "_missingancestors": _missingancestors, "modifies": modifies, "obsolete": obsolete, "origin": origin, @@ -1620,6 +1718,8 @@ "user": user, "unstable": unstable, "_list": _list, + "_intlist": _intlist, + "_hexlist": _hexlist, } # symbols which can't be used for a DoS attack for any given input @@ -1667,6 +1767,7 @@ "max", "merge", "min", + "_missingancestors", "modifies", "obsolete", "origin", @@ -1689,6 +1790,8 @@ "user", "unstable", "_list", + "_intlist", + "_hexlist", ]) methods = { @@ -1733,7 +1836,24 @@ elif op == 'and': wa, ta = optimize(x[1], True) wb, tb = optimize(x[2], True) + + # (::x and not ::y)/(not ::y and ::x) have a fast path + def ismissingancestors(revs, bases): + return ( + revs[0] == 'func' + and getstring(revs[1], _('not a symbol')) == 'ancestors' + and bases[0] == 'not' + and bases[1][0] == 'func' + and getstring(bases[1][1], _('not a symbol')) == 'ancestors') + w = min(wa, wb) + if 
ismissingancestors(ta, tb): + return w, ('func', ('symbol', '_missingancestors'), + ('list', ta[2], tb[1][2])) + if ismissingancestors(tb, ta): + return w, ('func', ('symbol', '_missingancestors'), + ('list', tb[2], ta[1][2])) + if wa > wb: return w, (op, tb, ta) return w, (op, ta, tb) @@ -1917,7 +2037,9 @@ tree = findaliases(ui, tree) weight, tree = optimize(tree, True) def mfunc(repo, subset): - return getset(repo, subset, tree) + if util.safehasattr(subset, 'set'): + return getset(repo, subset, tree) + return getset(repo, baseset(subset), tree) return mfunc def formatspec(expr, *args): @@ -1976,11 +2098,11 @@ elif l == 1: return argtype(t, s[0]) elif t == 'd': - return "_list('%s')" % "\0".join(str(int(a)) for a in s) + return "_intlist('%s')" % "\0".join(str(int(a)) for a in s) elif t == 's': return "_list('%s')" % "\0".join(s) elif t == 'n': - return "_list('%s')" % "\0".join(node.hex(a) for a in s) + return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s) elif t == 'b': return "_list('%s')" % "\0".join(a.branch() for a in s) @@ -2046,5 +2168,656 @@ funcs.add(tree[1][1]) return funcs +class baseset(list): + """Basic data structure that represents a revset and contains the basic + operation that it should be able to perform. + + Every method in this class should be implemented by any smartset class. + """ + def __init__(self, data=()): + super(baseset, self).__init__(data) + self._set = None + + def ascending(self): + """Sorts the set in ascending order (in place). + + This is part of the mandatory API for smartset.""" + self.sort() + + def descending(self): + """Sorts the set in descending order (in place). + + This is part of the mandatory API for smartset.""" + self.sort(reverse=True) + + def min(self): + return min(self) + + def max(self): + return max(self) + + def set(self): + """Returns a set or a smartset containing all the elements. + + The returned structure should be the fastest option for membership + testing. 
+ + This is part of the mandatory API for smartset.""" + if not self._set: + self._set = set(self) + return self._set + + def __sub__(self, other): + """Returns a new object with the substraction of the two collections. + + This is part of the mandatory API for smartset.""" + if isinstance(other, baseset): + s = other.set() + else: + s = set(other) + return baseset(self.set() - s) + + def __and__(self, other): + """Returns a new object with the intersection of the two collections. + + This is part of the mandatory API for smartset.""" + if isinstance(other, baseset): + other = other.set() + return baseset([y for y in self if y in other]) + + def __add__(self, other): + """Returns a new object with the union of the two collections. + + This is part of the mandatory API for smartset.""" + s = self.set() + l = [r for r in other if r not in s] + return baseset(list(self) + l) + + def isascending(self): + """Returns True if the collection is ascending order, False if not. + + This is part of the mandatory API for smartset.""" + return False + + def isdescending(self): + """Returns True if the collection is descending order, False if not. + + This is part of the mandatory API for smartset.""" + return False + + def filter(self, condition): + """Returns this smartset filtered by condition as a new smartset. + + `condition` is a callable which takes a revision number and returns a + boolean. 
+ + This is part of the mandatory API for smartset.""" + return lazyset(self, condition) + +class _orderedsetmixin(object): + """Mixin class with utility methods for smartsets + + This should be extended by smartsets which have the isascending(), + isdescending() and reverse() methods""" + + def _first(self): + """return the first revision in the set""" + for r in self: + return r + return None + + def _last(self): + """return the last revision in the set""" + self.reverse() + m = self._first() + self.reverse() + return m + + def min(self): + """return the smallest element in the set""" + if self.isascending(): + return self._first() + return self._last() + + def max(self): + """return the largest element in the set""" + if self.isascending(): + return self._last() + return self._first() + +class lazyset(object): + """Duck type for baseset class which iterates lazily over the revisions in + the subset and contains a function which tests for membership in the + revset + """ + def __init__(self, subset, condition=lambda x: True): + """ + condition: a function that decide whether a revision in the subset + belongs to the revset or not. + """ + self._subset = subset + self._condition = condition + self._cache = {} + + def ascending(self): + self._subset.sort() + + def descending(self): + self._subset.sort(reverse=True) + + def min(self): + return min(self) + + def max(self): + return max(self) + + def __contains__(self, x): + c = self._cache + if x not in c: + c[x] = x in self._subset and self._condition(x) + return c[x] + + def __iter__(self): + cond = self._condition + for x in self._subset: + if cond(x): + yield x + + def __and__(self, x): + return lazyset(self, lambda r: r in x) + + def __sub__(self, x): + return lazyset(self, lambda r: r not in x) + + def __add__(self, x): + return _addset(self, x) + + def __nonzero__(self): + for r in self: + return True + return False + + def __len__(self): + # Basic implementation to be changed in future patches. 
+ l = baseset([r for r in self]) + return len(l) + + def __getitem__(self, x): + # Basic implementation to be changed in future patches. + l = baseset([r for r in self]) + return l[x] + + def sort(self, reverse=False): + if not util.safehasattr(self._subset, 'sort'): + self._subset = baseset(self._subset) + self._subset.sort(reverse=reverse) + + def reverse(self): + self._subset.reverse() + + def set(self): + return set([r for r in self]) + + def isascending(self): + return False + + def isdescending(self): + return False + + def filter(self, l): + return lazyset(self, l) + +class orderedlazyset(_orderedsetmixin, lazyset): + """Subclass of lazyset which subset can be ordered either ascending or + descendingly + """ + def __init__(self, subset, condition, ascending=True): + super(orderedlazyset, self).__init__(subset, condition) + self._ascending = ascending + + def filter(self, l): + return orderedlazyset(self, l, ascending=self._ascending) + + def ascending(self): + if not self._ascending: + self.reverse() + + def descending(self): + if self._ascending: + self.reverse() + + def __and__(self, x): + return orderedlazyset(self, lambda r: r in x, + ascending=self._ascending) + + def __sub__(self, x): + return orderedlazyset(self, lambda r: r not in x, + ascending=self._ascending) + + def __add__(self, x): + kwargs = {} + if self.isascending() and x.isascending(): + kwargs['ascending'] = True + if self.isdescending() and x.isdescending(): + kwargs['ascending'] = False + return _addset(self, x, **kwargs) + + def sort(self, reverse=False): + if reverse: + if self._ascending: + self._subset.sort(reverse=reverse) + else: + if not self._ascending: + self._subset.sort(reverse=reverse) + self._ascending = not reverse + + def isascending(self): + return self._ascending + + def isdescending(self): + return not self._ascending + + def reverse(self): + self._subset.reverse() + self._ascending = not self._ascending + +class _addset(_orderedsetmixin): + """Represent the addition of 
two sets + + Wrapper structure for lazily adding two structures without losing much + performance on the __contains__ method + + If the ascending attribute is set, that means the two structures are + ordered in either an ascending or descending way. Therefore, we can add + them mantaining the order by iterating over both at the same time + + This class does not duck-type baseset and it's only supposed to be used + internally + """ + def __init__(self, revs1, revs2, ascending=None): + self._r1 = revs1 + self._r2 = revs2 + self._iter = None + self._ascending = ascending + self._genlist = None + + @util.propertycache + def _list(self): + if not self._genlist: + self._genlist = baseset(self._iterator()) + return self._genlist + + def filter(self, condition): + if self._ascending is not None: + return orderedlazyset(self, condition, ascending=self._ascending) + return lazyset(self, condition) + + def ascending(self): + if self._ascending is None: + self.sort() + self._ascending = True + else: + if not self._ascending: + self.reverse() + + def descending(self): + if self._ascending is None: + self.sort(reverse=True) + self._ascending = False + else: + if self._ascending: + self.reverse() + + def __and__(self, other): + filterfunc = other.__contains__ + if self._ascending is not None: + return orderedlazyset(self, filterfunc, ascending=self._ascending) + return lazyset(self, filterfunc) + + def __sub__(self, other): + filterfunc = lambda r: r not in other + if self._ascending is not None: + return orderedlazyset(self, filterfunc, ascending=self._ascending) + return lazyset(self, filterfunc) + + def __add__(self, other): + """When both collections are ascending or descending, preserve the order + """ + kwargs = {} + if self._ascending is not None: + if self.isascending() and other.isascending(): + kwargs['ascending'] = True + if self.isdescending() and other.isdescending(): + kwargs['ascending'] = False + return _addset(self, other, **kwargs) + + def _iterator(self): + 
"""Iterate over both collections without repeating elements + + If the ascending attribute is not set, iterate over the first one and + then over the second one checking for membership on the first one so we + dont yield any duplicates. + + If the ascending attribute is set, iterate over both collections at the + same time, yielding only one value at a time in the given order. + """ + if not self._iter: + def gen(): + if self._ascending is None: + for r in self._r1: + yield r + s = self._r1.set() + for r in self._r2: + if r not in s: + yield r + else: + iter1 = iter(self._r1) + iter2 = iter(self._r2) + + val1 = None + val2 = None + + choice = max + if self._ascending: + choice = min + try: + # Consume both iterators in an ordered way until one is + # empty + while True: + if val1 is None: + val1 = iter1.next() + if val2 is None: + val2 = iter2.next() + next = choice(val1, val2) + yield next + if val1 == next: + val1 = None + if val2 == next: + val2 = None + except StopIteration: + # Flush any remaining values and consume the other one + it = iter2 + if val1 is not None: + yield val1 + it = iter1 + elif val2 is not None: + # might have been equality and both are empty + yield val2 + for val in it: + yield val + + self._iter = _generatorset(gen()) + + return self._iter + + def __iter__(self): + if self._genlist: + return iter(self._genlist) + return iter(self._iterator()) + + def __contains__(self, x): + return x in self._r1 or x in self._r2 + + def set(self): + return self + + def sort(self, reverse=False): + """Sort the added set + + For this we use the cached list with all the generated values and if we + know they are ascending or descending we can sort them in a smart way. 
+ """ + if self._ascending is None: + self._list.sort(reverse=reverse) + self._ascending = not reverse + else: + if bool(self._ascending) == bool(reverse): + self.reverse() + + def isascending(self): + return self._ascending is not None and self._ascending + + def isdescending(self): + return self._ascending is not None and not self._ascending + + def reverse(self): + self._list.reverse() + if self._ascending is not None: + self._ascending = not self._ascending + +class _generatorset(object): + """Wrap a generator for lazy iteration + + Wrapper structure for generators that provides lazy membership and can + be iterated more than once. + When asked for membership it generates values until either it finds the + requested one or has gone through all the elements in the generator + + This class does not duck-type baseset and it's only supposed to be used + internally + """ + def __init__(self, gen): + """ + gen: a generator producing the values for the generatorset. + """ + self._gen = gen + self._iter = iter(gen) + self._cache = {} + self._genlist = baseset([]) + self._iterated = False + self._finished = False + + def __contains__(self, x): + if x in self._cache: + return self._cache[x] + + # Use __iter__ which caches values and stores them into self._genlist + for l in self: + if l == x: + return True + + self._finished = True + self._cache[x] = False + return False + + def __iter__(self): + if self._iterated: + # At least a part of the list should be cached if iteration has + # started over the generatorset. + for l in self._genlist: + yield l + else: + # Starting iteration over the generatorset. + self._iterated = True + + for item in self._gen: + self._cache[item] = True + self._genlist.append(item) + yield item + + # Iteration over the generator has finished. 
Whole value list should be + # cached in self._genlist + self._finished = True + + def set(self): + return self + + def sort(self, reverse=False): + if not self._finished: + for i in self: + continue + self._genlist.sort(reverse=reverse) + +class _ascgeneratorset(_generatorset): + """Wrap a generator of ascending elements for lazy iteration + + Same structure as _generatorset but stops iterating after it goes past + the value when asked for membership and the element is not contained + + This class does not duck-type baseset and it's only supposed to be used + internally + """ + def __contains__(self, x): + if x in self._cache: + return self._cache[x] + + for l in self: + if l == x: + return True + if l > x: + break + + self._cache[x] = False + return False + +class _descgeneratorset(_generatorset): + """Wrap a generator of descending elements for lazy iteration + + Same structure as _generatorset but stops iterating after it goes past + the value when asked for membership and the element is not contained + + This class does not duck-type baseset and it's only supposed to be used + internally + """ + def __contains__(self, x): + if x in self._cache: + return self._cache[x] + + for l in self: + if l == x: + return True + if l < x: + break + + self._cache[x] = False + return False + +class spanset(_orderedsetmixin): + """Duck type for baseset class which represents a range of revisions and + can work lazily and without having all the range in memory + + Note that spanset(x, y) behave almost like xrange(x, y) except for two + notable points: + - when x < y it will be automatically descending, + - revision filtered with this repoview will be skipped. + + """ + def __init__(self, repo, start=0, end=None): + """ + start: first revision included the set + (default to 0) + end: first revision excluded (last+1) + (default to len(repo) + + Spanset will be descending if `end` < `start`. 
+ """ + self._start = start + if end is not None: + self._end = end + else: + self._end = len(repo) + self._hiddenrevs = repo.changelog.filteredrevs + + def ascending(self): + if self._start > self._end: + self.reverse() + + def descending(self): + if self._start < self._end: + self.reverse() + + def _contained(self, rev): + return (rev <= self._start and rev > self._end) or (rev >= self._start + and rev < self._end) + + def __iter__(self): + if self._start <= self._end: + iterrange = xrange(self._start, self._end) + else: + iterrange = xrange(self._start, self._end, -1) + + if self._hiddenrevs: + s = self._hiddenrevs + for r in iterrange: + if r not in s: + yield r + else: + for r in iterrange: + yield r + + def __contains__(self, x): + return self._contained(x) and not (self._hiddenrevs and rev in + self._hiddenrevs) + + def __nonzero__(self): + for r in self: + return True + return False + + def __and__(self, x): + if isinstance(x, baseset): + x = x.set() + if self._start <= self._end: + return orderedlazyset(self, lambda r: r in x) + else: + return orderedlazyset(self, lambda r: r in x, ascending=False) + + def __sub__(self, x): + if isinstance(x, baseset): + x = x.set() + if self._start <= self._end: + return orderedlazyset(self, lambda r: r not in x) + else: + return orderedlazyset(self, lambda r: r not in x, ascending=False) + + def __add__(self, x): + kwargs = {} + if self.isascending() and x.isascending(): + kwargs['ascending'] = True + if self.isdescending() and x.isdescending(): + kwargs['ascending'] = False + return _addset(self, x, **kwargs) + + def __len__(self): + if not self._hiddenrevs: + return abs(self._end - self._start) + else: + count = 0 + for rev in self._hiddenrevs: + if self._contained(rev): + count += 1 + return abs(self._end - self._start) - count + + def __getitem__(self, x): + # Basic implementation to be changed in future patches. 
+ l = baseset([r for r in self]) + return l[x] + + def sort(self, reverse=False): + if bool(reverse) != (self._start > self._end): + self.reverse() + + def reverse(self): + # Just switch the _start and _end parameters + if self._start <= self._end: + self._start, self._end = self._end - 1, self._start - 1 + else: + self._start, self._end = self._end + 1, self._start + 1 + + def set(self): + return self + + def isascending(self): + return self._start < self._end + + def isdescending(self): + return self._start > self._end + + def filter(self, l): + if self._start <= self._end: + return orderedlazyset(self, l) + else: + return orderedlazyset(self, l, ascending=False) + # tell hggettext to extract docstrings from these functions: i18nfunctions = symbols.values()
--- a/mercurial/scmutil.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/scmutil.py Tue Mar 18 14:25:28 2014 -0500 @@ -20,6 +20,16 @@ systemrcpath = scmplatform.systemrcpath userrcpath = scmplatform.userrcpath +def itersubrepos(ctx1, ctx2): + """find subrepos in ctx1 or ctx2""" + # Create a (subpath, ctx) mapping where we prefer subpaths from + # ctx1. The subpaths from ctx2 are important when the .hgsub file + # has been modified (in ctx2) but not yet committed (in ctx1). + subpaths = dict.fromkeys(ctx2.substate, ctx2) + subpaths.update(dict.fromkeys(ctx1.substate, ctx1)) + for subpath, ctx in sorted(subpaths.iteritems()): + yield subpath, ctx.sub(subpath) + def nochangesfound(ui, repo, excluded=None): '''Report no changes for push/pull, excluded is None or a list of nodes excluded from the push/pull. @@ -480,7 +490,7 @@ return defval return repo[val].rev() - seen, l = set(), [] + seen, l = set(), revset.baseset([]) for spec in revs: if l and not seen: seen = set(l) @@ -489,7 +499,7 @@ try: if isinstance(spec, int): seen.add(spec) - l.append(spec) + l = l + [spec] continue if _revrangesep in spec: @@ -501,7 +511,7 @@ rangeiter = repo.changelog.revs(start, end) if not seen and not l: # by far the most common case: revs = ["-1:0"] - l = list(rangeiter) + l = revset.baseset(rangeiter) # defer syncing seen until next iteration continue newrevs = set(rangeiter) @@ -510,23 +520,26 @@ seen.update(newrevs) else: seen = newrevs - l.extend(sorted(newrevs, reverse=start > end)) + l = l + sorted(newrevs, reverse=start > end) continue elif spec and spec in repo: # single unquoted rev rev = revfix(repo, spec, None) if rev in seen: continue seen.add(rev) - l.append(rev) + l = l + [rev] continue except error.RepoLookupError: pass # fall through to new-style queries if old-style fails m = revset.match(repo.ui, spec) - dl = [r for r in m(repo, list(repo)) if r not in seen] - l.extend(dl) - seen.update(dl) + if seen or l: + dl = [r for r in m(repo, revset.spanset(repo)) if r not in 
seen] + l = l + dl + seen.update(dl) + else: + l = m(repo, revset.spanset(repo)) return l @@ -722,7 +735,9 @@ if missings: raise error.RequirementError( _("unknown repository format: requires features '%s' (upgrade " - "Mercurial)") % "', '".join(missings)) + "Mercurial)") % "', '".join(missings), + hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement" + " for details")) return requirements class filecachesubentry(object):
--- a/mercurial/setdiscovery.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/setdiscovery.py Tue Mar 18 14:25:28 2014 -0500 @@ -5,6 +5,40 @@ # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. +""" +Algorithm works in the following way. You have two repository: local and +remote. They both contains a DAG of changelists. + +The goal of the discovery protocol is to find one set of node *common*, +the set of nodes shared by local and remote. + +One of the issue with the original protocol was latency, it could +potentially require lots of roundtrips to discover that the local repo was a +subset of remote (which is a very common case, you usually have few changes +compared to upstream, while upstream probably had lots of development). + +The new protocol only requires one interface for the remote repo: `known()`, +which given a set of changelists tells you if they are present in the DAG. + +The algorithm then works as follow: + + - We will be using three sets, `common`, `missing`, `unknown`. Originally + all nodes are in `unknown`. + - Take a sample from `unknown`, call `remote.known(sample)` + - For each node that remote knows, move it and all its ancestors to `common` + - For each node that remote doesn't know, move it and all its descendants + to `missing` + - Iterate until `unknown` is empty + +There are a couple optimizations, first is instead of starting with a random +sample of missing, start by sending all heads, in the case where the local +repo is a subset, you computed the answer in one round trip. + +Then you can do something similar to the bisecting strategy used when +finding faulty changesets. Instead of random samples, you can try picking +nodes that will maximize the number of nodes that will be +classified with it (since all ancestors or descendants will be marked as well). +""" from node import nullid from i18n import _
--- a/mercurial/subrepo.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/subrepo.py Tue Mar 18 14:25:28 2014 -0500 @@ -326,16 +326,6 @@ os.unlink(os.path.join(dirname, f)) os.walk(path, v, None) -def itersubrepos(ctx1, ctx2): - """find subrepos in ctx1 or ctx2""" - # Create a (subpath, ctx) mapping where we prefer subpaths from - # ctx1. The subpaths from ctx2 are important when the .hgsub file - # has been modified (in ctx2) but not yet committed (in ctx1). - subpaths = dict.fromkeys(ctx2.substate, ctx2) - subpaths.update(dict.fromkeys(ctx1.substate, ctx1)) - for subpath, ctx in sorted(subpaths.iteritems()): - yield subpath, ctx.sub(subpath) - def subrepo(ctx, path): """return instance of the right subrepo class for subrepo in path""" # subrepo inherently violates our import layering rules
--- a/mercurial/templatekw.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/templatekw.py Tue Mar 18 14:25:28 2014 -0500 @@ -195,8 +195,12 @@ """:bookmarks: List of strings. Any bookmarks associated with the changeset. """ + repo = args['ctx']._repo bookmarks = args['ctx'].bookmarks() - return showlist('bookmark', bookmarks, **args) + hybrid = showlist('bookmark', bookmarks, **args) + for value in hybrid.values: + value['current'] = repo._bookmarkcurrent + return hybrid def showchildren(**args): """:children: List of strings. The children of the changeset.""" @@ -297,8 +301,8 @@ def showmanifest(**args): repo, ctx, templ = args['repo'], args['ctx'], args['templ'] args = args.copy() - args.update(dict(rev=repo.manifest.rev(ctx.changeset()[0]), - node=hex(ctx.changeset()[0]))) + args.update({'rev': repo.manifest.rev(ctx.changeset()[0]), + 'node': hex(ctx.changeset()[0])}) return templ('manifest', **args) def shownode(repo, ctx, templ, **args):
--- a/mercurial/templater.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/templater.py Tue Mar 18 14:25:28 2014 -0500 @@ -7,7 +7,7 @@ from i18n import _ import sys, os, re -import util, config, templatefilters, parser, error +import util, config, templatefilters, templatekw, parser, error import types import minirst @@ -244,6 +244,31 @@ return templatefilters.fill(text, width, initindent, hangindent) +def pad(context, mapping, args): + """usage: pad(text, width, fillchar=' ', right=False) + """ + if not (2 <= len(args) <= 4): + raise error.ParseError(_("pad() expects two to four arguments")) + + width = int(args[1][1]) + + text = stringify(args[0][0](context, mapping, args[0][1])) + if args[0][0] == runstring: + text = stringify(runtemplate(context, mapping, + compiletemplate(text, context))) + + right = False + fillchar = ' ' + if len(args) > 2: + fillchar = stringify(args[2][0](context, mapping, args[2][1])) + if len(args) > 3: + right = util.parsebool(args[3][1]) + + if right: + return text.rjust(width, fillchar) + else: + return text.ljust(width, fillchar) + def get(context, mapping, args): if len(args) != 2: # i18n: "get" is a keyword @@ -276,6 +301,19 @@ elif len(args) == 3: yield _evalifliteral(args[2], context, mapping) +def ifcontains(context, mapping, args): + if not (3 <= len(args) <= 4): + # i18n: "ifcontains" is a keyword + raise error.ParseError(_("ifcontains expects three or four arguments")) + + item = stringify(args[0][0](context, mapping, args[0][1])) + items = args[1][0](context, mapping, args[1][1]) + + if item in items: + yield _evalifliteral(args[2], context, mapping) + elif len(args) == 4: + yield _evalifliteral(args[3], context, mapping) + def ifeq(context, mapping, args): if not (3 <= len(args) <= 4): # i18n: "ifeq" is a keyword @@ -318,6 +356,32 @@ # ignore args[0] (the label string) since this is supposed to be a a no-op yield _evalifliteral(args[1], context, mapping) +def revset(context, mapping, args): + """usage: revset(query[, 
formatargs...]) + """ + if not len(args) > 0: + # i18n: "revset" is a keyword + raise error.ParseError(_("revset expects one or more arguments")) + + raw = args[0][1] + ctx = mapping['ctx'] + repo = ctx._repo + + if len(args) > 1: + formatargs = list([a[0](context, mapping, a[1]) for a in args[1:]]) + revs = repo.revs(raw, *formatargs) + revs = list([str(r) for r in revs]) + else: + revsetcache = mapping['cache'].setdefault("revsetcache", {}) + if raw in revsetcache: + revs = revsetcache[raw] + else: + revs = repo.revs(raw) + revs = list([str(r) for r in revs]) + revsetcache[raw] = revs + + return templatekw.showlist("revision", revs, **mapping) + def rstdoc(context, mapping, args): if len(args) != 2: # i18n: "rstdoc" is a keyword @@ -328,6 +392,57 @@ return minirst.format(text, style=style, keep=['verbose']) +def shortest(context, mapping, args): + """usage: shortest(node, minlength=4) + """ + if not (1 <= len(args) <= 2): + raise error.ParseError(_("shortest() expects one or two arguments")) + + node = stringify(args[0][0](context, mapping, args[0][1])) + + minlength = 4 + if len(args) > 1: + minlength = int(args[1][1]) + + cl = mapping['ctx']._repo.changelog + def isvalid(test): + try: + try: + cl.index.partialmatch(test) + except AttributeError: + # Pure mercurial doesn't support partialmatch on the index. + # Fallback to the slow way. 
+ if cl._partialmatch(test) is None: + return False + + try: + i = int(test) + # if we are a pure int, then starting with zero will not be + # confused as a rev; or, obviously, if the int is larger than + # the value of the tip rev + if test[0] == '0' or i > len(cl): + return True + return False + except ValueError: + return True + except error.RevlogError: + return False + + shortest = node + startlength = max(6, minlength) + length = startlength + while True: + test = node[:length] + if isvalid(test): + shortest = test + if length == minlength or length > startlength: + return shortest + length -= 1 + else: + length += 1 + if len(shortest) <= length: + return shortest + def strip(context, mapping, args): if not (1 <= len(args) <= 2): raise error.ParseError(_("strip expects one or two arguments")) @@ -364,10 +479,14 @@ "fill": fill, "get": get, "if": if_, + "ifcontains": ifcontains, "ifeq": ifeq, "join": join, "label": label, + "pad": pad, + "revset": revset, "rstdoc": rstdoc, + "shortest": shortest, "strip": strip, "sub": sub, }
--- a/mercurial/transaction.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/transaction.py Tue Mar 18 14:25:28 2014 -0500 @@ -174,7 +174,10 @@ lines = fp.readlines() fp.close() for l in lines: - f, o = l.split('\0') - entries.append((f, int(o), None)) + try: + f, o = l.split('\0') + entries.append((f, int(o), None)) + except ValueError: + report(_("couldn't read journal entry %r!\n") % l) _playback(file, report, opener, entries)
--- a/mercurial/ui.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/ui.py Tue Mar 18 14:25:28 2014 -0500 @@ -8,6 +8,7 @@ from i18n import _ import errno, getpass, os, socket, sys, tempfile, traceback import config, scmutil, util, error, formatter +from node import hex class ui(object): def __init__(self, src=None): @@ -449,7 +450,9 @@ except KeyError: pass if not user: - raise util.Abort(_('no username supplied (see "hg help config")')) + raise util.Abort(_('no username supplied'), + hint=_('use "hg config --edit" ' + 'to set your username')) if "\n" in user: raise util.Abort(_("username %s contains a newline\n") % repr(user)) return user @@ -712,7 +715,7 @@ if self.debugflag: opts['label'] = opts.get('label', '') + ' ui.debug' self.write(*msg, **opts) - def edit(self, text, user): + def edit(self, text, user, extra={}): (fd, name) = tempfile.mkstemp(prefix="hg-editor-", suffix=".txt", text=True) try: @@ -720,10 +723,18 @@ f.write(text) f.close() + environ = {'HGUSER': user} + if 'transplant_source' in extra: + environ.update({'HGREVISION': hex(extra['transplant_source'])}) + for label in ('source', 'rebase_source'): + if label in extra: + environ.update({'HGREVISION': extra[label]}) + break + editor = self.geteditor() util.system("%s \"%s\"" % (editor, name), - environ={'HGUSER': user}, + environ=environ, onerr=util.Abort, errprefix=_("edit failed"), out=self.fout)
--- a/mercurial/util.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/util.py Tue Mar 18 14:25:28 2014 -0500 @@ -1198,11 +1198,11 @@ """ def lower(date): - d = dict(mb="1", d="1") + d = {'mb': "1", 'd': "1"} return parsedate(date, extendeddateformats, d)[0] def upper(date): - d = dict(mb="12", HI="23", M="59", S="59") + d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"} for days in ("31", "30", "29"): try: d["d"] = days @@ -1989,12 +1989,14 @@ for source, hook in self._hooks: hook(*args) -def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr): +def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout): '''Writes a message to f (stderr) with a nicely formatted stacktrace. - Skips the 'skip' last entries. + Skips the 'skip' last entries. By default it will flush stdout first. It can be used everywhere and do intentionally not require an ui object. Not be used in production code but very convenient while developing. ''' + if otherf: + otherf.flush() f.write('%s at:\n' % msg) entries = [('%s:%s' % (fn, ln), func) for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]] @@ -2002,6 +2004,7 @@ fnmax = max(len(entry[0]) for entry in entries) for fnln, func in entries: f.write(' %-*s in %s\n' % (fnmax, fnln, func)) + f.flush() # convenient shortcut dst = debugstacktrace
--- a/mercurial/win32.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/win32.py Tue Mar 18 14:25:28 2014 -0500 @@ -24,6 +24,7 @@ # GetLastError _ERROR_SUCCESS = 0 +_ERROR_SHARING_VIOLATION = 32 _ERROR_INVALID_PARAMETER = 87 _ERROR_INSUFFICIENT_BUFFER = 122 @@ -59,7 +60,9 @@ _OPEN_EXISTING = 3 +_FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000 _FILE_FLAG_BACKUP_SEMANTICS = 0x02000000 +_FILE_FLAG_DELETE_ON_CLOSE = 0x04000000 # SetFileAttributes _FILE_ATTRIBUTE_NORMAL = 0x80 @@ -119,6 +122,27 @@ _STD_ERROR_HANDLE = _DWORD(-12).value +# CreateToolhelp32Snapshot, Process32First, Process32Next +_TH32CS_SNAPPROCESS = 0x00000002 +_MAX_PATH = 260 + +class _tagPROCESSENTRY32(ctypes.Structure): + _fields_ = [('dwsize', _DWORD), + ('cntUsage', _DWORD), + ('th32ProcessID', _DWORD), + ('th32DefaultHeapID', ctypes.c_void_p), + ('th32ModuleID', _DWORD), + ('cntThreads', _DWORD), + ('th32ParentProcessID', _DWORD), + ('pcPriClassBase', _LONG), + ('dwFlags', _DWORD), + ('szExeFile', ctypes.c_char * _MAX_PATH)] + + def __init__(self): + super(_tagPROCESSENTRY32, self).__init__() + self.dwsize = ctypes.sizeof(self) + + # types of parameters of C functions used (required by pypy) _kernel32.CreateFileA.argtypes = [_LPCSTR, _DWORD, _DWORD, ctypes.c_void_p, @@ -186,6 +210,15 @@ _user32.EnumWindows.argtypes = [_WNDENUMPROC, _LPARAM] _user32.EnumWindows.restype = _BOOL +_kernel32.CreateToolhelp32Snapshot.argtypes = [_DWORD, _DWORD] +_kernel32.CreateToolhelp32Snapshot.restype = _BOOL + +_kernel32.Process32First.argtypes = [_HANDLE, ctypes.c_void_p] +_kernel32.Process32First.restype = _BOOL + +_kernel32.Process32Next.argtypes = [_HANDLE, ctypes.c_void_p] +_kernel32.Process32Next.restype = _BOOL + def _raiseoserror(name): err = ctypes.WinError() raise OSError(err.errno, '%s: %s' % (name, err.strerror)) @@ -309,6 +342,51 @@ width = csbi.srWindow.Right - csbi.srWindow.Left return width +def _1stchild(pid): + '''return the 1st found child of the given pid + + None is returned when no child is found''' + 
pe = _tagPROCESSENTRY32() + + # create handle to list all processes + ph = _kernel32.CreateToolhelp32Snapshot(_TH32CS_SNAPPROCESS, 0) + if ph == _INVALID_HANDLE_VALUE: + raise ctypes.WinError + try: + r = _kernel32.Process32First(ph, ctypes.byref(pe)) + # loop over all processes + while r: + if pe.th32ParentProcessID == pid: + # return first child found + return pe.th32ProcessID + r = _kernel32.Process32Next(ph, ctypes.byref(pe)) + finally: + _kernel32.CloseHandle(ph) + if _kernel32.GetLastError() != _ERROR_NO_MORE_FILES: + raise ctypes.WinError + return None # no child found + +class _tochildpid(int): # pid is _DWORD, which always matches in an int + '''helper for spawndetached, returns the child pid on conversion to string + + Does not resolve the child pid immediately because the child may not yet be + started. + ''' + def childpid(self): + '''returns the child pid of the first found child of the process + with this pid''' + return _1stchild(self) + def __str__(self): + # run when the pid is written to the file + ppid = self.childpid() + if ppid is None: + # race, child has exited since check + # fall back to this pid. Its process will also have disappeared, + # raising the same error type later as when the child pid would + # be returned. 
+ return " %d" % self + return str(ppid) + def spawndetached(args): # No standard library function really spawns a fully detached # process under win32 because they allocate pipes or other objects @@ -339,16 +417,24 @@ if not res: raise ctypes.WinError - return pi.dwProcessId + # _tochildpid because the process is the child of COMSPEC + return _tochildpid(pi.dwProcessId) def unlink(f): '''try to implement POSIX' unlink semantics on Windows''' - if os.path.isdir(f): - # use EPERM because it is POSIX prescribed value, even though - # unlink(2) on directories returns EISDIR on Linux - raise IOError(errno.EPERM, - "Unlinking directory not permitted: '%s'" % f) + # If we can open f exclusively, no other processes must have open handles + # for it and we can expect its name will be deleted immediately when we + # close the handle unless we have another in the same process. We also + # expect we shall simply fail to open f if it is a directory. + fh = _kernel32.CreateFileA(f, 0, 0, None, _OPEN_EXISTING, + _FILE_FLAG_OPEN_REPARSE_POINT | _FILE_FLAG_DELETE_ON_CLOSE, None) + if fh != _INVALID_HANDLE_VALUE: + _kernel32.CloseHandle(fh) + return + error = _kernel32.GetLastError() + if error != _ERROR_SHARING_VIOLATION: + raise ctypes.WinError(error) # POSIX allows to unlink and rename open files. Windows has serious # problems with doing that:
--- a/mercurial/wireproto.py Mon Mar 17 14:57:13 2014 -0400 +++ b/mercurial/wireproto.py Tue Mar 18 14:25:28 2014 -0500 @@ -145,9 +145,6 @@ # client side -def todict(**args): - return args - class wirepeer(peer.peerrepository): def batch(self): @@ -166,7 +163,7 @@ def lookup(self, key): self.requirecap('lookup', _('look up remote revision')) f = future() - yield todict(key=encoding.fromlocal(key)), f + yield {'key': encoding.fromlocal(key)}, f d = f.value success, data = d[:-1].split(" ", 1) if int(success): @@ -186,7 +183,7 @@ @batchable def known(self, nodes): f = future() - yield todict(nodes=encodelist(nodes)), f + yield {'nodes': encodelist(nodes)}, f d = f.value try: yield [bool(int(f)) for f in d] @@ -236,10 +233,10 @@ yield False, None f = future() self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key)) - yield todict(namespace=encoding.fromlocal(namespace), - key=encoding.fromlocal(key), - old=encoding.fromlocal(old), - new=encoding.fromlocal(new)), f + yield {'namespace': encoding.fromlocal(namespace), + 'key': encoding.fromlocal(key), + 'old': encoding.fromlocal(old), + 'new': encoding.fromlocal(new)}, f d = f.value d, output = d.split('\n', 1) try: @@ -257,7 +254,7 @@ yield {}, None f = future() self.ui.debug('preparing listkeys for "%s"\n' % namespace) - yield todict(namespace=encoding.fromlocal(namespace)), f + yield {'namespace': encoding.fromlocal(namespace)}, f d = f.value r = {} for l in d.splitlines():
--- a/setup.py Mon Mar 17 14:57:13 2014 -0400 +++ b/setup.py Tue Mar 18 14:25:28 2014 -0500 @@ -13,11 +13,18 @@ '''A helper function to emulate 2.6+ bytes literals using string literals.''' return s.encode('latin1') + printf = eval('print') + libdir_escape = 'unicode_escape' else: + libdir_escape = 'string_escape' def b(s): '''A helper function to emulate 2.6+ bytes literals using string literals.''' return s + def printf(*args, **kwargs): + f = kwargs.get('file', sys.stdout) + end = kwargs.get('end', '\n') + f.write(b(' ').join(args) + end) # Solaris Python packaging brain damage try: @@ -64,7 +71,6 @@ from distutils.command.build_py import build_py from distutils.command.install_scripts import install_scripts from distutils.spawn import spawn, find_executable -from distutils.ccompiler import new_compiler from distutils import cygwinccompiler from distutils.errors import CCompilerError, DistutilsExecError from distutils.sysconfig import get_python_inc @@ -152,8 +158,8 @@ and not e.startswith(b('warning: Not importing')) \ and not e.startswith(b('obsolete feature not enabled'))] if err: - print >> sys.stderr, "stderr from '%s':" % (' '.join(cmd)) - print >> sys.stderr, '\n'.join([' ' + e for e in err]) + printf("stderr from '%s':" % (' '.join(cmd)), file=sys.stderr) + printf(b('\n').join([b(' ') + e for e in err]), file=sys.stderr) return '' return out @@ -403,7 +409,7 @@ if b('\0') in data: continue - data = data.replace('@LIBDIR@', libdir.encode('string_escape')) + data = data.replace(b('@LIBDIR@'), libdir.encode(libdir_escape)) fp = open(outfile, 'wb') fp.write(data) fp.close() @@ -467,20 +473,6 @@ cygwinccompiler.Mingw32CCompiler = HackedMingw32CCompiler -if sys.platform.startswith('linux') and os.uname()[2] > '2.6': - # The inotify extension is only usable with Linux 2.6 kernels. - # You also need a reasonably recent C library. - # In any case, if it fails to build the error will be skipped ('optional'). 
- cc = new_compiler() - if hasfunction(cc, 'inotify_add_watch'): - inotify = Extension('hgext.inotify.linux._inotify', - ['hgext/inotify/linux/_inotify.c'], - ['mercurial'], - depends=common_depends) - inotify.optional = True - extmodules.append(inotify) - packages.extend(['hgext.inotify', 'hgext.inotify.linux']) - packagedata = {'mercurial': ['locale/*/LC_MESSAGES/hg.mo', 'help/*.txt']} @@ -569,9 +561,11 @@ package_data=packagedata, cmdclass=cmdclass, distclass=hgdist, - options=dict(py2exe=dict(packages=['hgext', 'email']), - bdist_mpkg=dict(zipdist=True, - license='COPYING', - readme='contrib/macosx/Readme.html', - welcome='contrib/macosx/Welcome.html')), + options={'py2exe': {'packages': ['hgext', 'email']}, + 'bdist_mpkg': {'zipdist': True, + 'license': 'COPYING', + 'readme': 'contrib/macosx/Readme.html', + 'welcome': 'contrib/macosx/Welcome.html', + }, + }, **extra)
--- a/tests/blacklists/inotify-failures Mon Mar 17 14:57:13 2014 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,21 +0,0 @@ -# When --inotify is activated, help output and config changes: -test-debugcomplete -test-empty -test-fncache -test-globalopts -test-help -test-hgrc -test-inherit-mode -test-qrecord -test-strict - -# --inotify activates de facto the inotify extension. It does not play well -# with inotify-specific tests, which activate/deactivate inotify at will: -test-inotify -test-inotify-debuginotify -test-inotify-dirty-dirstate -test-inotify-issue1208 -test-inotify-issue1371 -test-inotify-issue1542 -test-inotify-issue1556 -test-inotify-lookup
--- a/tests/blacklists/linux-vfat Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/blacklists/linux-vfat Tue Mar 18 14:25:28 2014 -0500 @@ -8,14 +8,6 @@ # no sockets or fifos test-hup.t -test-inotify-debuginotify.t -test-inotify-dirty-dirstate.t -test-inotify-issue1208.t -test-inotify-issue1371.t -test-inotify-issue1542.t -test-inotify-lookup.t -test-inotify.t -test-inotify-issue1556.t # no hardlinks test-hardlinks.t
--- a/tests/hghave.py Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/hghave.py Tue Mar 18 14:25:28 2014 -0500 @@ -1,4 +1,4 @@ -import os, stat, socket +import os, stat import re import sys import tempfile @@ -96,21 +96,6 @@ finally: os.remove(path) -def has_inotify(): - try: - import hgext.inotify.linux.watcher - except ImportError: - return False - name = tempfile.mktemp(dir='.', prefix=tempprefix) - sock = socket.socket(socket.AF_UNIX) - try: - sock.bind(name) - except socket.error: - return False - sock.close() - os.unlink(name) - return True - def has_fifo(): if getattr(os, "mkfifo", None) is None: return False @@ -248,6 +233,9 @@ except ImportError: return False +def has_python243(): + return sys.version_info >= (2, 4, 3) + def has_outer_repo(): # failing for other reasons than 'no repo' imply that there is a repo return not matchoutput('hg root 2>&1', @@ -312,7 +300,6 @@ "gpg": (has_gpg, "gpg client"), "hardlink": (has_hardlink, "hardlinks"), "icasefs": (has_icasefs, "case insensitive file system"), - "inotify": (has_inotify, "inotify extension support"), "killdaemons": (has_killdaemons, 'killdaemons.py support'), "lsprof": (has_lsprof, "python lsprof module"), "mtn": (has_mtn, "monotone client (>= 1.0)"), @@ -320,6 +307,7 @@ "p4": (has_p4, "Perforce server and client"), "pyflakes": (has_pyflakes, "Pyflakes python linter"), "pygments": (has_pygments, "Pygments source highlighting library"), + "python243": (has_python243, "python >= 2.4.3"), "root": (has_root, "root permissions"), "serve": (has_serve, "platform and python can manage 'hg serve -d'"), "ssl": (has_ssl, "python >= 2.6 ssl module and python OpenSSL"),
--- a/tests/killdaemons.py Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/killdaemons.py Tue Mar 18 14:25:28 2014 -0500 @@ -4,13 +4,51 @@ if os.name =='nt': import ctypes + + def _check(ret, expectederr=None): + if ret == 0: + winerrno = ctypes.GetLastError() + if winerrno == expectederr: + return True + raise ctypes.WinError(winerrno) + def kill(pid, logfn, tryhard=True): logfn('# Killing daemon process %d' % pid) PROCESS_TERMINATE = 1 + PROCESS_QUERY_INFORMATION = 0x400 + SYNCHRONIZE = 0x00100000 + WAIT_OBJECT_0 = 0 + WAIT_TIMEOUT = 258 handle = ctypes.windll.kernel32.OpenProcess( - PROCESS_TERMINATE, False, pid) - ctypes.windll.kernel32.TerminateProcess(handle, -1) - ctypes.windll.kernel32.CloseHandle(handle) + PROCESS_TERMINATE|SYNCHRONIZE|PROCESS_QUERY_INFORMATION, + False, pid) + if handle == 0: + _check(0, 87) # err 87 when process not found + return # process not found, already finished + try: + r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100) + if r == WAIT_OBJECT_0: + pass # terminated, but process handle still available + elif r == WAIT_TIMEOUT: + _check(ctypes.windll.kernel32.TerminateProcess(handle, -1)) + else: + _check(r) + + # TODO?: forcefully kill when timeout + # and ?shorter waiting time? when tryhard==True + r = ctypes.windll.kernel32.WaitForSingleObject(handle, 100) + # timeout = 100 ms + if r == WAIT_OBJECT_0: + pass # process is terminated + elif r == WAIT_TIMEOUT: + logfn('# Daemon process %d is stuck') + else: + check(r) # any error + except: #re-raises + ctypes.windll.kernel32.CloseHandle(handle) # no _check, keep error + raise + _check(ctypes.windll.kernel32.CloseHandle(handle)) + else: def kill(pid, logfn, tryhard=True): try: @@ -51,4 +89,3 @@ if __name__ == '__main__': path, = sys.argv[1:] killdaemons(path) -
--- a/tests/run-tests.py Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/run-tests.py Tue Mar 18 14:25:28 2014 -0500 @@ -152,8 +152,6 @@ help="exit on the first test failure") parser.add_option("-H", "--htmlcov", action="store_true", help="create an HTML report of the coverage of the files") - parser.add_option("--inotify", action="store_true", - help="enable inotify extension when running tests") parser.add_option("-i", "--interactive", action="store_true", help="prompt to accept changed output") parser.add_option("-j", "--jobs", type="int", @@ -344,12 +342,6 @@ hgrc.write('commit = -d "0 0"\n') hgrc.write('shelve = --date "0 0"\n') hgrc.write('tag = -d "0 0"\n') - if options.inotify: - hgrc.write('[extensions]\n') - hgrc.write('inotify=\n') - hgrc.write('[inotify]\n') - hgrc.write('pidfile=daemon.pids') - hgrc.write('appendpid=True\n') if options.extra_config_opt: for opt in options.extra_config_opt: section, key = opt.split('.', 1) @@ -434,7 +426,7 @@ if getattr(os, 'symlink', None): vlog("# Making python executable in test path a symlink to '%s'" % sys.executable) - mypython = os.path.join(BINDIR, pyexename) + mypython = os.path.join(TMPBINDIR, pyexename) try: if os.readlink(mypython) == sys.executable: return @@ -487,10 +479,10 @@ ' build %(compiler)s --build-base="%(base)s"' ' install --force --prefix="%(prefix)s" --install-lib="%(libdir)s"' ' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1' - % dict(exe=sys.executable, py3=py3, pure=pure, compiler=compiler, - base=os.path.join(HGTMP, "build"), - prefix=INST, libdir=PYTHONDIR, bindir=BINDIR, - nohome=nohome, logfile=installerrs)) + % {'exe': sys.executable, 'py3': py3, 'pure': pure, + 'compiler': compiler, 'base': os.path.join(HGTMP, "build"), + 'prefix': INST, 'libdir': PYTHONDIR, 'bindir': BINDIR, + 'nohome': nohome, 'logfile': installerrs}) vlog("# Running", cmd) if os.system(cmd) == 0: if not options.verbose: @@ -776,9 +768,11 @@ # Merge the script output back into a unified test + warnonly = 1 # 
1: not yet, 2: yes, 3: for sure not + if exitcode != 0: # failure has been reported + warnonly = 3 # set to "for sure not" pos = -1 postout = [] - ret = 0 for l in output: lout, lcmd = l, None if salt in l: @@ -797,11 +791,10 @@ if isinstance(r, str): if r == '+glob': lout = el[:-1] + ' (glob)\n' - r = False + r = '' # warn only this line elif r == '-glob': - log('\ninfo, unnecessary glob in %s (after line %d):' - ' %s (glob)\n' % (test, pos, el[-1])) - r = True # pass on unnecessary glob + lout = ''.join(el.rsplit(' (glob)', 1)) + r = '' # warn only this line else: log('\ninfo, unknown linematch result: %r\n' % r) r = False @@ -811,6 +804,10 @@ if needescape(lout): lout = stringescape(lout.rstrip('\n')) + " (esc)\n" postout.append(" " + lout) # let diff deal with it + if r != '': # if line failed + warnonly = 3 # set to "for sure not" + elif warnonly == 1: # is "not yet" (and line is warn only) + warnonly = 2 # set to "yes" do warn if lcmd: # add on last return code @@ -825,6 +822,8 @@ if pos in after: postout += after.pop(pos) + if warnonly == 2: + exitcode = False # set exitcode to warned return exitcode, postout wifexited = getattr(os, "WIFEXITED", lambda x: False) @@ -882,8 +881,9 @@ return 's', test, msg def fail(msg, ret): + warned = ret is False if not options.nodiff: - log("\nERROR: %s %s" % (testpath, msg)) + log("\n%s: %s %s" % (warned and 'Warning' or 'ERROR', test, msg)) if (not ret and options.interactive and os.path.exists(testpath + ".err")): iolock.acquire() @@ -896,7 +896,7 @@ else: rename(testpath + ".err", testpath + ".out") return '.', test, '' - return '!', test, msg + return warned and '~' or '!', test, msg def success(): return '.', test, '' @@ -1075,7 +1075,7 @@ ' (expected %s)\n' % (verb, actualhg, expecthg)) -results = {'.':[], '!':[], 's':[], 'i':[]} +results = {'.':[], '!':[], '~': [], 's':[], 'i':[]} times = [] iolock = threading.Lock() abort = False @@ -1139,7 +1139,8 @@ scheduletests(options, tests) failed = len(results['!']) - 
tested = len(results['.']) + failed + warned = len(results['~']) + tested = len(results['.']) + failed + warned skipped = len(results['s']) ignored = len(results['i']) @@ -1147,11 +1148,13 @@ if not options.noskips: for s in results['s']: print "Skipped %s: %s" % s + for s in results['~']: + print "Warned %s: %s" % s for s in results['!']: print "Failed %s: %s" % s _checkhglib("Tested") - print "# Ran %d tests, %d skipped, %d failed." % ( - tested, skipped + ignored, failed) + print "# Ran %d tests, %d skipped, %d warned, %d failed." % ( + tested, skipped + ignored, warned, failed) if results['!']: print 'python hash seed:', os.environ['PYTHONHASHSEED'] if options.time: @@ -1164,7 +1167,9 @@ print "\ninterrupted!" if failed: - sys.exit(1) + return 1 + if warned: + return 80 testtypes = [('.py', pytest, '.out'), ('.t', tsttest, '')] @@ -1206,7 +1211,7 @@ # we do the randomness ourself to know what seed is used os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32)) - global TESTDIR, HGTMP, INST, BINDIR, PYTHONDIR, COVERAGE_FILE + global TESTDIR, HGTMP, INST, BINDIR, TMPBINDIR, PYTHONDIR, COVERAGE_FILE TESTDIR = os.environ["TESTDIR"] = os.getcwd() if options.tmpdir: options.keep_tmpdir = True @@ -1235,6 +1240,8 @@ if options.with_hg: INST = None BINDIR = os.path.dirname(os.path.realpath(options.with_hg)) + TMPBINDIR = os.path.join(HGTMP, 'install', 'bin') + os.makedirs(TMPBINDIR) # This looks redundant with how Python initializes sys.path from # the location of the script being executed. 
Needed because the @@ -1245,18 +1252,22 @@ else: INST = os.path.join(HGTMP, "install") BINDIR = os.environ["BINDIR"] = os.path.join(INST, "bin") + TMPBINDIR = BINDIR PYTHONDIR = os.path.join(INST, "lib", "python") os.environ["BINDIR"] = BINDIR os.environ["PYTHON"] = PYTHON path = [BINDIR] + os.environ["PATH"].split(os.pathsep) + if TMPBINDIR != BINDIR: + path = [TMPBINDIR] + path os.environ["PATH"] = os.pathsep.join(path) # Include TESTDIR in PYTHONPATH so that out-of-tree extensions # can run .../tests/run-tests.py test-foo where test-foo - # adds an extension to HGRC - pypath = [PYTHONDIR, TESTDIR] + # adds an extension to HGRC. Also include run-test.py directory to import + # modules like heredoctest. + pypath = [PYTHONDIR, TESTDIR, os.path.abspath(os.path.dirname(__file__))] # We have to augment PYTHONPATH, rather than simply replacing # it, in case external libraries are only available via current # PYTHONPATH. (In particular, the Subversion bindings on OS X @@ -1274,7 +1285,7 @@ vlog("# Using", IMPL_PATH, os.environ[IMPL_PATH]) try: - runtests(options, tests) + sys.exit(runtests(options, tests) or 0) finally: time.sleep(.1) cleanup(options)
--- a/tests/test-basic.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-basic.t Tue Mar 18 14:25:28 2014 -0500 @@ -1,5 +1,12 @@ Create a repository: + $ hg config + defaults.backout=-d "0 0" + defaults.commit=-d "0 0" + defaults.shelve=--date "0 0" + defaults.tag=-d "0 0" + ui.slash=True + ui.interactive=False $ hg init t $ cd t
--- a/tests/test-check-code.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-check-code.t Tue Mar 18 14:25:28 2014 -0500 @@ -123,6 +123,7 @@ $ cat > python3-compat.py << EOF > foo <> bar > reduce(lambda a, b: a + b, [1, 2, 3, 4]) + > dict(key=value) > EOF $ "$check_code" python3-compat.py python3-compat.py:1: @@ -131,6 +132,9 @@ python3-compat.py:2: > reduce(lambda a, b: a + b, [1, 2, 3, 4]) reduce is not available in Python 3+ + python3-compat.py:3: + > dict(key=value) + dict() is different in Py2 and 3 and is slower than {} [1] $ cat > is-op.py <<EOF
--- a/tests/test-check-pyflakes.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-check-pyflakes.t Tue Mar 18 14:25:28 2014 -0500 @@ -10,12 +10,10 @@ setup.py:*: 'zlib' imported but unused (glob) setup.py:*: 'bz2' imported but unused (glob) setup.py:*: 'py2exe' imported but unused (glob) - tests/hghave.py:*: 'hgext' imported but unused (glob) tests/hghave.py:*: '_lsprof' imported but unused (glob) tests/hghave.py:*: 'publish_cmdline' imported but unused (glob) tests/hghave.py:*: 'pygments' imported but unused (glob) tests/hghave.py:*: 'ssl' imported but unused (glob) - contrib/win32/hgwebdir_wsgi.py:*: 'from isapi.install import *' used; unable to detect undefined names (glob) - hgext/inotify/linux/__init__.py:*: 'from _inotify import *' used; unable to detect undefined names (glob) + contrib/win32/hgwebdir_wsgi.py:93: 'from isapi.install import *' used; unable to detect undefined names (glob)
--- a/tests/test-command-template.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-command-template.t Tue Mar 18 14:25:28 2014 -0500 @@ -45,7 +45,7 @@ $ hg log --template '{join(file_copies, ",\n")}\n' -r . fourth (second) - $ hg log --template '{file_copies % "{source} -> {name}\n"}' -r . + $ hg log -T '{file_copies % "{source} -> {name}\n"}' -r . second -> fourth Quoting for ui.logtemplate @@ -63,6 +63,29 @@ $ echo 'logtemplate =' >> .hg/hgrc $ echo 'style =' >> .hg/hgrc +Add some simple styles to settings + + $ echo '[templates]' >> .hg/hgrc + $ printf 'simple = "{rev}\\n"\n' >> .hg/hgrc + $ printf 'simple2 = {rev}\\n\n' >> .hg/hgrc + + $ hg log -l1 -Tsimple + 8 + $ hg log -l1 -Tsimple2 + 8 + +Test templates and style maps in files: + + $ echo "{rev}" > tmpl + $ hg log -l1 -T./tmpl + 8 + $ hg log -l1 -Tblah/blah + blah/blah (no-eol) + + $ printf 'changeset = "{rev}\\n"\n' > map-simple + $ hg log -l1 -T./map-simple + 8 + Default style is like normal output: $ hg log > log.out @@ -84,7 +107,7 @@ Compact style works: - $ hg log --style compact + $ hg log -Tcompact 8[tip] 95c24699272e 2020-01-01 10:01 +0000 test third @@ -1712,20 +1735,20 @@ $ echo aa >> aa $ hg ci -u '{node|short}' -m 'desc to be wrapped desc to be wrapped' - $ hg log -r 1 --template '{fill(desc, "20", author, branch)}' + $ hg log -l1 --template '{fill(desc, "20", author, branch)}' {node|short}desc to text.{rev}be wrapped text.{rev}desc to be text.{rev}wrapped (no-eol) - $ hg log -r 1 --template '{fill(desc, "20", "{node|short}:", "text.{rev}:")}' + $ hg log -l1 --template '{fill(desc, "20", "{node|short}:", "text.{rev}:")}' bcc7ff960b8e:desc to text.1:be wrapped text.1:desc to be text.1:wrapped (no-eol) - $ hg log -r 1 --template '{sub(r"[0-9]", "-", author)}' + $ hg log -l 1 --template '{sub(r"[0-9]", "-", author)}' {node|short} (no-eol) - $ hg log -r 1 --template '{sub(r"[0-9]", "-", "{node|short}")}' + $ hg log -l 1 --template '{sub(r"[0-9]", "-", "{node|short}")}' bcc-ff---b-e (no-eol) $ cat 
>> .hg/hgrc <<EOF @@ -1736,9 +1759,9 @@ > text.{rev} = red > text.1 = green > EOF - $ hg log --color=always -r 1 --template '{label(branch, "text\n")}' + $ hg log --color=always -l 1 --template '{label(branch, "text\n")}' \x1b[0;31mtext\x1b[0m (esc) - $ hg log --color=always -r 1 --template '{label("text.{rev}", "text\n")}' + $ hg log --color=always -l 1 --template '{label("text.{rev}", "text\n")}' \x1b[0;32mtext\x1b[0m (esc) Test branches inside if statement: @@ -1746,11 +1769,82 @@ $ hg log -r 0 --template '{if(branches, "yes", "no")}\n' no - $ cd .. +Test shortest(node) function: + + $ echo b > b + $ hg ci -qAm b + $ hg log --template '{shortest(node)}\n' + e777 + bcc7 + f776 + $ hg log --template '{shortest(node, 10)}\n' + e777603221 + bcc7ff960b + f7769ec2ab + +Test pad function + + $ hg log --template '{pad(rev, 20)} {author|user}\n' + 2 test + 1 {node|short} + 0 test + + $ hg log --template '{pad(rev, 20, " ", True)} {author|user}\n' + 2 test + 1 {node|short} + 0 test + + $ hg log --template '{pad(rev, 20, "-", False)} {author|user}\n' + 2------------------- test + 1------------------- {node|short} + 0------------------- test + +Test ifcontains function + + $ hg log --template '{rev} {ifcontains("a", file_adds, "added a", "did not add a")}\n' + 2 did not add a + 1 did not add a + 0 added a + +Test revset function + + $ hg log --template '{rev} {ifcontains(rev, revset("."), "current rev", "not current rev")}\n' + 2 current rev + 1 not current rev + 0 not current rev + + $ hg log --template '{rev} Parents: {revset("parents(%s)", rev)}\n' + 2 Parents: 1 + 1 Parents: 0 + 0 Parents: + + $ hg log --template 'Rev: {rev}\n{revset("::%s", rev) % "Ancestor: {revision}\n"}\n' + Rev: 2 + Ancestor: 0 + Ancestor: 1 + Ancestor: 2 + + Rev: 1 + Ancestor: 0 + Ancestor: 1 + + Rev: 0 + Ancestor: 0 + +Test current bookmark templating + + $ hg book foo + $ hg book bar + $ hg log --template "{rev} {bookmarks % '{bookmark}{ifeq(bookmark, current, \"*\")} '}\n" + 2 bar* foo + 1 + 0 
Test stringify on sub expressions + $ cd .. $ hg log -R a -r 8 --template '{join(files, if("1", if("1", ", ")))}\n' fourth, second, third $ hg log -R a -r 8 --template '{strip(if("1", if("1", "-abc-")), if("1", if("1", "-")))}\n' abc +
--- a/tests/test-commandserver.py Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-commandserver.py Tue Mar 18 14:25:28 2014 -0500 @@ -51,7 +51,10 @@ elif ch == 'L': writeblock(server, input.readline(data)) elif ch == 'r': - return struct.unpack('>i', data)[0] + ret, = struct.unpack('>i', data) + if ret != 0: + print ' [%d]' % ret + return ret else: print "unexpected channel %c: %r" % (ch, data) if ch.isupper(): @@ -101,6 +104,9 @@ # make sure --config doesn't stick runcommand(server, ['id']) + # negative return code should be masked + runcommand(server, ['id', '-runknown']) + def inputeof(server): readchannel(server) server.stdin.write('runcommand\n') @@ -267,12 +273,35 @@ runcommand(server, ['up', 'null']) runcommand(server, ['phase', '-df', 'tip']) - os.system('hg debugobsolete `hg log -r tip --template {node}`') + cmd = 'hg debugobsolete `hg log -r tip --template {node}`' + if os.name == 'nt': + cmd = 'sh -c "%s"' % cmd # run in sh, not cmd.exe + os.system(cmd) runcommand(server, ['log', '--hidden']) runcommand(server, ['log']) +def mqoutsidechanges(server): + readchannel(server) + + # load repo.mq + runcommand(server, ['qapplied']) + os.system('hg qnew 0.diff') + # repo.mq should be invalidated + runcommand(server, ['qapplied']) + + runcommand(server, ['qpop', '--all']) + os.system('hg qqueue --create foo') + # repo.mq should be recreated to point to new queue + runcommand(server, ['qqueue', '--active']) + +def startwithoutrepo(server): + readchannel(server) + runcommand(server, ['init', 'repo2']) + runcommand(server, ['id', '-R', 'repo2']) + if __name__ == '__main__': - os.system('hg init') + os.system('hg init repo') + os.chdir('repo') check(hellomessage) check(unknowncommand) @@ -301,3 +330,11 @@ hgrc.write('[extensions]\nobs=obs.py\n') hgrc.close() check(obsolete) + hgrc = open('.hg/hgrc', 'a') + hgrc.write('[extensions]\nmq=\n') + hgrc.close() + check(mqoutsidechanges) + + os.chdir('..') + check(hellomessage) + check(startwithoutrepo)
--- a/tests/test-commandserver.py.out Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-commandserver.py.out Tue Mar 18 14:25:28 2014 -0500 @@ -43,6 +43,9 @@ 000000000000 runcommand id 000000000000 tip + runcommand id -runknown +abort: unknown revision 'unknown'! + [255] testing inputeof: @@ -70,7 +73,7 @@ testing localhgrc: runcommand showconfig -bundle.mainreporoot=$TESTTMP +bundle.mainreporoot=$TESTTMP/repo defaults.backout=-d "0 0" defaults.commit=-d "0 0" defaults.shelve=--date "0 0" @@ -142,6 +145,7 @@ runcommand phase -r . -p no phases changed + [1] runcommand commit -Am. runcommand rollback repository tip rolled back to revision 3 (undo commit) @@ -222,3 +226,27 @@ date: Thu Jan 01 00:00:00 1970 +0000 summary: 1 + +testing mqoutsidechanges: + + runcommand qapplied + runcommand qapplied +0.diff + runcommand qpop --all +popping 0.diff +patch queue now empty + runcommand qqueue --active +foo + +testing hellomessage: + +o, 'capabilities: getencoding runcommand\nencoding: ***' + runcommand id +abort: there is no Mercurial repository here (.hg not found) + [255] + +testing startwithoutrepo: + + runcommand init repo2 + runcommand id -R repo2 +000000000000 tip
--- a/tests/test-commit.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-commit.t Tue Mar 18 14:25:28 2014 -0500 @@ -103,6 +103,7 @@ $ echo fake >> .hg/requires $ hg commit -m bla abort: unknown repository format: requires features 'fake' (upgrade Mercurial)! + (see http://mercurial.selenic.com/wiki/MissingRequirement for details) [255] $ cd ..
--- a/tests/test-committer.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-committer.t Tue Mar 18 14:25:28 2014 -0500 @@ -49,7 +49,8 @@ $ echo "[ui]" > .hg/hgrc $ echo "username = " >> .hg/hgrc $ hg commit -m commit-1 - abort: no username supplied (see "hg help config") + abort: no username supplied + (use "hg config --edit" to set your username) [255] $ rm .hg/hgrc $ hg commit -m commit-1 2>&1
--- a/tests/test-completion.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-completion.t Tue Mar 18 14:25:28 2014 -0500 @@ -13,6 +13,7 @@ cat clone commit + config copy diff export @@ -43,7 +44,6 @@ rollback root serve - showconfig status summary tag @@ -222,6 +222,7 @@ branches: active, closed bundle: force, rev, branch, base, all, type, ssh, remotecmd, insecure cat: output, rev, decode, include, exclude + config: untrusted, edit copy: after, force, include, exclude, dry-run debugancestor: debugbuilddag: mergeable-file, overwritten-file, new-file @@ -250,7 +251,7 @@ debugrebuilddirstate: rev debugrename: rev debugrevlog: changelog, manifest, dump - debugrevspec: + debugrevspec: optimize debugsetparents: debugsub: rev debugsuccessorssets: @@ -275,7 +276,6 @@ revert: all, date, rev, no-backup, include, exclude, dry-run rollback: dry-run, force root: - showconfig: untrusted tag: force, local, rev, remove, edit, message, date, user tags: tip: patch, git, style, template
--- a/tests/test-convert-bzr.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-convert-bzr.t Tue Mar 18 14:25:28 2014 -0500 @@ -102,7 +102,7 @@ > > ../bzr-timestamps $ cd .. $ hg -R source-hg log --template "{date|isodate}\n" > hg-timestamps - $ diff -u bzr-timestamps hg-timestamps + $ cmp bzr-timestamps hg-timestamps || diff -u bzr-timestamps hg-timestamps $ cd .. merge
--- a/tests/test-convert-hg-sink.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-convert-hg-sink.t Tue Mar 18 14:25:28 2014 -0500 @@ -119,7 +119,7 @@ 0 add baz $ cd new-filemap $ hg tags - tip 2:6f4fd1df87fb + tip 2:3c74706b1ff8 some-tag 0:ba8636729451 $ cd ..
--- a/tests/test-convert-hg-svn.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-convert-hg-svn.t Tue Mar 18 14:25:28 2014 -0500 @@ -103,3 +103,14 @@ scanning source... sorting... converting... + +verify which shamap format we are storing and must be able to handle + + $ cat svn-repo-hg/.hg/shamap + svn:????????-????-????-????-????????????@1 ???????????????????????????????????????? (glob) + svn:????????-????-????-????-????????????@2 ???????????????????????????????????????? (glob) + svn:????????-????-????-????-????????????@2 ???????????????????????????????????????? (glob) + $ cat svn-repo-wc/.svn/hg-shamap + ???????????????????????????????????????? 1 (glob) + ???????????????????????????????????????? svn:????????-????-????-????-????????????@2 (glob) + ???????????????????????????????????????? svn:????????-????-????-????-????????????@2 (glob)
--- a/tests/test-convert-svn-source.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-convert-svn-source.t Tue Mar 18 14:25:28 2014 -0500 @@ -198,11 +198,12 @@ extra: convert_revision=svn:........-....-....-....-............/proj B/mytrunk@1 (re) $ cd .. -Test converting empty heads (issue3347) +Test converting empty heads (issue3347). +Also tests getting logs directly without debugsvnlog. $ svnadmin create svn-empty $ svnadmin load -q svn-empty < "$TESTDIR/svn/empty.svndump" - $ hg --config convert.svn.trunk= convert svn-empty + $ hg --config convert.svn.trunk= --config convert.svn.debugsvnlog=0 convert svn-empty assuming destination svn-empty-hg initializing destination svn-empty-hg repository scanning source...
--- a/tests/test-convert.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-convert.t Tue Mar 18 14:25:28 2014 -0500 @@ -121,6 +121,14 @@ can be used to (for instance) move code in one repository from "default" to a named branch. + The closemap is a file that allows closing of a branch. This is useful if + you want to close a branch. Each entry contains a revision or hash + separated by white space. + + The tagpmap is a file that exactly analogous to the branchmap. This will + rename tags on the fly and prevent the 'update tags' commit usually found + at the end of a convert process. + Mercurial Source ################ @@ -266,6 +274,8 @@ --filemap FILE remap file names using contents of file --splicemap FILE splice synthesized history into place --branchmap FILE change branch names while converting + --closemap FILE closes given revs + --tagmap FILE change tag names while converting --branchsort try to sort changesets by branches --datesort try to sort changesets by date --sourcesort preserve source changesets order
--- a/tests/test-default-push.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-default-push.t Tue Mar 18 14:25:28 2014 -0500 @@ -15,6 +15,16 @@ $ echo b >> b/a $ hg --cwd b ci -mb +Push should provide a hint when both 'default' and 'default-push' not set: + $ cd c + $ hg push --config paths.default= + pushing to default-push + abort: default repository not configured! + (see the "path" section in "hg help config") + [255] + + $ cd .. + Push should push to 'default' when 'default-push' not set: $ hg --cwd b push
--- a/tests/test-doctest.py Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-doctest.py Tue Mar 18 14:25:28 2014 -0500 @@ -27,3 +27,4 @@ testmod('mercurial.util', testtarget='platform') testmod('hgext.convert.cvsps') testmod('hgext.convert.filemap') +testmod('hgext.convert.subversion')
--- a/tests/test-duplicateoptions.py Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-duplicateoptions.py Tue Mar 18 14:25:28 2014 -0500 @@ -1,7 +1,7 @@ import os from mercurial import ui, commands, extensions -ignore = set(['highlight', 'inotify', 'win32text', 'factotum']) +ignore = set(['highlight', 'win32text', 'factotum']) if os.name != 'nt': ignore.add('win32mbcs')
--- a/tests/test-filelog.py Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-filelog.py Tue Mar 18 14:25:28 2014 -0500 @@ -13,7 +13,7 @@ def addrev(text, renamed=False): if renamed: # data doesn't matter. Just make sure filelog.renamed() returns True - meta = dict(copyrev=hex(nullid), copy='bar') + meta = {'copyrev': hex(nullid), 'copy': 'bar'} else: meta = {}
--- a/tests/test-gendoc.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-gendoc.t Tue Mar 18 14:25:28 2014 -0500 @@ -3,7 +3,7 @@ $ "$TESTDIR/hghave" docutils || exit 80 $ HGENCODING=UTF-8 $ export HGENCODING - $ { echo C; find "$TESTDIR/../i18n" -name "*.po" | sort; } | while read PO; do + $ { echo C; ls "$TESTDIR/../i18n"/*.po | sort; } | while read PO; do > LOCALE=`basename "$PO" .po` > echo > echo "% extracting documentation from $LOCALE"
--- a/tests/test-globalopts.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-globalopts.t Tue Mar 18 14:25:28 2014 -0500 @@ -297,6 +297,7 @@ cat output the current or given revision of files clone make a copy of an existing repository commit commit the specified files or all outstanding changes + config show combined config settings from all hgrc files copy mark files as copied for the next commit diff diff repository (or selected files) export dump the header and diffs for one or more changesets @@ -326,7 +327,6 @@ revert restore files to their checkout state root print the root (top) of the current working directory serve start stand-alone webserver - showconfig show combined config settings from all hgrc files status show changed files in the working directory summary summarize working directory state tag add one or more tags for the current or given revision @@ -379,6 +379,7 @@ cat output the current or given revision of files clone make a copy of an existing repository commit commit the specified files or all outstanding changes + config show combined config settings from all hgrc files copy mark files as copied for the next commit diff diff repository (or selected files) export dump the header and diffs for one or more changesets @@ -408,7 +409,6 @@ revert restore files to their checkout state root print the root (top) of the current working directory serve start stand-alone webserver - showconfig show combined config settings from all hgrc files status show changed files in the working directory summary summarize working directory state tag add one or more tags for the current or given revision
--- a/tests/test-help.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-help.t Tue Mar 18 14:25:28 2014 -0500 @@ -62,6 +62,7 @@ cat output the current or given revision of files clone make a copy of an existing repository commit commit the specified files or all outstanding changes + config show combined config settings from all hgrc files copy mark files as copied for the next commit diff diff repository (or selected files) export dump the header and diffs for one or more changesets @@ -91,7 +92,6 @@ revert restore files to their checkout state root print the root (top) of the current working directory serve start stand-alone webserver - showconfig show combined config settings from all hgrc files status show changed files in the working directory summary summarize working directory state tag add one or more tags for the current or given revision @@ -138,6 +138,7 @@ cat output the current or given revision of files clone make a copy of an existing repository commit commit the specified files or all outstanding changes + config show combined config settings from all hgrc files copy mark files as copied for the next commit diff diff repository (or selected files) export dump the header and diffs for one or more changesets @@ -167,7 +168,6 @@ revert restore files to their checkout state root print the root (top) of the current working directory serve start stand-alone webserver - showconfig show combined config settings from all hgrc files status show changed files in the working directory summary summarize working directory state tag add one or more tags for the current or given revision @@ -198,6 +198,86 @@ templating Template Usage urls URL Paths +Test extension help: + $ hg help extensions --config extensions.rebase= --config extensions.children= + Using Additional Features + """"""""""""""""""""""""" + + Mercurial has the ability to add new features through the use of + extensions. 
Extensions may add new commands, add options to existing + commands, change the default behavior of commands, or implement hooks. + + To enable the "foo" extension, either shipped with Mercurial or in the + Python search path, create an entry for it in your configuration file, + like this: + + [extensions] + foo = + + You may also specify the full path to an extension: + + [extensions] + myfeature = ~/.hgext/myfeature.py + + See "hg help config" for more information on configuration files. + + Extensions are not loaded by default for a variety of reasons: they can + increase startup overhead; they may be meant for advanced usage only; they + may provide potentially dangerous abilities (such as letting you destroy + or modify history); they might not be ready for prime time; or they may + alter some usual behaviors of stock Mercurial. It is thus up to the user + to activate extensions as needed. + + To explicitly disable an extension enabled in a configuration file of + broader scope, prepend its path with !: + + [extensions] + # disabling extension bar residing in /path/to/extension/bar.py + bar = !/path/to/extension/bar.py + # ditto, but no path was supplied for extension baz + baz = ! 
+ + enabled extensions: + + children command to display child changesets (DEPRECATED) + rebase command to move sets of revisions to a different ancestor + + disabled extensions: + + acl hooks for controlling repository access + blackbox log repository events to a blackbox for debugging + bugzilla hooks for integrating with the Bugzilla bug tracker + churn command to display statistics about repository history + color colorize output from some commands + convert import revisions from foreign VCS repositories into + Mercurial + eol automatically manage newlines in repository files + extdiff command to allow external programs to compare revisions + factotum http authentication with factotum + gpg commands to sign and verify changesets + hgcia hooks for integrating with the CIA.vc notification service + hgk browse the repository in a graphical way + highlight syntax highlighting for hgweb (requires Pygments) + histedit interactive history editing + keyword expand keywords in tracked files + largefiles track large binary files + mq manage a stack of patches + notify hooks for sending email push notifications + pager browse command output with an external pager + patchbomb command to send changesets as (a series of) patch emails + progress show progress bars for some actions + purge command to delete untracked files from the working + directory + record commands to interactively select changes for + commit/qrefresh + relink recreates hardlinks between repository clones + schemes extend schemes with shortcuts to repository swarms + share share a common history between several working directories + shelve save and restore changes to the working directory + strip strip changesets and their descendents from history + transplant command to transplant changesets from another branch + win32mbcs allow the use of MBCS paths with problematic encodings + zeroconf discover and advertise repositories on the local network Test short command list with verbose option $ hg -v help 
shortlist @@ -483,7 +563,7 @@ ! = missing (deleted by non-hg command, but still tracked) ? = not tracked I = ignored - = origin of the previous file listed as A (added) + = origin of the previous file (with --copies) Returns 0 on success. @@ -569,6 +649,7 @@ use "hg help" for the full list of commands or "hg -v" for details [255] + $ cat > helpext.py <<EOF > import os > from mercurial import commands @@ -577,7 +658,11 @@ > pass > > cmdtable = { - > "nohelp": (nohelp, [], "hg nohelp"), + > "debugoptDEP": (nohelp, [('', 'dopt', None, 'option is DEPRECATED')],), + > "nohelp": (nohelp, [('', 'longdesc', 3, 'x'*90), + > ('n', '', None, 'normal desc'), + > ('', 'newline', '', 'line1\nline2'), + > ], "hg nohelp"), > } > > commands.norepo += ' nohelp' @@ -592,6 +677,13 @@ (no help text available) + options: + + --longdesc VALUE xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx (default: 3) + -n -- normal desc + --newline VALUE line1 line2 + use "hg -v help nohelp" to show the global options $ hg help -k nohelp @@ -623,6 +715,7 @@ cat output the current or given revision of files clone make a copy of an existing repository commit commit the specified files or all outstanding changes + config show combined config settings from all hgrc files copy mark files as copied for the next commit diff diff repository (or selected files) export dump the header and diffs for one or more changesets @@ -652,7 +745,6 @@ revert restore files to their checkout state root print the root (top) of the current working directory serve start stand-alone webserver - showconfig show combined config settings from all hgrc files status show changed files in the working directory summary summarize working directory state tag add one or more tags for the current or given revision @@ -702,6 +794,33 @@ use "hg -v help helpext" to show builtin aliases and global options + +test deprecated option is hidden in command help + $ hg help debugoptDEP + hg debugoptDEP + 
+ (no help text available) + + options: + + use "hg -v help debugoptDEP" to show the global options + +test deprecated option is shown with -v + $ hg help -v debugoptDEP | grep dopt + --dopt option is DEPRECATED + +test deprecated option is hidden with translation with untranslated description +(use many globy for not failing on changed transaction) + $ LANGUAGE=sv hg help debugoptDEP + hg debugoptDEP + + (*) (glob) + + flaggor: + + *"hg -v help debugoptDEP"* (glob) + + Test a help topic $ hg help revs @@ -1259,6 +1378,13 @@ output the current or given revision of files </td></tr> <tr><td> + <a href="/help/config"> + config + </a> + </td><td> + show combined config settings from all hgrc files + </td></tr> + <tr><td> <a href="/help/copy"> copy </a> @@ -1399,13 +1525,6 @@ print the root (top) of the current working directory </td></tr> <tr><td> - <a href="/help/showconfig"> - showconfig - </a> - </td><td> - show combined config settings from all hgrc files - </td></tr> - <tr><td> <a href="/help/tag"> tag </a>
--- a/tests/test-highlight.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-highlight.t Tue Mar 18 14:25:28 2014 -0500 @@ -522,7 +522,7 @@ $ echo "" >> b $ echo "" >> b $ echo "" >> b - $ diff -u b a + $ cmp b a || diff -u b a hgweb filerevision, raw @@ -531,7 +531,7 @@ $ echo "200 Script output follows" > b $ echo "" >> b $ hg cat primes.py >> b - $ diff -u b a + $ cmp b a || diff -u b a hgweb highlightcss friendly
--- a/tests/test-histedit-arguments.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-histedit-arguments.t Tue Mar 18 14:25:28 2014 -0500 @@ -51,10 +51,12 @@ # Edit history between eb57da33312f and 08d98a8350f3 # + # Commits are listed from least to most recent + # # Commands: # p, pick = use commit # e, edit = use commit, but stop for amending - # f, fold = use commit, but fold into previous commit (combines N and N-1) + # f, fold = use commit, but combine it with the one above # d, drop = remove commit from history # m, mess = edit message without changing commit content #
--- a/tests/test-histedit-bookmark-motion.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-histedit-bookmark-motion.t Tue Mar 18 14:25:28 2014 -0500 @@ -67,10 +67,12 @@ # Edit history between d2ae7f538514 and 652413bf663e # + # Commits are listed from least to most recent + # # Commands: # p, pick = use commit # e, edit = use commit, but stop for amending - # f, fold = use commit, but fold into previous commit (combines N and N-1) + # f, fold = use commit, but combine it with the one above # d, drop = remove commit from history # m, mess = edit message without changing commit content # @@ -125,10 +127,12 @@ # Edit history between b346ab9a313d and cacdfd884a93 # + # Commits are listed from least to most recent + # # Commands: # p, pick = use commit # e, edit = use commit, but stop for amending - # f, fold = use commit, but fold into previous commit (combines N and N-1) + # f, fold = use commit, but combine it with the one above # d, drop = remove commit from history # m, mess = edit message without changing commit content #
--- a/tests/test-histedit-commute.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-histedit-commute.t Tue Mar 18 14:25:28 2014 -0500 @@ -61,10 +61,12 @@ # Edit history between 177f92b77385 and 652413bf663e # + # Commits are listed from least to most recent + # # Commands: # p, pick = use commit # e, edit = use commit, but stop for amending - # f, fold = use commit, but fold into previous commit (combines N and N-1) + # f, fold = use commit, but combine it with the one above # d, drop = remove commit from history # m, mess = edit message without changing commit content #
--- a/tests/test-histedit-obsolete.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-histedit-obsolete.t Tue Mar 18 14:25:28 2014 -0500 @@ -51,10 +51,12 @@ # Edit history between d2ae7f538514 and 652413bf663e # + # Commits are listed from least to most recent + # # Commands: # p, pick = use commit # e, edit = use commit, but stop for amending - # f, fold = use commit, but fold into previous commit (combines N and N-1) + # f, fold = use commit, but combine it with the one above # d, drop = remove commit from history # m, mess = edit message without changing commit content #
--- a/tests/test-histedit-outgoing.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-histedit-outgoing.t Tue Mar 18 14:25:28 2014 -0500 @@ -43,10 +43,12 @@ # Edit history between 055a42cdd887 and 652413bf663e # + # Commits are listed from least to most recent + # # Commands: # p, pick = use commit # e, edit = use commit, but stop for amending - # f, fold = use commit, but fold into previous commit (combines N and N-1) + # f, fold = use commit, but combine it with the one above # d, drop = remove commit from history # m, mess = edit message without changing commit content # @@ -72,10 +74,12 @@ # Edit history between 2a4042b45417 and 51281e65ba79 # + # Commits are listed from least to most recent + # # Commands: # p, pick = use commit # e, edit = use commit, but stop for amending - # f, fold = use commit, but fold into previous commit (combines N and N-1) + # f, fold = use commit, but combine it with the one above # d, drop = remove commit from history # m, mess = edit message without changing commit content # @@ -93,10 +97,12 @@ # Edit history between f26599ee3441 and f26599ee3441 # + # Commits are listed from least to most recent + # # Commands: # p, pick = use commit # e, edit = use commit, but stop for amending - # f, fold = use commit, but fold into previous commit (combines N and N-1) + # f, fold = use commit, but combine it with the one above # d, drop = remove commit from history # m, mess = edit message without changing commit content #
--- a/tests/test-hook.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-hook.t Tue Mar 18 14:25:28 2014 -0500 @@ -499,7 +499,7 @@ $ echo >> foo $ hg ci --debug -d '0 0' -m 'change foo' foo - calling hook commit.auto: <function autohook at *> (glob) + calling hook commit.auto: hgext_hookext.autohook Automatically installed hook committed changeset 1:52998019f6252a2b893452765fcb0a47351a5708
--- a/tests/test-http.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-http.t Tue Mar 18 14:25:28 2014 -0500 @@ -153,7 +153,8 @@ > common.permhooks.insert(0, perform_authentication) > EOT $ hg --config extensions.x=userpass.py serve -p $HGPORT2 -d --pid-file=pid \ - > --config server.preferuncompressed=True + > --config server.preferuncompressed=True \ + > --config web.push_ssl=False --config web.allow_push=* -A ../access.log $ cat pid >> $DAEMON_PIDS $ cat << EOF > get_pass.py @@ -163,6 +164,7 @@ > getpass.getpass = newgetpass > EOF +#if python243 $ hg id http://localhost:$HGPORT2/ abort: http authorization required for http://localhost:$HGPORT2/ [255] @@ -176,6 +178,7 @@ password: 5fed3813f7f5 $ hg id http://user:pass@localhost:$HGPORT2/ 5fed3813f7f5 +#endif $ echo '[auth]' >> .hg/hgrc $ echo 'l.schemes=http' >> .hg/hgrc $ echo 'l.prefix=lo' >> .hg/hgrc @@ -187,6 +190,7 @@ 5fed3813f7f5 $ hg id http://user@localhost:$HGPORT2/ 5fed3813f7f5 +#if python243 $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1 streaming all changes 7 files to transfer, 916 bytes of data @@ -201,6 +205,82 @@ abort: HTTP Error 403: no [255] + $ hg -R dest tag -r tip top + $ hg -R dest push http://user:pass@localhost:$HGPORT2/ + pushing to http://user:***@localhost:$HGPORT2/ + searching for changes + remote: adding changesets + remote: adding manifests + remote: adding file changes + remote: added 1 changesets with 1 changes to 1 files + $ hg rollback -q + + $ cut -c38- ../access.log + "GET /?cmd=capabilities HTTP/1.1" 200 - + "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip + "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces + "GET /?cmd=capabilities HTTP/1.1" 200 - + "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip + "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces + "GET /?cmd=capabilities HTTP/1.1" 200 - + "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip + "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces + "GET 
/?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces + "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks + "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks + "GET /?cmd=capabilities HTTP/1.1" 200 - + "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip + "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces + "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces + "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks + "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks + "GET /?cmd=capabilities HTTP/1.1" 200 - + "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip + "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces + "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces + "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks + "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks + "GET /?cmd=capabilities HTTP/1.1" 200 - + "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip + "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces + "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces + "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks + "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks + "GET /?cmd=capabilities HTTP/1.1" 200 - + "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip + "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces + "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces + "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks + "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks + "GET /?cmd=capabilities HTTP/1.1" 200 - + "GET /?cmd=branchmap HTTP/1.1" 200 - + "GET /?cmd=stream_out HTTP/1.1" 401 - + "GET /?cmd=stream_out HTTP/1.1" 200 - + "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks + "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks + "GET 
/?cmd=capabilities HTTP/1.1" 200 - + "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip + "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces + "GET /?cmd=capabilities HTTP/1.1" 200 - + "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip + "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces + "GET /?cmd=listkeys HTTP/1.1" 403 - x-hgarg-1:namespace=namespaces + "GET /?cmd=capabilities HTTP/1.1" 200 - + "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 + "GET /?cmd=branchmap HTTP/1.1" 200 - + "GET /?cmd=branchmap HTTP/1.1" 200 - + "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks + "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks + "POST /?cmd=unbundle HTTP/1.1" 401 - x-hgarg-1:heads=686173686564+5eb5abfefeea63c80dd7553bcc3783f37e0c5524 + "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=686173686564+5eb5abfefeea63c80dd7553bcc3783f37e0c5524 + "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=phases + "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases + "POST /?cmd=pushkey HTTP/1.1" 401 - x-hgarg-1:key=7f4e523d01f2cc3765ac8934da3d14db775ff872&namespace=phases&new=0&old=1 + "POST /?cmd=pushkey HTTP/1.1" 200 - x-hgarg-1:key=7f4e523d01f2cc3765ac8934da3d14db775ff872&namespace=phases&new=0&old=1 + "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks + "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks + +#endif $ cd .. clone of serve with repo in root and unserved subrepo (issue2970)
--- a/tests/test-identify.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-identify.t Tue Mar 18 14:25:28 2014 -0500 @@ -114,11 +114,13 @@ $ echo fake >> .hg/requires $ hg id abort: unknown repository format: requires features 'fake' (upgrade Mercurial)! + (see http://mercurial.selenic.com/wiki/MissingRequirement for details) [255] $ cd .. #if no-outer-repo $ hg id test abort: unknown repository format: requires features 'fake' (upgrade Mercurial)! + (see http://mercurial.selenic.com/wiki/MissingRequirement for details) [255] #endif
--- a/tests/test-inotify-debuginotify.t Mon Mar 17 14:57:13 2014 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,41 +0,0 @@ - - $ "$TESTDIR/hghave" inotify || exit 80 - $ hg init - $ echo "[extensions]" >> $HGRCPATH - $ echo "inotify=" >> $HGRCPATH - -inserve - - $ hg inserve -d --pid-file=hg.pid - $ cat hg.pid >> "$DAEMON_PIDS" - -let the daemon finish its stuff - - $ sleep 1 - -empty - - $ hg debuginotify - directories being watched: - / - .hg/ - $ mkdir a - $ sleep 1 - -only 'a - - $ hg debuginotify - directories being watched: - / - .hg/ - a/ - $ rmdir a - $ sleep 1 - -empty again - - $ hg debuginotify - directories being watched: - / - .hg/ - $ "$TESTDIR/killdaemons.py" hg.pid
--- a/tests/test-inotify-dirty-dirstate.t Mon Mar 17 14:57:13 2014 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,72 +0,0 @@ -issues when status queries are issued when dirstate is dirty - - $ "$TESTDIR/hghave" inotify || exit 80 - $ echo "[extensions]" >> $HGRCPATH - $ echo "inotify=" >> $HGRCPATH - $ echo "fetch=" >> $HGRCPATH - -issue1810: inotify and fetch - - $ hg init test; cd test - $ hg inserve -d --pid-file=../hg.pid - $ cat ../hg.pid >> "$DAEMON_PIDS" - $ echo foo > foo - $ hg add - adding foo - $ hg ci -m foo - $ cd .. - $ hg --config "inotify.pidfile=../hg2.pid" clone test test2 - updating to branch default - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - $ cat ../hg2.pid >> "$DAEMON_PIDS" - $ cd test2 - $ echo bar > bar - $ hg add - adding bar - $ hg ci -m bar - $ cd ../test - $ echo spam > spam - $ hg add - adding spam - $ hg ci -m spam - $ cd ../test2 - $ hg st - -abort, outstanding changes - - $ hg fetch -q - $ hg st - $ cd .. - -issue1719: inotify and mq - - $ echo "mq=" >> $HGRCPATH - $ hg init test-1719 - $ cd test-1719 - -inserve - - $ hg inserve -d --pid-file=../hg-test-1719.pid - $ cat ../hg-test-1719.pid >> "$DAEMON_PIDS" - $ echo content > file - $ hg add file - $ hg qnew -f test.patch - $ hg status - $ hg qpop - popping test.patch - patch queue now empty - -st should not output anything - - $ hg status - $ hg qpush - applying test.patch - now at: test.patch - -st should not output anything - - $ hg status - $ hg qrefresh - $ hg status - - $ cd ..
--- a/tests/test-inotify-issue1371.t Mon Mar 17 14:57:13 2014 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,44 +0,0 @@ - - $ "$TESTDIR/hghave" inotify || exit 80 - $ hg init - $ touch a b c d e f - $ echo "[extensions]" >> $HGRCPATH - $ echo "inotify=" >> $HGRCPATH - -inserve - - $ hg inserve -d --pid-file=hg.pid 2>&1 - $ cat hg.pid >> "$DAEMON_PIDS" - $ hg ci -Am m - adding a - adding b - adding c - adding d - adding e - adding f - adding hg.pid - -let the daemon finish its stuff - - $ sleep 1 - -eed to test all file operations - - $ hg rm a - $ rm b - $ echo c >> c - $ touch g - $ hg add g - $ hg mv e h - $ hg status - M c - A g - A h - R a - R e - ! b - $ sleep 1 - -Are we able to kill the service? if not, the service died on some error - - $ "$TESTDIR/killdaemons.py" hg.pid
--- a/tests/test-inotify-issue1542.t Mon Mar 17 14:57:13 2014 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,36 +0,0 @@ - - $ "$TESTDIR/hghave" inotify || exit 80 - $ hg init - $ touch a - $ mkdir dir - $ touch dir/b - $ touch dir/c - $ echo "[extensions]" >> $HGRCPATH - $ echo "inotify=" >> $HGRCPATH - $ hg add dir/c - -inserve - - $ hg inserve -d --pid-file=hg.pid 2>&1 - $ cat hg.pid >> "$DAEMON_PIDS" - $ hg st - A dir/c - ? a - ? dir/b - ? hg.pid - -moving dir out - - $ mv dir ../tmp-test-inotify-issue1542 - -status - - $ hg st - ! dir/c - ? a - ? hg.pid - $ sleep 1 - -Are we able to kill the service? if not, the service died on some error - - $ "$TESTDIR/killdaemons.py" hg.pid
--- a/tests/test-inotify-issue1556.t Mon Mar 17 14:57:13 2014 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,31 +0,0 @@ - - $ "$TESTDIR/hghave" inotify || exit 80 - $ hg init - $ touch a b - $ hg add a b - $ rm b - -status without inotify - - $ hg st - A a - ! b - $ echo "[extensions]" >> $HGRCPATH - $ echo "inotify=" >> $HGRCPATH - -inserve - - $ hg inserve -d --pid-file=hg.pid 2>&1 - $ cat hg.pid >> "$DAEMON_PIDS" - -status - - $ hg st - A a - ! b - ? hg.pid - $ sleep 1 - -Are we able to kill the service? if not, the service died on some error - - $ "$TESTDIR/killdaemons.py" hg.pid
--- a/tests/test-inotify-lookup.t Mon Mar 17 14:57:13 2014 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,14 +0,0 @@ - - $ "$TESTDIR/hghave" inotify || exit 80 - $ hg init - $ echo "[extensions]" > .hg/hgrc - $ echo "inotify=" >> .hg/hgrc - $ hg inserve -d --pid-file .hg/inotify.pid - $ echo a > a - $ hg ci -Aqm0 - $ hg co -q null - $ hg co -q - $ hg st - $ cat a - a - $ "$TESTDIR/killdaemons.py" .hg/inotify.pid
--- a/tests/test-inotify.t Mon Mar 17 14:57:13 2014 -0400 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,182 +0,0 @@ - - $ "$TESTDIR/hghave" inotify || exit 80 - $ hg init repo1 - $ cd repo1 - $ touch a b c d e - $ mkdir dir - $ mkdir dir/bar - $ touch dir/x dir/y dir/bar/foo - $ hg ci -Am m - adding a - adding b - adding c - adding d - adding dir/bar/foo - adding dir/x - adding dir/y - adding e - $ cd .. - $ hg clone repo1 repo2 - updating to branch default - 8 files updated, 0 files merged, 0 files removed, 0 files unresolved - $ echo "[extensions]" >> $HGRCPATH - $ echo "inotify=" >> $HGRCPATH - $ cd repo2 - $ echo b >> a - -check that daemon started automatically works correctly -and make sure that inotify.pidfile works - - $ hg --config "inotify.pidfile=../hg2.pid" status - M a - -make sure that pidfile worked. Output should be silent. - - $ "$TESTDIR/killdaemons.py" ../hg2.pid - $ cd ../repo1 - -inserve - - $ hg inserve -d --pid-file=hg.pid - $ cat hg.pid >> "$DAEMON_PIDS" - -let the daemon finish its stuff - - $ sleep 1 - -cannot start, already bound - - $ hg inserve - abort: inotify-server: cannot start: socket is already bound - [255] - -issue907 - - $ hg status - ? hg.pid - -clean - - $ hg status -c - C a - C b - C c - C d - C dir/bar/foo - C dir/x - C dir/y - C e - -all - - $ hg status -A - ? hg.pid - C a - C b - C c - C d - C dir/bar/foo - C dir/x - C dir/y - C e - -path patterns - - $ echo x > dir/x - $ hg status . - M dir/x - ? hg.pid - $ hg status dir - M dir/x - $ cd dir - $ hg status . - M x - $ cd .. 
- -issue 1375 -testing that we can remove a folder and then add a file with the same name -issue 1375 - - $ mkdir h - $ echo h > h/h - $ hg ci -Am t - adding h/h - adding hg.pid - $ hg rm h - removing h/h - $ echo h >h - $ hg add h - $ hg status - A h - R h/h - $ hg ci -m0 - -Test for issue1735: inotify watches files in .hg/merge - - $ hg st - $ echo a > a - $ hg ci -Am a - $ hg st - $ echo b >> a - $ hg ci -m ab - $ hg st - $ echo c >> a - $ hg st - M a - $ HGMERGE=internal:local hg up 0 - 1 files updated, 1 files merged, 2 files removed, 0 files unresolved - $ hg st - M a - $ HGMERGE=internal:local hg up - 3 files updated, 1 files merged, 0 files removed, 0 files unresolved - $ hg st - M a - -Test for 1844: "hg ci folder" will not commit all changes beneath "folder" - - $ mkdir 1844 - $ echo a > 1844/foo - $ hg add 1844 - adding 1844/foo - $ hg ci -m 'working' - $ echo b >> 1844/foo - $ hg ci 1844 -m 'broken' - -Test for issue884: "Build products not ignored until .hgignore is touched" - - $ echo '^build$' > .hgignore - $ hg add .hgignore - $ hg ci .hgignore -m 'ignorelist' - -Now, lets add some build products... - - $ mkdir build - $ touch build/x - $ touch build/y - -build/x & build/y shouldn't appear in "hg st" - - $ hg st - $ "$TESTDIR/killdaemons.py" hg.pid - - $ cd .. - -Ensure that if the repo is in a directory whose name is too long, the -unix domain socket is reached through a symlink (issue1208). - - $ mkdir 0_3456789_10_456789_20_456789_30_456789_40_456789_50_45678_ - $ cd 0_3456789_10_456789_20_456789_30_456789_40_456789_50_45678_ - $ mkdir 60_456789_70_456789_80_456789_90_456789_100_56789_ - $ cd 60_456789_70_456789_80_456789_90_456789_100_56789_ - - $ hg --config inotify.pidfile=hg3.pid clone -q ../../repo1 - $ readlink repo1/.hg/inotify.sock - */inotify.sock (glob) - -Trying to start the server a second time should fail as usual. 
- - $ hg --cwd repo1 inserve - abort: inotify-server: cannot start: socket is already bound - [255] - - $ "$TESTDIR/killdaemons.py" hg3.pid
--- a/tests/test-install.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-install.t Tue Mar 18 14:25:28 2014 -0500 @@ -1,6 +1,8 @@ hg debuginstall $ hg debuginstall checking encoding (ascii)... + checking Python executable (*) (glob) + checking Python version (2.*) (glob) checking Python lib (*lib*)... (glob) checking installed modules (*mercurial)... (glob) checking templates (*mercurial?templates)... (glob) @@ -11,12 +13,14 @@ hg debuginstall with no username $ HGUSER= hg debuginstall checking encoding (ascii)... + checking Python executable (*) (glob) + checking Python version (2.*) (glob) checking Python lib (*lib*)... (glob) checking installed modules (*mercurial)... (glob) checking templates (*mercurial?templates)... (glob) checking commit editor... checking username... - no username supplied (see "hg help config") + no username supplied (specify a username in your configuration file) 1 problems detected, please check your install! [1]
--- a/tests/test-largefiles-cache.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-largefiles-cache.t Tue Mar 18 14:25:28 2014 -0500 @@ -47,7 +47,7 @@ $ hg update -r0 getting changed largefiles - large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file://$TESTTMP/mirror (glob) + large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob) 0 largefiles updated, 0 removed 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ hg status @@ -64,7 +64,7 @@ $ hg update -r0 getting changed largefiles - large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file://$TESTTMP/mirror (glob) + large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob) 0 largefiles updated, 0 removed 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ hg status
--- a/tests/test-largefiles.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-largefiles.t Tue Mar 18 14:25:28 2014 -0500 @@ -900,7 +900,7 @@ adding manifests adding file changes added 6 changesets with 16 changes to 8 files - calling hook changegroup.lfiles: <function checkrequireslfiles at *> (glob) + calling hook changegroup.lfiles: hgext.largefiles.reposetup.checkrequireslfiles (run 'hg update' to get a working copy) pulling largefiles for revision 7 found 971fb41e78fea4f8e0ba5244784239371cb00591 in store @@ -1280,7 +1280,7 @@ $ rm ${USERCACHE}/7838695e10da2bb75ac1156565f40a2595fa2fa0 $ hg up -r 6 getting changed largefiles - large3: largefile 7838695e10da2bb75ac1156565f40a2595fa2fa0 not available from file://$TESTTMP/d (glob) + large3: largefile 7838695e10da2bb75ac1156565f40a2595fa2fa0 not available from file:/*/$TESTTMP/d (glob) 1 largefiles updated, 2 removed 4 files updated, 0 files merged, 2 files removed, 0 files unresolved $ rm normal3 @@ -1301,7 +1301,7 @@ ! normal3 $ hg up -Cr. getting changed largefiles - large3: largefile 7838695e10da2bb75ac1156565f40a2595fa2fa0 not available from file://$TESTTMP/d (glob) + large3: largefile 7838695e10da2bb75ac1156565f40a2595fa2fa0 not available from file:/*/$TESTTMP/d (glob) 0 largefiles updated, 0 removed 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ hg st @@ -1323,7 +1323,7 @@ 4 files updated, 0 files merged, 0 files removed, 0 files unresolved (branch merge, don't forget to commit) getting changed largefiles - large3: largefile 7838695e10da2bb75ac1156565f40a2595fa2fa0 not available from file://$TESTTMP/d (glob) + large3: largefile 7838695e10da2bb75ac1156565f40a2595fa2fa0 not available from file:/*/$TESTTMP/d (glob) 1 largefiles updated, 0 removed $ hg rollback -q @@ -2260,6 +2260,7 @@ $TESTTMP/individualenabling/enabledlocally (glob) $ hg -R notenabledlocally root abort: unknown repository format: requires features 'largefiles' (upgrade Mercurial)! 
+ (see http://mercurial.selenic.com/wiki/MissingRequirement for details) [255] $ hg init push-dst @@ -2276,6 +2277,7 @@ $ hg clone enabledlocally clone-dst abort: unknown repository format: requires features 'largefiles' (upgrade Mercurial)! + (see http://mercurial.selenic.com/wiki/MissingRequirement for details) [255] $ test -d clone-dst [1]
--- a/tests/test-lfconvert.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-lfconvert.t Tue Mar 18 14:25:28 2014 -0500 @@ -342,7 +342,7 @@ $ rm largefiles-repo/.hg/largefiles/* $ hg lfconvert --to-normal issue3519 normalized3519 initializing destination normalized3519 - large: largefile 2e000fa7e85759c7f4c254d4d9c33ef481e459a7 not available from file://$TESTTMP/largefiles-repo (glob) + large: largefile 2e000fa7e85759c7f4c254d4d9c33ef481e459a7 not available from file:/*/$TESTTMP/largefiles-repo (glob) abort: missing largefile 'large' from revision d4892ec57ce212905215fad1d9018f56b99202ad [255]
--- a/tests/test-lock-badness.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-lock-badness.t Tue Mar 18 14:25:28 2014 -0500 @@ -1,4 +1,7 @@ -#if unix-permissions no-root +#if unix-permissions no-root no-windows + +Prepare + $ hg init a $ echo a > a/a $ hg -R a ci -A -m a @@ -8,10 +11,25 @@ updating to branch default 1 files updated, 0 files merged, 0 files removed, 0 files unresolved +One process waiting for another + + $ cat > hooks.py << EOF + > import time + > def sleepone(**x): time.sleep(1) + > def sleephalf(**x): time.sleep(0.5) + > EOF $ echo b > b/b - $ hg -R b ci -A -m b + $ hg -R b ci -A -m b --config hooks.precommit="python:`pwd`/hooks.py:sleepone" > stdout & + $ hg -R b up -q --config hooks.pre-update="python:`pwd`/hooks.py:sleephalf" + waiting for lock on working directory of b held by '*:*' (glob) + got lock after ? seconds (glob) + warning: ignoring unknown working parent d2ae7f538514! + $ wait + $ cat stdout adding b +Pushing to a local read-only repo that can't be locked + $ chmod 100 a/.hg/store $ hg -R b push a
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-merge-criss-cross.t Tue Mar 18 14:25:28 2014 -0500 @@ -0,0 +1,111 @@ +Criss cross merging + + $ hg init criss-cross + $ cd criss-cross + $ echo '0 base' > f1 + $ echo '0 base' > f2 + $ hg ci -Aqm '0 base' + + $ echo '1 first change' > f1 + $ hg ci -m '1 first change f1' + + $ hg up -qr0 + $ echo '2 first change' > f2 + $ hg ci -qm '2 first change f2' + + $ hg merge -qr 1 + $ hg ci -m '3 merge' + + $ hg up -qr2 + $ hg merge -qr1 + $ hg ci -qm '4 merge' + + $ echo '5 second change' > f1 + $ hg ci -m '5 second change f1' + + $ hg up -qr3 + $ echo '6 second change' > f2 + $ hg ci -m '6 second change f2' + + $ hg log -G + @ changeset: 6:3b08d01b0ab5 + | tag: tip + | parent: 3:cf89f02107e5 + | user: test + | date: Thu Jan 01 00:00:00 1970 +0000 + | summary: 6 second change f2 + | + | o changeset: 5:adfe50279922 + | | user: test + | | date: Thu Jan 01 00:00:00 1970 +0000 + | | summary: 5 second change f1 + | | + | o changeset: 4:7d3e55501ae6 + | |\ parent: 2:40663881a6dd + | | | parent: 1:0f6b37dbe527 + | | | user: test + | | | date: Thu Jan 01 00:00:00 1970 +0000 + | | | summary: 4 merge + | | | + o---+ changeset: 3:cf89f02107e5 + | | | parent: 2:40663881a6dd + |/ / parent: 1:0f6b37dbe527 + | | user: test + | | date: Thu Jan 01 00:00:00 1970 +0000 + | | summary: 3 merge + | | + | o changeset: 2:40663881a6dd + | | parent: 0:40494bf2444c + | | user: test + | | date: Thu Jan 01 00:00:00 1970 +0000 + | | summary: 2 first change f2 + | | + o | changeset: 1:0f6b37dbe527 + |/ user: test + | date: Thu Jan 01 00:00:00 1970 +0000 + | summary: 1 first change f1 + | + o changeset: 0:40494bf2444c + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: 0 base + + + $ hg merge -v --debug --tool internal:dump 5 + searching for copies back to rev 3 + resolving manifests + branchmerge: True, force: False, partial: False + ancestor: 0f6b37dbe527, local: 3b08d01b0ab5+, remote: adfe50279922 + f1: remote is newer -> g + f2: 
versions differ -> m + preserving f2 for resolve of f2 + getting f1 + updating: f1 1/2 files (50.00%) + updating: f2 2/2 files (100.00%) + picked tool 'internal:dump' for f2 (binary False symlink False) + merging f2 + my f2@3b08d01b0ab5+ other f2@adfe50279922 ancestor f2@40494bf2444c + 1 files updated, 0 files merged, 0 files removed, 1 files unresolved + use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon + [1] + + $ head * + ==> f1 <== + 5 second change + + ==> f2 <== + 6 second change + + ==> f2.base <== + 0 base + + ==> f2.local <== + 6 second change + + ==> f2.orig <== + 6 second change + + ==> f2.other <== + 2 first change + + $ cd ..
--- a/tests/test-minirst.py Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-minirst.py Tue Mar 18 14:25:28 2014 -0500 @@ -244,3 +244,14 @@ print table debugformats('table', table) + +data = [['s', 'long', 'line\ngoes on here'], + ['', 'xy', 'tried to fix here\n by indenting']] + +rst = minirst.maketable(data, 1, False) +table = ''.join(rst) + +print table + +debugformats('table+nl', table) +
--- a/tests/test-minirst.py.out Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-minirst.py.out Tue Mar 18 14:25:28 2014 -0500 @@ -773,3 +773,34 @@ </table> ---------------------------------------------------------------------- + = ==== ====================================== + s long line goes on here + xy tried to fix here by indenting + = ==== ====================================== + +== table+nl == +60 column format: +---------------------------------------------------------------------- + s long line goes on here + xy tried to fix here by indenting +---------------------------------------------------------------------- + +30 column format: +---------------------------------------------------------------------- + s long line goes on here + xy tried to fix here by + indenting +---------------------------------------------------------------------- + +html format: +---------------------------------------------------------------------- +<table> +<tr><td>s</td> +<td>long</td> +<td>line goes on here</td></tr> +<tr><td></td> +<td>xy</td> +<td>tried to fix here by indenting</td></tr> +</table> +---------------------------------------------------------------------- +
--- a/tests/test-module-imports.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-module-imports.t Tue Mar 18 14:25:28 2014 -0500 @@ -22,18 +22,20 @@ hidden by deduplication algorithm in the cycle detector, so fixing these may expose other cycles. - $ hg locate 'mercurial/**.py' | xargs python "$import_checker" - mercurial/dispatch.py mixed stdlib and relative imports: - commands, error, extensions, fancyopts, hg, hook, util - mercurial/fileset.py mixed stdlib and relative imports: - error, merge, parser, util - mercurial/revset.py mixed stdlib and relative imports: - discovery, error, hbisect, parser, phases, util - mercurial/templater.py mixed stdlib and relative imports: - config, error, parser, templatefilters, util - mercurial/ui.py mixed stdlib and relative imports: - config, error, formatter, scmutil, util - Import cycle: mercurial.cmdutil -> mercurial.subrepo -> mercurial.cmdutil - Import cycle: mercurial.repoview -> mercurial.revset -> mercurial.repoview - Import cycle: mercurial.fileset -> mercurial.merge -> mercurial.subrepo -> mercurial.match -> mercurial.fileset - Import cycle: mercurial.filemerge -> mercurial.match -> mercurial.fileset -> mercurial.merge -> mercurial.filemerge + $ hg locate 'mercurial/**.py' | sed 's-\\-/-g' | xargs python "$import_checker" + mercurial/dispatch.py mixed imports + stdlib: commands + relative: error, extensions, fancyopts, hg, hook, util + mercurial/fileset.py mixed imports + stdlib: parser + relative: error, merge, util + mercurial/revset.py mixed imports + stdlib: parser + relative: discovery, error, hbisect, phases, util + mercurial/templater.py mixed imports + stdlib: parser + relative: config, error, templatefilters, templatekw, util + mercurial/ui.py mixed imports + stdlib: formatter + relative: config, error, scmutil, util + Import cycle: mercurial.cmdutil -> mercurial.context -> mercurial.subrepo -> mercurial.cmdutil -> mercurial.cmdutil
--- a/tests/test-parseindex2.py Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-parseindex2.py Tue Mar 18 14:25:28 2014 -0500 @@ -1,8 +1,13 @@ -"""This unit test tests parsers.parse_index2().""" +"""This unit test primarily tests parsers.parse_index2(). + +It also checks certain aspects of the parsers module as a whole. +""" from mercurial import parsers from mercurial.node import nullid, nullrev import struct +import subprocess +import sys # original python implementation def gettype(q): @@ -95,7 +100,70 @@ index, chunkcache = parsers.parse_index2(data, inline) return list(index), chunkcache +def importparsers(hexversion): + """Import mercurial.parsers with the given sys.hexversion.""" + # The file parsers.c inspects sys.hexversion to determine the version + # of the currently-running Python interpreter, so we monkey-patch + # sys.hexversion to simulate using different versions. + code = ("import sys; sys.hexversion=%s; " + "import mercurial.parsers" % hexversion) + cmd = "python -c \"%s\"" % code + # We need to do these tests inside a subprocess because parser.c's + # version-checking code happens inside the module init function, and + # when using reload() to reimport an extension module, "The init function + # of extension modules is not called a second time" + # (from http://docs.python.org/2/library/functions.html?#reload). 
+ p = subprocess.Popen(cmd, shell=True, + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + return p.communicate() # returns stdout, stderr + +def printhexfail(testnumber, hexversion, stdout, expected): + try: + hexstring = hex(hexversion) + except TypeError: + hexstring = None + print ("FAILED: version test #%s with Python %s and patched " + "sys.hexversion %r (%r):\n Expected %s but got:\n-->'%s'\n" % + (testnumber, sys.version_info, hexversion, hexstring, expected, + stdout)) + +def testversionokay(testnumber, hexversion): + stdout, stderr = importparsers(hexversion) + if stdout: + printhexfail(testnumber, hexversion, stdout, expected="no stdout") + +def testversionfail(testnumber, hexversion): + stdout, stderr = importparsers(hexversion) + # We include versionerrortext to distinguish from other ImportErrors. + errtext = "ImportError: %s" % parsers.versionerrortext + if errtext not in stdout: + printhexfail(testnumber, hexversion, stdout, + expected="stdout to contain %r" % errtext) + +def makehex(major, minor, micro): + return int("%x%02x%02x00" % (major, minor, micro), 16) + +def runversiontests(): + """Check the version-detection logic when importing parsers.""" + info = sys.version_info + major, minor, micro = info[0], info[1], info[2] + # Test same major-minor versions. + testversionokay(1, makehex(major, minor, micro)) + testversionokay(2, makehex(major, minor, micro + 1)) + # Test different major-minor versions. + testversionfail(3, makehex(major + 1, minor, micro)) + testversionfail(4, makehex(major, minor + 1, micro)) + testversionfail(5, "'foo'") + def runtest() : + # Only test the version-detection logic if it is present. + try: + parsers.versionerrortext + except AttributeError: + pass + else: + runversiontests() + # Check that parse_index2() raises TypeError on bad arguments. try: parse_index2(0, True)
--- a/tests/test-push-warn.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-push-warn.t Tue Mar 18 14:25:28 2014 -0500 @@ -22,6 +22,7 @@ $ hg push ../a pushing to ../a searching for changes + remote has heads on branch 'default' that are not known locally: 1c9246a22a0a abort: push creates new remote head 1e108cc5548c! (pull and merge or see "hg help push" for details about pushing new heads) [255] @@ -35,6 +36,7 @@ query 2; still undecided: 1, sample size is: 1 2 total queries listing keys for "bookmarks" + remote has heads on branch 'default' that are not known locally: 1c9246a22a0a new remote heads on branch 'default': 1e108cc5548c abort: push creates new remote head 1e108cc5548c! @@ -405,6 +407,7 @@ $ hg -R i push h pushing to h searching for changes + remote has heads on branch 'default' that are not known locally: ce4212fc8847 abort: push creates new remote head 97bd0c84d346! (pull and merge or see "hg help push" for details about pushing new heads) [255]
--- a/tests/test-rebase-conflicts.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-rebase-conflicts.t Tue Mar 18 14:25:28 2014 -0500 @@ -210,7 +210,7 @@ summary: added default.txt $ hg rebase -s9 -d2 --debug # use debug to really check merge base used - rebase onto 2 starting from [<changectx e31216eec445>] + rebase onto 2 starting from e31216eec445 rebasing: 9:e31216eec445 5/6 changesets (83.33%) future parents are 2 and -1 rebase status stored
--- a/tests/test-record.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-record.t Tue Mar 18 14:25:28 2014 -0500 @@ -251,7 +251,8 @@ $ echo 11 >> plain $ unset HGUSER $ hg record --config ui.username= -d '8 0' -m end plain - abort: no username supplied (see "hg help config") + abort: no username supplied + (use "hg config --edit" to set your username) [255]
--- a/tests/test-rename-merge2.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-rename-merge2.t Tue Mar 18 14:25:28 2014 -0500 @@ -335,6 +335,8 @@ test L:um a b R:um a b W: - 9 do merge with ancestor in a -------------- searching for copies back to rev 1 + unmatched files new in both: + b resolving manifests branchmerge: True, force: False, partial: False ancestor: 924404dff337, local: 62e7bf090eba+, remote: 49b6d8032493 @@ -403,6 +405,8 @@ test L:nc a b R:up b W: - 12 merge b no ancestor -------------- searching for copies back to rev 1 + unmatched files new in both: + b resolving manifests branchmerge: True, force: False, partial: False ancestor: 924404dff337, local: 86a2aa42fc76+, remote: af30c7647fc7 @@ -431,6 +435,8 @@ test L:up b R:nm a b W: - 13 merge b no ancestor -------------- searching for copies back to rev 1 + unmatched files new in both: + b resolving manifests branchmerge: True, force: False, partial: False ancestor: 924404dff337, local: 59318016310c+, remote: bdb19105162a @@ -461,6 +467,8 @@ test L:nc a b R:up a b W: - 14 merge b no ancestor -------------- searching for copies back to rev 1 + unmatched files new in both: + b resolving manifests branchmerge: True, force: False, partial: False ancestor: 924404dff337, local: 86a2aa42fc76+, remote: 8dbce441892a @@ -492,6 +500,8 @@ test L:up b R:nm a b W: - 15 merge b no ancestor, remove a -------------- searching for copies back to rev 1 + unmatched files new in both: + b resolving manifests branchmerge: True, force: False, partial: False ancestor: 924404dff337, local: 59318016310c+, remote: bdb19105162a @@ -522,6 +532,8 @@ test L:nc a b R:up a b W: - 16 get a, merge b no ancestor -------------- searching for copies back to rev 1 + unmatched files new in both: + b resolving manifests branchmerge: True, force: False, partial: False ancestor: 924404dff337, local: 86a2aa42fc76+, remote: 8dbce441892a @@ -553,6 +565,8 @@ test L:up a b R:nc a b W: - 17 keep a, merge b no ancestor -------------- searching for 
copies back to rev 1 + unmatched files new in both: + b resolving manifests branchmerge: True, force: False, partial: False ancestor: 924404dff337, local: 0b76e65c8289+, remote: 4ce40f5aca24 @@ -581,6 +595,8 @@ test L:nm a b R:up a b W: - 18 merge b no ancestor -------------- searching for copies back to rev 1 + unmatched files new in both: + b resolving manifests branchmerge: True, force: False, partial: False ancestor: 924404dff337, local: 02963e448370+, remote: 8dbce441892a @@ -614,6 +630,8 @@ test L:up a b R:nm a b W: - 19 merge b no ancestor, prompt remove a -------------- searching for copies back to rev 1 + unmatched files new in both: + b resolving manifests branchmerge: True, force: False, partial: False ancestor: 924404dff337, local: 0b76e65c8289+, remote: bdb19105162a @@ -752,3 +770,201 @@ $ cd .. + + +Systematic and terse testing of merge merges and ancestor calculation: + +Excpected result: + +\ a m1 m2 dst +0 - f f f "versions differ" +1 f g g g "versions differ" +2 f f f f "versions differ" +3 f f g f+g "remote copied to " + f +4 f f g g "remote moved to " + f +5 f g f f+g "local copied to " + f2 +6 f g f g "local moved to " + f2 +7 - (f) f f "remote differs from untracked local" +8 f (f) f f "remote differs from untracked local" + + $ hg init ancestortest + $ cd ancestortest + $ for x in 1 2 3 4 5 6 8; do mkdir $x; echo a > $x/f; done + $ hg ci -Aqm "a" + $ mkdir 0 + $ touch 0/f + $ hg mv 1/f 1/g + $ hg cp 5/f 5/g + $ hg mv 6/f 6/g + $ hg rm 8/f + $ for x in */*; do echo m1 > $x; done + $ hg ci -Aqm "m1" + $ hg up -qr0 + $ mkdir 0 7 + $ touch 0/f 7/f + $ hg mv 1/f 1/g + $ hg cp 3/f 3/g + $ hg mv 4/f 4/g + $ for x in */*; do echo m2 > $x; done + $ hg ci -Aqm "m2" + $ hg up -qr1 + $ mkdir 7 8 + $ echo m > 7/f + $ echo m > 8/f + $ hg merge -f --tool internal:dump -v --debug -r2 | sed '/^updating:/,$d' 2> /dev/null + searching for copies back to rev 1 + unmatched files in local: + 5/g + 6/g + unmatched files in other: + 3/g + 4/g + 7/f + unmatched files 
new in both: + 0/f + 1/g + all copies found (* = to merge, ! = divergent, % = renamed and deleted): + src: '3/f' -> dst: '3/g' * + src: '4/f' -> dst: '4/g' * + src: '5/f' -> dst: '5/g' * + src: '6/f' -> dst: '6/g' * + checking for directory renames + resolving manifests + branchmerge: True, force: True, partial: False + ancestor: e6cb3cf11019, local: ec44bf929ab5+, remote: c62e34d0b898 + remote changed 8/f which local deleted + use (c)hanged version or leave (d)eleted? c + 0/f: versions differ -> m + preserving 0/f for resolve of 0/f + 1/g: versions differ -> m + preserving 1/g for resolve of 1/g + 2/f: versions differ -> m + preserving 2/f for resolve of 2/f + 3/f: versions differ -> m + preserving 3/f for resolve of 3/f + 3/f: remote copied to 3/g -> m + preserving 3/f for resolve of 3/g + 4/f: remote moved to 4/g -> m + preserving 4/f for resolve of 4/g + 5/f: versions differ -> m + preserving 5/f for resolve of 5/f + 5/g: local copied/moved to 5/f -> m + preserving 5/g for resolve of 5/g + 6/g: local copied/moved to 6/f -> m + preserving 6/g for resolve of 6/g + 7/f: remote differs from untracked local -> m + preserving 7/f for resolve of 7/f + 8/f: prompt recreating -> g + removing 4/f + getting 8/f + $ hg mani + 0/f + 1/g + 2/f + 3/f + 4/f + 5/f + 5/g + 6/g + $ for f in */*; do echo $f:; cat $f; done + 0/f: + m1 + 0/f.base: + 0/f.local: + m1 + 0/f.orig: + m1 + 0/f.other: + m2 + 1/g: + m1 + 1/g.base: + a + 1/g.local: + m1 + 1/g.orig: + m1 + 1/g.other: + m2 + 2/f: + m1 + 2/f.base: + a + 2/f.local: + m1 + 2/f.orig: + m1 + 2/f.other: + m2 + 3/f: + m1 + 3/f.base: + a + 3/f.local: + m1 + 3/f.orig: + m1 + 3/f.other: + m2 + 3/g: + m1 + 3/g.base: + a + 3/g.local: + m1 + 3/g.orig: + m1 + 3/g.other: + m2 + 4/g: + m1 + 4/g.base: + a + 4/g.local: + m1 + 4/g.orig: + m1 + 4/g.other: + m2 + 5/f: + m1 + 5/f.base: + a + 5/f.local: + m1 + 5/f.orig: + m1 + 5/f.other: + m2 + 5/g: + m1 + 5/g.base: + a + 5/g.local: + m1 + 5/g.orig: + m1 + 5/g.other: + m2 + 6/g: + m1 + 6/g.base: + a 
+ 6/g.local: + m1 + 6/g.orig: + m1 + 6/g.other: + m2 + 7/f: + m + 7/f.base: + 7/f.local: + m + 7/f.orig: + m + 7/f.other: + m2 + 8/f: + m2 + $ cd ..
--- a/tests/test-requires.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-requires.t Tue Mar 18 14:25:28 2014 -0500 @@ -10,10 +10,12 @@ $ echo indoor-pool > .hg/requires $ hg tip abort: unknown repository format: requires features 'indoor-pool' (upgrade Mercurial)! + (see http://mercurial.selenic.com/wiki/MissingRequirement for details) [255] $ echo outdoor-pool >> .hg/requires $ hg tip abort: unknown repository format: requires features 'indoor-pool', 'outdoor-pool' (upgrade Mercurial)! + (see http://mercurial.selenic.com/wiki/MissingRequirement for details) [255] $ cd .. @@ -61,6 +63,7 @@ $ hg clone supported clone-dst abort: unknown repository format: requires features 'featuresetup-test' (upgrade Mercurial)! + (see http://mercurial.selenic.com/wiki/MissingRequirement for details) [255] $ hg clone --pull supported clone-dst abort: required features are not supported in the destination: featuresetup-test
--- a/tests/test-revset.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-revset.t Tue Mar 18 14:25:28 2014 -0500 @@ -367,6 +367,22 @@ 4 $ log 'id(5)' 2 + $ log 'only(9)' + 8 + 9 + $ log 'only(8)' + 8 + $ log 'only(9, 5)' + 2 + 4 + 8 + 9 + $ log 'only(7 + 9, 5 + 2)' + 4 + 6 + 7 + 8 + 9 $ log 'outgoing()' 8 9 @@ -414,6 +430,16 @@ 2 1 0 + $ log '1:: and reverse(all())' + 9 + 8 + 7 + 6 + 5 + 4 + 3 + 2 + 1 $ log 'rev(5)' 5 $ log 'sort(limit(reverse(all()), 3))' @@ -434,6 +460,138 @@ $ log 'tag(tip)' 9 +test sort revset +-------------------------------------------- + +test when adding two unordered revsets + + $ log 'sort(keyword(issue) or modifies(b))' + 4 + 6 + +test when sorting a reversed collection in the same way it is + + $ log 'sort(reverse(all()), -rev)' + 9 + 8 + 7 + 6 + 5 + 4 + 3 + 2 + 1 + 0 + +test when sorting a reversed collection + + $ log 'sort(reverse(all()), rev)' + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + + +test sorting two sorted collections in different orders + + $ log 'sort(outgoing() or reverse(removes(a)), rev)' + 2 + 6 + 8 + 9 + +test sorting two sorted collections in different orders backwards + + $ log 'sort(outgoing() or reverse(removes(a)), -rev)' + 9 + 8 + 6 + 2 + +test substracting something from an addset + + $ log '(outgoing() or removes(a)) - removes(a)' + 8 + 9 + +test intersecting something with an addset + + $ log 'parents(outgoing() or removes(a))' + 1 + 4 + 5 + 8 + +check that conversion to _missingancestors works + $ try --optimize '::3 - ::1' + (minus + (dagrangepre + ('symbol', '3')) + (dagrangepre + ('symbol', '1'))) + * optimized: + (func + ('symbol', '_missingancestors') + (list + ('symbol', '3') + ('symbol', '1'))) + 3 + $ try --optimize 'ancestors(1) - ancestors(3)' + (minus + (func + ('symbol', 'ancestors') + ('symbol', '1')) + (func + ('symbol', 'ancestors') + ('symbol', '3'))) + * optimized: + (func + ('symbol', '_missingancestors') + (list + ('symbol', '1') + ('symbol', '3'))) + $ try --optimize 'not ::2 and ::6' + 
(and + (not + (dagrangepre + ('symbol', '2'))) + (dagrangepre + ('symbol', '6'))) + * optimized: + (func + ('symbol', '_missingancestors') + (list + ('symbol', '6') + ('symbol', '2'))) + 3 + 4 + 5 + 6 + $ try --optimize 'ancestors(6) and not ancestors(4)' + (and + (func + ('symbol', 'ancestors') + ('symbol', '6')) + (not + (func + ('symbol', 'ancestors') + ('symbol', '4')))) + * optimized: + (func + ('symbol', '_missingancestors') + (list + ('symbol', '6') + ('symbol', '4'))) + 3 + 5 + 6 + we can use patterns when searching for tags $ log 'tag("1..*")'
--- a/tests/test-rollback.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-rollback.t Tue Mar 18 14:25:28 2014 -0500 @@ -184,4 +184,14 @@ $ cat a a - $ cd .. +corrupt journal test + $ echo "foo" > .hg/store/journal + $ hg recover + rolling back interrupted transaction + couldn't read journal entry 'foo\n'! + checking changesets + checking manifests + crosschecking files in changesets and manifests + checking files + 1 files, 2 changesets, 2 total revisions +
--- a/tests/test-run-tests.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-run-tests.t Tue Mar 18 14:25:28 2014 -0500 @@ -65,7 +65,7 @@ Combining esc with other markups - and handling lines ending with \r instead of \n: $ printf 'foo/bar\r' - foo/bar\r (no-eol) (glob) (esc) + fo?/bar\r (no-eol) (glob) (esc) #if windows $ printf 'foo\\bar\r' foo/bar\r (no-eol) (glob) (esc)
--- a/tests/test-shelve.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-shelve.t Tue Mar 18 14:25:28 2014 -0500 @@ -23,10 +23,6 @@ $ hg unshelve unshelving change 'default' - adding changesets - adding manifests - adding file changes - added 1 changesets with 5 changes to 5 files $ hg commit -q -m 'initial commit' @@ -81,11 +77,11 @@ ensure that our shelved changes exist $ hg shelve -l - default-01 (*) [mq]: second.patch (glob) - default (*) [mq]: second.patch (glob) + default-01 (*) changes to '[mq]: second.patch' (glob) + default (*) changes to '[mq]: second.patch' (glob) $ hg shelve -l -p default - default (*) [mq]: second.patch (glob) + default (*) changes to '[mq]: second.patch' (glob) diff --git a/a/a b/a/a --- a/a/a @@ -104,10 +100,8 @@ $ printf "z\na\n" > a/a $ hg unshelve --keep unshelving change 'default-01' - adding changesets - adding manifests - adding file changes - added 1 changesets with 3 changes to 8 files (+1 heads) + temporarily committing pending changes (restore with 'hg unshelve --abort') + rebasing shelved changes merging a/a $ hg revert --all -q @@ -117,10 +111,6 @@ $ hg unshelve unshelving change 'default-01' - adding changesets - adding manifests - adding file changes - added 1 changesets with 3 changes to 8 files $ hg status -C M a/a A b.rename/b @@ -192,10 +182,8 @@ $ hg unshelve unshelving change 'default' - adding changesets - adding manifests - adding file changes - added 1 changesets with 3 changes to 8 files (+1 heads) + temporarily committing pending changes (restore with 'hg unshelve --abort') + rebasing shelved changes merging a/a warning: conflicts during merge. merging a/a incomplete! 
(edit conflicts, then use 'hg resolve --mark') @@ -379,10 +367,8 @@ $ HGMERGE=true hg unshelve unshelving change 'default' - adding changesets - adding manifests - adding file changes - added 1 changesets with 1 changes to 6 files (+1 heads) + temporarily committing pending changes (restore with 'hg unshelve --abort') + rebasing shelved changes merging a/a $ hg parents -q 4:33f7f61e6c5e @@ -400,15 +386,11 @@ shelved as default 0 files updated, 0 files merged, 1 files removed, 0 files unresolved $ hg shelve --list - default (*) create conflict (glob) + default (*) changes to 'create conflict' (glob) $ hg unshelve --keep unshelving change 'default' - adding changesets - adding manifests - adding file changes - added 1 changesets with 1 changes to 7 files $ hg shelve --list - default (*) create conflict (glob) + default (*) changes to 'create conflict' (glob) $ hg shelve --cleanup $ hg shelve --list @@ -424,10 +406,6 @@ * test 4:33f7f61e6c5e $ hg unshelve unshelving change 'test' - adding changesets - adding manifests - adding file changes - added 1 changesets with 1 changes to 7 files $ hg bookmark * test 4:33f7f61e6c5e @@ -437,13 +415,9 @@ shelved as test 0 files updated, 0 files merged, 1 files removed, 0 files unresolved $ hg --config extensions.mq=! shelve --list - test (1s ago) create conflict + test (*) changes to 'create conflict' (glob) $ hg --config extensions.mq=! 
unshelve unshelving change 'test' - adding changesets - adding manifests - adding file changes - added 1 changesets with 1 changes to 7 files shelve should leave dirstate clean (issue 4055) @@ -468,10 +442,7 @@ saved backup bundle to $TESTTMP/shelverebase/.hg/strip-backup/323bfa07f744-backup.hg (glob) $ hg unshelve unshelving change 'default' - adding changesets - adding manifests - adding file changes - added 2 changesets with 2 changes to 2 files (+1 heads) + rebasing shelved changes $ hg status M z @@ -497,10 +468,7 @@ $ hg up -q 1 $ hg unshelve unshelving change 'default' - adding changesets - adding manifests - adding file changes - added 1 changesets with 1 changes to 3 files + rebasing shelved changes $ hg status A d @@ -513,10 +481,7 @@ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved $ hg unshelve unshelving change 'default' - adding changesets - adding manifests - adding file changes - added 1 changesets with 1 changes to 3 files + rebasing shelved changes $ hg status A d @@ -534,10 +499,6 @@ $ hg debugobsolete `hg --debug id -i -r 1` $ hg unshelve unshelving change 'default' - adding changesets - adding manifests - adding file changes - added 1 changesets with 1 changes to 2 files (+1 heads) unshelve should leave unknown files alone (issue4113) @@ -549,10 +510,6 @@ ? e $ hg unshelve unshelving change 'default' - adding changesets - adding manifests - adding file changes - added 1 changesets with 1 changes to 2 files (+1 heads) $ hg status A d ? 
e @@ -568,13 +525,138 @@ $ echo z > e $ hg unshelve unshelving change 'default' - adding changesets - adding manifests - adding file changes - added 1 changesets with 2 changes to 3 files (+1 heads) $ cat e e $ cat e.orig z + +unshelve and conflicts with untracked files + + preparing: + + $ rm *.orig + $ hg ci -qm 'commit stuff' + $ hg phase -p null: + + no other changes - no merge: + + $ echo f > f + $ hg add f + $ hg shelve + shelved as default + 0 files updated, 0 files merged, 1 files removed, 0 files unresolved + $ echo gold > f + $ hg unshelve + unshelving change 'default' + $ hg st + A f + ? f.orig + $ cat f + f + $ cat f.orig + gold + + other uncommitted changes - merge: + + $ hg st + A f + ? f.orig + $ hg shelve + shelved as default + 0 files updated, 0 files merged, 1 files removed, 0 files unresolved + $ hg log -G --template '{rev} {desc|firstline} {author}' -R bundle://.hg/shelved/default.hg -r 'bundle()' + o 4 changes to 'commit stuff' shelve@localhost + | + $ hg log -G --template '{rev} {desc|firstline} {author}' + @ 3 commit stuff test + | + | o 2 c test + |/ + o 0 a test + + $ mv f.orig f + $ echo other change >> a + $ hg unshelve + unshelving change 'default' + temporarily committing pending changes (restore with 'hg unshelve --abort') + rebasing shelved changes + merging f + warning: conflicts during merge. + merging f incomplete! (edit conflicts, then use 'hg resolve --mark') + unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue') + [1] + $ hg log -G --template '{rev} {desc|firstline} {author}' + @ 5 changes to 'commit stuff' shelve@localhost + | + | @ 4 pending changes temporary commit shelve@localhost + |/ + o 3 commit stuff test + | + | o 2 c test + |/ + o 0 a test + + $ hg st + M f + ? f.orig + $ cat f + <<<<<<< local + gold + ======= + f + >>>>>>> other + $ cat f.orig + gold + $ hg unshelve --abort + rebase aborted + unshelve of 'default' aborted + $ hg st + M a + ? 
f.orig + $ cat f.orig + gold + $ hg unshelve + unshelving change 'default' + temporarily committing pending changes (restore with 'hg unshelve --abort') + rebasing shelved changes + $ hg st + M a + A f + ? f.orig + + other committed changes - merge: + + $ hg shelve f + shelved as default + 0 files updated, 0 files merged, 1 files removed, 0 files unresolved + $ hg ci a -m 'intermediate other change' + $ mv f.orig f + $ hg unshelve + unshelving change 'default' + rebasing shelved changes + merging f + warning: conflicts during merge. + merging f incomplete! (edit conflicts, then use 'hg resolve --mark') + unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue') + [1] + $ hg st + M f + ? f.orig + $ cat f + <<<<<<< local + gold + ======= + f + >>>>>>> other + $ cat f.orig + gold + $ hg unshelve --abort + rebase aborted + no changes needed to a + no changes needed to d + no changes needed to e + unshelve of 'default' aborted + $ hg shelve --delete default + $ cd ..
--- a/tests/test-ssh.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-ssh.t Tue Mar 18 14:25:28 2014 -0500 @@ -223,7 +223,7 @@ $ hg push pushing to ssh://user@dummy/remote searching for changes - note: unsynced remote changes! + remote has heads on branch 'default' that are not known locally: 6c0482d977a3 remote: adding changesets remote: adding manifests remote: adding file changes
--- a/tests/test-status-color.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-status-color.t Tue Mar 18 14:25:28 2014 -0500 @@ -1,5 +1,3 @@ - $ "$TESTDIR/hghave" tic || exit 80 - $ echo "[extensions]" >> $HGRCPATH $ echo "color=" >> $HGRCPATH $ echo "[color]" >> $HGRCPATH @@ -186,8 +184,11 @@ \x1b[0;0mC \x1b[0m\x1b[0;0m.hgignore\x1b[0m (esc) \x1b[0;0mC \x1b[0m\x1b[0;0mmodified\x1b[0m (esc) + hg status -A (with terminfo color): +#if tic + $ mkdir "$TESTTMP/terminfo" $ TERMINFO="$TESTTMP/terminfo" tic "$TESTDIR/hgterm.ti" $ TERM=hgterm TERMINFO="$TESTTMP/terminfo" hg status --config color.mode=terminfo --color=always -A @@ -201,6 +202,8 @@ \x1b[30m\x1b[30mC \x1b[30m\x1b[30m\x1b[30m.hgignore\x1b[30m (esc) \x1b[30m\x1b[30mC \x1b[30m\x1b[30m\x1b[30mmodified\x1b[30m (esc) +#endif + $ echo "^ignoreddir$" > .hgignore $ mkdir ignoreddir
--- a/tests/test-up-local-change.t Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-up-local-change.t Tue Mar 18 14:25:28 2014 -0500 @@ -176,6 +176,8 @@ [255] $ hg --debug merge -f searching for copies back to rev 1 + unmatched files new in both: + b resolving manifests branchmerge: True, force: True, partial: False ancestor: c19d34741b0a, local: 1e71731e6fbb+, remote: 83c51d0caff4
--- a/tests/test-url.py Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-url.py Tue Mar 18 14:25:28 2014 -0500 @@ -5,7 +5,7 @@ print (a, b) def cert(cn): - return dict(subject=((('commonName', cn),),)) + return {'subject': ((('commonName', cn),),)} from mercurial.sslutil import _verifycert
--- a/tests/test-wireproto.py Mon Mar 17 14:57:13 2014 -0400 +++ b/tests/test-wireproto.py Tue Mar 18 14:25:28 2014 -0500 @@ -18,7 +18,7 @@ @wireproto.batchable def greet(self, name): f = wireproto.future() - yield wireproto.todict(name=mangle(name)), f + yield {'name': mangle(name)}, f yield unmangle(f.value) class serverrepo(object):