# HG changeset patch # User Matt Mackall # Date 1303321472 18000 # Node ID ac1c75a7c6b56a1fe5bcb14563d3a8788afeaddb # Parent 616ad3f6fd33290dc8b06eab753b1d3604e9e5a1# Parent 3c753f9a2fbcffbc7859d666bf1a86d9702027b0 merge with stable diff -r 3c753f9a2fbc -r ac1c75a7c6b5 .hgignore --- a/.hgignore Tue Apr 19 13:33:43 2011 -0500 +++ b/.hgignore Wed Apr 20 12:44:32 2011 -0500 @@ -7,6 +7,7 @@ *.mergebackup *.o *.so +*.dll *.pyd *.pyc *.pyo diff -r 3c753f9a2fbc -r ac1c75a7c6b5 contrib/check-code.py --- a/contrib/check-code.py Tue Apr 19 13:33:43 2011 -0500 +++ b/contrib/check-code.py Wed Apr 20 12:44:32 2011 -0500 @@ -66,6 +66,7 @@ (r'^source\b', "don't use 'source', use '.'"), (r'touch -d', "don't use 'touch -d', use 'touch -t' instead"), (r'ls\s+[^|-]+\s+-', "options to 'ls' must come before filenames"), + (r'[^>]>\s*\$HGRCPATH', "don't overwrite $HGRCPATH, append to it"), ] testfilters = [ @@ -176,9 +177,10 @@ (r'\([^\)]+\) \w+', "use (int)foo, not (int) foo"), (r'\S+ (\+\+|--)', "use foo++, not foo ++"), (r'\w,\w', "missing whitespace after ,"), - (r'\w[+/*]\w', "missing whitespace in expression"), + (r'^[^#]\w[+/*]\w', "missing whitespace in expression"), (r'^#\s+\w', "use #foo, not # foo"), (r'[^\n]\Z', "no trailing newline"), + (r'^\s*#import\b', "use only #include in standard C code"), ] cfilters = [ diff -r 3c753f9a2fbc -r ac1c75a7c6b5 contrib/python-hook-examples.py --- a/contrib/python-hook-examples.py Tue Apr 19 13:33:43 2011 -0500 +++ b/contrib/python-hook-examples.py Wed Apr 20 12:44:32 2011 -0500 @@ -13,7 +13,7 @@ if kwargs.get('parent2'): return node = kwargs['node'] - first = repo[node].parents()[0].node() + first = repo[node].p1().node() if 'url' in kwargs: last = repo['tip'].node() else: diff -r 3c753f9a2fbc -r ac1c75a7c6b5 contrib/shrink-revlog.py --- a/contrib/shrink-revlog.py Tue Apr 19 13:33:43 2011 -0500 +++ b/contrib/shrink-revlog.py Wed Apr 20 12:44:32 2011 -0500 @@ -102,15 +102,16 @@ ui.status(_('writing revs\n')) - count = [0] - def progress(*args): - ui.progress(_('writing'), count[0], total=len(order)) - count[0] += 1 order = [r1.node(r) for r in order] # this is a bit ugly, but it works - lookup = lambda x: "%020d" % r1.linkrev(r1.rev(x)) + count = [0] + def lookup(x): + count[0] += 1 + ui.progress(_('writing'), count[0], total=len(order)) + return "%020d" % r1.linkrev(r1.rev(x)) + unlookup = lambda x: int(x, 10) try: diff -r 3c753f9a2fbc -r ac1c75a7c6b5 contrib/zsh_completion --- a/contrib/zsh_completion Tue Apr 19 13:33:43 2011 -0500 +++ b/contrib/zsh_completion Wed Apr 20 12:44:32 2011 -0500 @@ -360,8 +360,8 @@ '(--help -h)'{-h,--help}'[display help and exit]' '--debug[debug mode]' '--debugger[start debugger]' - '--encoding[set the charset encoding (default: UTF8)]' - '--encodingmode[set the charset encoding mode (default: strict)]' + '--encoding[set the charset encoding]' + '--encodingmode[set the charset encoding mode]' '--lsprof[print improved command execution profile]' '--traceback[print traceback on exception]' '--time[time how long the command takes]' diff -r 3c753f9a2fbc -r ac1c75a7c6b5 doc/hgrc.5.txt --- a/doc/hgrc.5.txt Tue Apr 19 13:33:43 2011 -0500 +++ b/doc/hgrc.5.txt Wed Apr 20 12:44:32 2011 -0500 @@ -910,9 +910,16 @@ The conflict resolution program to use during a manual merge. For more information on merge tools see :hg:`help merge-tools`. For configuring merge tools see the merge-tools_ section. -``patch`` - command to use to apply patches. Look for ``gpatch`` or ``patch`` in - PATH if unset. +``portablefilenames`` + Check for portable filenames. 
Can be ``warn``, ``ignore`` or ``abort``. + Default is ``warn``. + If set to ``warn`` (or ``true``), a warning message is printed on POSIX + platforms, if a file with a non-portable filename is added (e.g. a file + with a name that can't be created on Windows because it contains reserved + parts like ``AUX`` or reserved characters like ``:``). + If set to ``ignore`` (or ``false``), no warning is printed. + If set to ``abort``, the command is aborted. + On Windows, this configuration option is ignored and the command aborted. ``quiet`` Reduce the amount of output printed. True or False. Default is False. ``remotecmd`` @@ -1085,6 +1092,9 @@ Default is False. ``ipv6`` Whether to use IPv6. Default is False. +``logourl`` + Base URL to use for logos. If unset, ``http://mercurial.selenic.com/`` + will be used. ``name`` Repository name to use in the web interface. Default is current working directory. diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/bugzilla.py --- a/hgext/bugzilla.py Tue Apr 19 13:33:43 2011 -0500 +++ b/hgext/bugzilla.py Wed Apr 20 12:44:32 2011 -0500 @@ -1,6 +1,7 @@ # bugzilla.py - bugzilla integration for mercurial # # Copyright 2006 Vadim Gelfer +# Copyright 2011 Jim Hague # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. @@ -8,97 +9,161 @@ '''hooks for integrating with the Bugzilla bug tracker This hook extension adds comments on bugs in Bugzilla when changesets -that refer to bugs by Bugzilla ID are seen. The hook does not change -bug status. +that refer to bugs by Bugzilla ID are seen. The comment is formatted using +the Mercurial template mechanism. + +The hook does not change bug status. + +Three basic modes of access to Bugzilla are provided: + +1. Access via the Bugzilla XMLRPC interface. Requires Bugzilla 3.4 or later. + +2. Check data via the Bugzilla XMLRPC interface and submit bug change + via email to Bugzilla email interface. Requires Bugzilla 3.4 or later. + +3. Writing directly to the Bugzilla database. Only Bugzilla installations + using MySQL are supported. Requires Python MySQLdb. -The hook updates the Bugzilla database directly. Only Bugzilla -installations using MySQL are supported. +Writing directly to the database is susceptible to schema changes, and +relies on a Bugzilla contrib script to send out bug change +notification emails. This script runs as the user running Mercurial, +must be run on the host with the Bugzilla install, and requires +permission to read Bugzilla configuration details and the necessary +MySQL user and password to have full access rights to the Bugzilla +database. For these reasons this access mode is now considered +deprecated, and will not be updated for new Bugzilla versions going +forward. + +Access via XMLRPC needs a Bugzilla username and password to be specified +in the configuration. Comments are added under that username. Since the +configuration must be readable by all Mercurial users, it is recommended +that the rights of that user are restricted in Bugzilla to the minimum +necessary to add comments. -The hook relies on a Bugzilla script to send bug change notification -emails. That script changes between Bugzilla versions; the -'processmail' script used prior to 2.18 is replaced in 2.18 and -subsequent versions by 'config/sendbugmail.pl'. Note that these will -be run by Mercurial as the user pushing the change; you will need to -ensure the Bugzilla install file permissions are set appropriately. 
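For readers unfamiliar with the Bugzilla XMLRPC interface mentioned above, the calls involved are ordinary ``xmlrpclib`` calls. The following is only an illustrative sketch: the URL and credentials are placeholders, and it leaves out the cookie handling that the ``CookieSafeTransport`` class later in this patch provides, without which calls made after the login are not authenticated::

    import xmlrpclib

    # Placeholder endpoint and credentials; the hook reads the real
    # values from the [bugzilla] configuration section.
    proxy = xmlrpclib.ServerProxy('http://localhost/bugzilla/xmlrpc.cgi')
    proxy.User.login({'login': 'bugs', 'password': 'secret'})

    # Fetch the comment text of bug 1234, the same call the extension
    # uses to check whether a changeset is already mentioned in a bug.
    comments = proxy.Bug.comments({'ids': [1234]})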
+Access via XMLRPC/email uses XMLRPC to query Bugzilla, but sends +email to the Bugzilla email interface to submit comments to bugs. +The From: address in the email is set to the email address of the Mercurial +user, so the comment appears to come from the Mercurial user. In the event +that the Mercurial user email is not recognised by Bugzilla as a Bugzilla +user, the email associated with the Bugzilla username used to log into +Bugzilla is used instead as the source of the comment. + +Configuration items common to all access modes: + +bugzilla.version + This access type to use. Values recognised are: + + :``xmlrpc``: Bugzilla XMLRPC interface. + :``xmlrpc+email``: Bugzilla XMLRPC and email interfaces. + :``3.0``: MySQL access, Bugzilla 3.0 and later. + :``2.18``: MySQL access, Bugzilla 2.18 and up to but not + including 3.0. + :``2.16``: MySQL access, Bugzilla 2.16 and up to but not + including 2.18. + +bugzilla.regexp + Regular expression to match bug IDs in changeset commit message. + Must contain one "()" group. The default expression matches ``Bug + 1234``, ``Bug no. 1234``, ``Bug number 1234``, ``Bugs 1234,5678``, + ``Bug 1234 and 5678`` and variations thereof. Matching is case + insensitive. + +bugzilla.style + The style file to use when formatting comments. + +bugzilla.template + Template to use when formatting comments. Overrides style if + specified. In addition to the usual Mercurial keywords, the + extension specifies: -The extension is configured through three different configuration -sections. These keys are recognized in the [bugzilla] section: + :``{bug}``: The Bugzilla bug ID. + :``{root}``: The full pathname of the Mercurial repository. + :``{webroot}``: Stripped pathname of the Mercurial repository. + :``{hgweb}``: Base URL for browsing Mercurial repositories. + + Default ``changeset {node|short} in repo {root} refers to bug + {bug}.\\ndetails:\\n\\t{desc|tabindent}`` + +bugzilla.strip + The number of path separator characters to strip from the front of + the Mercurial repository path (``{root}`` in templates) to produce + ``{webroot}``. For example, a repository with ``{root}`` + ``/var/local/my-project`` with a strip of 2 gives a value for + ``{webroot}`` of ``my-project``. Default 0. -host - Hostname of the MySQL server holding the Bugzilla database. +web.baseurl + Base URL for browsing Mercurial repositories. Referenced from + templates as ``{hgweb}``. + +Configuration items common to XMLRPC+email and MySQL access modes: + +bugzilla.usermap + Path of file containing Mercurial committer email to Bugzilla user email + mappings. If specified, the file should contain one mapping per + line:: + + committer = Bugzilla user + + See also the ``[usermap]`` section. + +The ``[usermap]`` section is used to specify mappings of Mercurial +committer email to Bugzilla user email. See also ``bugzilla.usermap``. +Contains entries of the form ``committer = Bugzilla user``. -db - Name of the Bugzilla database in MySQL. Default 'bugs'. +XMLRPC access mode configuration: + +bugzilla.bzurl + The base URL for the Bugzilla installation. + Default ``http://localhost/bugzilla``. + +bugzilla.user + The username to use to log into Bugzilla via XMLRPC. Default + ``bugs``. + +bugzilla.password + The password for Bugzilla login. + +XMLRPC+email access mode uses the XMLRPC access mode configuration items, +and also: -user - Username to use to access MySQL server. Default 'bugs'. +bugzilla.bzemail + The Bugzilla email address. + +In addition, the Mercurial email settings must be configured. 
See the +documentation in hgrc(5), sections ``[email]`` and ``[smtp]``. + +MySQL access mode configuration: -password +bugzilla.host + Hostname of the MySQL server holding the Bugzilla database. + Default ``localhost``. + +bugzilla.db + Name of the Bugzilla database in MySQL. Default ``bugs``. + +bugzilla.user + Username to use to access MySQL server. Default ``bugs``. + +bugzilla.password Password to use to access MySQL server. -timeout +bugzilla.timeout Database connection timeout (seconds). Default 5. -version - Bugzilla version. Specify '3.0' for Bugzilla versions 3.0 and later, - '2.18' for Bugzilla versions from 2.18 and '2.16' for versions prior - to 2.18. - -bzuser +bugzilla.bzuser Fallback Bugzilla user name to record comments with, if changeset committer cannot be found as a Bugzilla user. -bzdir +bugzilla.bzdir Bugzilla install directory. Used by default notify. Default - '/var/www/html/bugzilla'. - -notify - The command to run to get Bugzilla to send bug change notification - emails. Substitutes from a map with 3 keys, 'bzdir', 'id' (bug id) - and 'user' (committer bugzilla email). Default depends on version; - from 2.18 it is "cd %(bzdir)s && perl -T contrib/sendbugmail.pl - %(id)s %(user)s". - -regexp - Regular expression to match bug IDs in changeset commit message. - Must contain one "()" group. The default expression matches 'Bug - 1234', 'Bug no. 1234', 'Bug number 1234', 'Bugs 1234,5678', 'Bug - 1234 and 5678' and variations thereof. Matching is case insensitive. - -style - The style file to use when formatting comments. - -template - Template to use when formatting comments. Overrides style if - specified. In addition to the usual Mercurial keywords, the - extension specifies:: + ``/var/www/html/bugzilla``. - {bug} The Bugzilla bug ID. - {root} The full pathname of the Mercurial repository. - {webroot} Stripped pathname of the Mercurial repository. - {hgweb} Base URL for browsing Mercurial repositories. - - Default 'changeset {node|short} in repo {root} refers ' - 'to bug {bug}.\\ndetails:\\n\\t{desc|tabindent}' - -strip - The number of slashes to strip from the front of {root} to produce - {webroot}. Default 0. - -usermap - Path of file containing Mercurial committer ID to Bugzilla user ID - mappings. If specified, the file should contain one mapping per - line, "committer"="Bugzilla user". See also the [usermap] section. - -The [usermap] section is used to specify mappings of Mercurial -committer ID to Bugzilla user ID. See also [bugzilla].usermap. -"committer"="Bugzilla user" - -Finally, the [web] section supports one entry: - -baseurl - Base URL for browsing Mercurial repositories. Reference from - templates as {hgweb}. +bugzilla.notify + The command to run to get Bugzilla to send bug change notification + emails. Substitutes from a map with 3 keys, ``bzdir``, ``id`` (bug + id) and ``user`` (committer bugzilla email). Default depends on + version; from 2.18 it is "cd %(bzdir)s && perl -T + contrib/sendbugmail.pl %(id)s %(user)s". Activating the extension:: @@ -109,11 +174,58 @@ # run bugzilla hook on every change pulled or pushed in here incoming.bugzilla = python:hgext.bugzilla.hook -Example configuration: +Example configurations: + +XMLRPC example configuration. This uses the Bugzilla at +``http://my-project.org/bugzilla``, logging in as user +``bugmail@my-project.org`` with password ``plugh``. It is used with a +collection of Mercurial repositories in ``/var/local/hg/repos/``, +with a web interface at ``http://my-project.org/hg``. 
:: + + [bugzilla] + bzurl=http://my-project.org/bugzilla + user=bugmail@my-project.org + password=plugh + version=xmlrpc + template=Changeset {node|short} in {root|basename}. + {hgweb}/{webroot}/rev/{node|short}\\n + {desc}\\n + strip=5 + + [web] + baseurl=http://my-project.org/hg -This example configuration is for a collection of Mercurial -repositories in /var/local/hg/repos/ used with a local Bugzilla 3.2 -installation in /opt/bugzilla-3.2. :: +XMLRPC+email example configuration. This uses the Bugzilla at +``http://my-project.org/bugzilla``, logging in as user +``bugmail@my-project.org`` wityh password ``plugh``. It is used with a +collection of Mercurial repositories in ``/var/local/hg/repos/``, +with a web interface at ``http://my-project.org/hg``. Bug comments +are sent to the Bugzilla email address +``bugzilla@my-project.org``. :: + + [bugzilla] + bzurl=http://my-project.org/bugzilla + user=bugmail@my-project.org + password=plugh + version=xmlrpc + bzemail=bugzilla@my-project.org + template=Changeset {node|short} in {root|basename}. + {hgweb}/{webroot}/rev/{node|short}\\n + {desc}\\n + strip=5 + + [web] + baseurl=http://my-project.org/hg + + [usermap] + user@emaildomain.com=user.name@bugzilladomain.com + +MySQL example configuration. This has a local Bugzilla 3.2 installation +in ``/opt/bugzilla-3.2``. The MySQL database is on ``localhost``, +the Bugzilla database name is ``bugs`` and MySQL is +accessed with MySQL username ``bugs`` password ``XYZZY``. It is used +with a collection of Mercurial repositories in ``/var/local/hg/repos/``, +with a web interface at ``http://my-project.org/hg``. :: [bugzilla] host=localhost @@ -127,46 +239,98 @@ strip=5 [web] - baseurl=http://dev.domain.com/hg + baseurl=http://my-project.org/hg [usermap] user@emaildomain.com=user.name@bugzilladomain.com -Commits add a comment to the Bugzilla bug record of the form:: +All the above add a comment to the Bugzilla bug record of the form:: Changeset 3b16791d6642 in repository-name. - http://dev.domain.com/hg/repository-name/rev/3b16791d6642 + http://my-project.org/hg/repository-name/rev/3b16791d6642 Changeset commit comment. Bug 1234. ''' from mercurial.i18n import _ from mercurial.node import short -from mercurial import cmdutil, templater, util -import re, time - -MySQLdb = None +from mercurial import cmdutil, mail, templater, util +import re, time, xmlrpclib -def buglist(ids): - return '(' + ','.join(map(str, ids)) + ')' - -class bugzilla_2_16(object): - '''support for bugzilla version 2.16.''' +class bzaccess(object): + '''Base class for access to Bugzilla.''' def __init__(self, ui): self.ui = ui + usermap = self.ui.config('bugzilla', 'usermap') + if usermap: + self.ui.readconfig(usermap, sections=['usermap']) + + def map_committer(self, user): + '''map name of committer to Bugzilla user name.''' + for committer, bzuser in self.ui.configitems('usermap'): + if committer.lower() == user.lower(): + return bzuser + return user + + # Methods to be implemented by access classes. + def filter_real_bug_ids(self, ids): + '''remove bug IDs that do not exist in Bugzilla from set.''' + pass + + def filter_cset_known_bug_ids(self, node, ids): + '''remove bug IDs where node occurs in comment text from set.''' + pass + + def add_comment(self, bugid, text, committer): + '''add comment to bug. + + If possible add the comment as being from the committer of + the changeset. Otherwise use the default Bugzilla user. 
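The access classes that follow all implement this small contract, so, as a purely hypothetical sketch (not part of the real extension), a do-nothing backend that assumes every bug ID exists and merely reports the comment it would have added could be as short as this, with ``notify`` left at its do-nothing default::

    class bzprintonly(bzaccess):
        # Illustrative only: report comments instead of sending them.
        def filter_real_bug_ids(self, ids):
            return ids           # pretend every referenced bug exists

        def filter_cset_known_bug_ids(self, node, ids):
            return ids           # pretend no bug has seen this changeset

        def add_comment(self, bugid, text, committer):
            self.ui.status("bug %d (as %s):\n%s\n"
                           % (bugid, self.map_committer(committer), text))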
+ ''' + pass + + def notify(self, ids, committer): + '''Force sending of Bugzilla notification emails.''' + pass + +# Bugzilla via direct access to MySQL database. +class bzmysql(bzaccess): + '''Support for direct MySQL access to Bugzilla. + + The earliest Bugzilla version this is tested with is version 2.16. + + If your Bugzilla is version 3.2 or above, you are strongly + recommended to use the XMLRPC access method instead. + ''' + + @staticmethod + def sql_buglist(ids): + '''return SQL-friendly list of bug ids''' + return '(' + ','.join(map(str, ids)) + ')' + + _MySQLdb = None + + def __init__(self, ui): + try: + import MySQLdb as mysql + bzmysql._MySQLdb = mysql + except ImportError, err: + raise util.Abort(_('python mysql support not available: %s') % err) + + bzaccess.__init__(self, ui) + host = self.ui.config('bugzilla', 'host', 'localhost') user = self.ui.config('bugzilla', 'user', 'bugs') passwd = self.ui.config('bugzilla', 'password') db = self.ui.config('bugzilla', 'db', 'bugs') timeout = int(self.ui.config('bugzilla', 'timeout', 5)) - usermap = self.ui.config('bugzilla', 'usermap') - if usermap: - self.ui.readconfig(usermap, sections=['usermap']) self.ui.note(_('connecting to %s:%s as %s, password %s\n') % (host, db, user, '*' * len(passwd))) - self.conn = MySQLdb.connect(host=host, user=user, passwd=passwd, - db=db, connect_timeout=timeout) + self.conn = bzmysql._MySQLdb.connect(host=host, + user=user, passwd=passwd, + db=db, + connect_timeout=timeout) self.cursor = self.conn.cursor() self.longdesc_id = self.get_longdesc_id() self.user_ids = {} @@ -177,7 +341,7 @@ self.ui.note(_('query: %s %s\n') % (args, kwargs)) try: self.cursor.execute(*args, **kwargs) - except MySQLdb.MySQLError: + except bzmysql._MySQLdb.MySQLError: self.ui.note(_('failed query: %s %s\n') % (args, kwargs)) raise @@ -190,22 +354,22 @@ return ids[0][0] def filter_real_bug_ids(self, ids): - '''filter not-existing bug ids from list.''' - self.run('select bug_id from bugs where bug_id in %s' % buglist(ids)) - return sorted([c[0] for c in self.cursor.fetchall()]) + '''filter not-existing bug ids from set.''' + self.run('select bug_id from bugs where bug_id in %s' % + bzmysql.sql_buglist(ids)) + return set([c[0] for c in self.cursor.fetchall()]) - def filter_unknown_bug_ids(self, node, ids): - '''filter bug ids from list that already refer to this changeset.''' + def filter_cset_known_bug_ids(self, node, ids): + '''filter bug ids that already refer to this changeset from set.''' self.run('''select bug_id from longdescs where bug_id in %s and thetext like "%%%s%%"''' % - (buglist(ids), short(node))) - unknown = set(ids) + (bzmysql.sql_buglist(ids), short(node))) for (id,) in self.cursor.fetchall(): self.ui.status(_('bug %d already knows about changeset %s\n') % (id, short(node))) - unknown.discard(id) - return sorted(unknown) + ids.discard(id) + return ids def notify(self, ids, committer): '''tell bugzilla to send mail.''' @@ -251,15 +415,8 @@ self.user_ids[user] = userid return userid - def map_committer(self, user): - '''map name of committer to bugzilla user name.''' - for committer, bzuser in self.ui.configitems('usermap'): - if committer.lower() == user.lower(): - return bzuser - return user - def get_bugzilla_user(self, committer): - '''see if committer is a registered bugzilla user. Return + '''See if committer is a registered bugzilla user. Return bugzilla username and userid if so. 
If not, return default bugzilla username and userid.''' user = self.map_committer(committer) @@ -292,19 +449,19 @@ (bugid, userid, now, self.longdesc_id)) self.conn.commit() -class bugzilla_2_18(bugzilla_2_16): +class bzmysql_2_18(bzmysql): '''support for bugzilla 2.18 series.''' def __init__(self, ui): - bugzilla_2_16.__init__(self, ui) + bzmysql.__init__(self, ui) self.default_notify = \ "cd %(bzdir)s && perl -T contrib/sendbugmail.pl %(id)s %(user)s" -class bugzilla_3_0(bugzilla_2_18): +class bzmysql_3_0(bzmysql_2_18): '''support for bugzilla 3.0 series.''' def __init__(self, ui): - bugzilla_2_18.__init__(self, ui) + bzmysql_2_18.__init__(self, ui) def get_longdesc_id(self): '''get identity of longdesc field''' @@ -314,13 +471,176 @@ raise util.Abort(_('unknown database schema')) return ids[0][0] +# Buzgilla via XMLRPC interface. + +class CookieSafeTransport(xmlrpclib.SafeTransport): + """A SafeTransport that retains cookies over its lifetime. + + The regular xmlrpclib transports ignore cookies. Which causes + a bit of a problem when you need a cookie-based login, as with + the Bugzilla XMLRPC interface. + + So this is a SafeTransport which looks for cookies being set + in responses and saves them to add to all future requests. + It appears a SafeTransport can do both HTTP and HTTPS sessions, + which saves us having to do a CookieTransport too. + """ + + # Inspiration drawn from + # http://blog.godson.in/2010/09/how-to-make-python-xmlrpclib-client.html + # http://www.itkovian.net/base/transport-class-for-pythons-xml-rpc-lib/ + + cookies = [] + def send_cookies(self, connection): + if self.cookies: + for cookie in self.cookies: + connection.putheader("Cookie", cookie) + + def request(self, host, handler, request_body, verbose=0): + self.verbose = verbose + + # issue XML-RPC request + h = self.make_connection(host) + if verbose: + h.set_debuglevel(1) + + self.send_request(h, handler, request_body) + self.send_host(h, host) + self.send_cookies(h) + self.send_user_agent(h) + self.send_content(h, request_body) + + # Deal with differences between Python 2.4-2.6 and 2.7. + # In the former h is a HTTP(S). In the latter it's a + # HTTP(S)Connection. Luckily, the 2.4-2.6 implementation of + # HTTP(S) has an underlying HTTP(S)Connection, so extract + # that and use it. + try: + response = h.getresponse() + except AttributeError: + response = h._conn.getresponse() + + # Add any cookie definitions to our list. + for header in response.msg.getallmatchingheaders("Set-Cookie"): + val = header.split(": ", 1)[1] + cookie = val.split(";", 1)[0] + self.cookies.append(cookie) + + if response.status != 200: + raise xmlrpclib.ProtocolError(host + handler, response.status, + response.reason, response.msg.headers) + + payload = response.read() + parser, unmarshaller = self.getparser() + parser.feed(payload) + parser.close() + + return unmarshaller.close() + +class bzxmlrpc(bzaccess): + """Support for access to Bugzilla via the Bugzilla XMLRPC API. + + Requires a minimum Bugzilla version 3.4. 
+ """ + + def __init__(self, ui): + bzaccess.__init__(self, ui) + + bzweb = self.ui.config('bugzilla', 'bzurl', + 'http://localhost/bugzilla/') + bzweb = bzweb.rstrip("/") + "/xmlrpc.cgi" + + user = self.ui.config('bugzilla', 'user', 'bugs') + passwd = self.ui.config('bugzilla', 'password') + + self.bzproxy = xmlrpclib.ServerProxy(bzweb, CookieSafeTransport()) + self.bzproxy.User.login(dict(login=user, password=passwd)) + + def get_bug_comments(self, id): + """Return a string with all comment text for a bug.""" + c = self.bzproxy.Bug.comments(dict(ids=[id])) + return ''.join([t['text'] for t in c['bugs'][str(id)]['comments']]) + + def filter_real_bug_ids(self, ids): + res = set() + bugs = self.bzproxy.Bug.get(dict(ids=sorted(ids), permissive=True)) + for bug in bugs['bugs']: + res.add(bug['id']) + return res + + def filter_cset_known_bug_ids(self, node, ids): + for id in sorted(ids): + if self.get_bug_comments(id).find(short(node)) != -1: + self.ui.status(_('bug %d already knows about changeset %s\n') % + (id, short(node))) + ids.discard(id) + return ids + + def add_comment(self, bugid, text, committer): + self.bzproxy.Bug.add_comment(dict(id=bugid, comment=text)) + +class bzxmlrpcemail(bzxmlrpc): + """Read data from Bugzilla via XMLRPC, send updates via email. + + Advantages of sending updates via email: + 1. Comments can be added as any user, not just logged in user. + 2. Bug statuses and other fields not accessible via XMLRPC can + be updated. This is not currently used. + """ + + def __init__(self, ui): + bzxmlrpc.__init__(self, ui) + + self.bzemail = self.ui.config('bugzilla', 'bzemail') + if not self.bzemail: + raise util.Abort(_("configuration 'bzemail' missing")) + mail.validateconfig(self.ui) + + def send_bug_modify_email(self, bugid, commands, comment, committer): + '''send modification message to Bugzilla bug via email. + + The message format is documented in the Bugzilla email_in.pl + specification. commands is a list of command lines, comment is the + comment text. + + To stop users from crafting commit comments with + Bugzilla commands, specify the bug ID via the message body, rather + than the subject line, and leave a blank line after it. + ''' + user = self.map_committer(committer) + matches = self.bzproxy.User.get(dict(match=[user])) + if not matches['users']: + user = self.ui.config('bugzilla', 'user', 'bugs') + matches = self.bzproxy.User.get(dict(match=[user])) + if not matches['users']: + raise util.Abort(_("default bugzilla user %s email not found") % + user) + user = matches['users'][0]['email'] + + text = "\n".join(commands) + "\n@bug_id = %d\n\n" % bugid + comment + + _charsets = mail._charsets(self.ui) + user = mail.addressencode(self.ui, user, _charsets) + bzemail = mail.addressencode(self.ui, self.bzemail, _charsets) + msg = mail.mimeencode(self.ui, text, _charsets) + msg['From'] = user + msg['To'] = bzemail + msg['Subject'] = mail.headencode(self.ui, "Bug modification", _charsets) + sendmail = mail.connect(self.ui) + sendmail(user, bzemail, msg.as_string()) + + def add_comment(self, bugid, text, committer): + self.send_bug_modify_email(bugid, [], text, committer) + class bugzilla(object): # supported versions of bugzilla. different versions have # different schemas. 
_versions = { - '2.16': bugzilla_2_16, - '2.18': bugzilla_2_18, - '3.0': bugzilla_3_0 + '2.16': bzmysql, + '2.18': bzmysql_2_18, + '3.0': bzmysql_3_0, + 'xmlrpc': bzxmlrpc, + 'xmlrpc+email': bzxmlrpcemail } _default_bug_re = (r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*' @@ -353,10 +673,12 @@ _split_re = None def find_bug_ids(self, ctx): - '''find valid bug ids that are referred to in changeset - comments and that do not already have references to this - changeset.''' + '''return set of integer bug IDs from commit comment. + Extract bug IDs from changeset comments. Filter out any that are + not known to Bugzilla, and any that already have a reference to + the given changeset in their comments. + ''' if bugzilla._bug_re is None: bugzilla._bug_re = re.compile( self.ui.config('bugzilla', 'regexp', bugzilla._default_bug_re), @@ -376,7 +698,7 @@ if ids: ids = self.filter_real_bug_ids(ids) if ids: - ids = self.filter_unknown_bug_ids(ctx.node(), ids) + ids = self.filter_cset_known_bug_ids(ctx.node(), ids) return ids def update(self, bugid, ctx): @@ -418,13 +740,6 @@ '''add comment to bugzilla for each changeset that refers to a bugzilla bug id. only add a comment once per bug, so same change seen multiple times does not fill bug with duplicate data.''' - try: - import MySQLdb as mysql - global MySQLdb - MySQLdb = mysql - except ImportError, err: - raise util.Abort(_('python mysql support not available: %s') % err) - if node is None: raise util.Abort(_('hook type %s does not pass a changeset id') % hooktype) @@ -436,6 +751,6 @@ for id in ids: bz.update(id, ctx) bz.notify(ids, util.email(ctx.user())) - except MySQLdb.MySQLError, err: - raise util.Abort(_('database error: %s') % err.args[1]) + except Exception, e: + raise util.Abort(_('Bugzilla error: %s') % e) diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/color.py --- a/hgext/color.py Tue Apr 19 13:33:43 2011 -0500 +++ b/hgext/color.py Wed Apr 20 12:44:32 2011 -0500 @@ -18,11 +18,11 @@ '''colorize output from some commands -This extension modifies the status and resolve commands to add color to their -output to reflect file status, the qseries command to add color to reflect -patch status (applied, unapplied, missing), and to diff-related -commands to highlight additions, removals, diff headers, and trailing -whitespace. +This extension modifies the status and resolve commands to add color +to their output to reflect file status, the qseries command to add +color to reflect patch status (applied, unapplied, missing), and to +diff-related commands to highlight additions, removals, diff headers, +and trailing whitespace. Other effects in addition to color, like bold and underlined text, are also available. 
Effects are rendered with the ECMA-48 SGR control @@ -107,6 +107,7 @@ 'diff.trailingwhitespace': 'bold red_background', 'diffstat.deleted': 'red', 'diffstat.inserted': 'green', + 'ui.prompt': 'yellow', 'log.changeset': 'yellow', 'resolve.resolved': 'green bold', 'resolve.unresolved': 'red bold', @@ -348,13 +349,15 @@ # Look for ANSI-like codes embedded in text m = re.match(ansire, text) - while m: - for sattr in m.group(1).split(';'): - if sattr: - attr = mapcolor(int(sattr), attr) - _kernel32.SetConsoleTextAttribute(stdout, attr) - orig(m.group(2), **opts) - m = re.match(ansire, m.group(3)) - # Explicity reset original attributes - _kernel32.SetConsoleTextAttribute(stdout, origattr) + try: + while m: + for sattr in m.group(1).split(';'): + if sattr: + attr = mapcolor(int(sattr), attr) + _kernel32.SetConsoleTextAttribute(stdout, attr) + orig(m.group(2), **opts) + m = re.match(ansire, m.group(3)) + finally: + # Explicity reset original attributes + _kernel32.SetConsoleTextAttribute(stdout, origattr) diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/convert/__init__.py --- a/hgext/convert/__init__.py Tue Apr 19 13:33:43 2011 -0500 +++ b/hgext/convert/__init__.py Wed Apr 20 12:44:32 2011 -0500 @@ -10,7 +10,7 @@ import convcmd import cvsps import subversion -from mercurial import commands +from mercurial import commands, templatekw from mercurial.i18n import _ # Commands definition was moved elsewhere to ease demandload job. @@ -334,3 +334,34 @@ ], _('hg debugcvsps [OPTION]... [PATH]...')), } + +def kwconverted(ctx, name): + rev = ctx.extra().get('convert_revision', '') + if rev.startswith('svn:'): + if name == 'svnrev': + return str(subversion.revsplit(rev)[2]) + elif name == 'svnpath': + return subversion.revsplit(rev)[1] + elif name == 'svnuuid': + return subversion.revsplit(rev)[0] + return rev + +def kwsvnrev(repo, ctx, **args): + """:svnrev: String. Converted subversion revision number.""" + return kwconverted(ctx, 'svnrev') + +def kwsvnpath(repo, ctx, **args): + """:svnpath: String. Converted subversion revision project path.""" + return kwconverted(ctx, 'svnpath') + +def kwsvnuuid(repo, ctx, **args): + """:svnuuid: String. Converted subversion revision repository identifier.""" + return kwconverted(ctx, 'svnuuid') + +def extsetup(ui): + templatekw.keywords['svnrev'] = kwsvnrev + templatekw.keywords['svnpath'] = kwsvnpath + templatekw.keywords['svnuuid'] = kwsvnuuid + +# tell hggettext to extract docstrings from these functions: +i18nfunctions = [kwsvnrev, kwsvnpath, kwsvnuuid] diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/convert/common.py --- a/hgext/convert/common.py Tue Apr 19 13:33:43 2011 -0500 +++ b/hgext/convert/common.py Wed Apr 20 12:44:32 2011 -0500 @@ -151,6 +151,13 @@ """ return None + def getbookmarks(self): + """Return the bookmarks as a dictionary of name: revision + + Bookmark names are to be UTF-8 strings. + """ + return {} + class converter_sink(object): """Conversion sink (target) interface""" @@ -228,6 +235,13 @@ def after(self): pass + def putbookmarks(self, bookmarks): + """Put bookmarks into sink. + + bookmarks: {bookmarkname: sink_rev_id, ...} + where bookmarkname is an UTF-8 string. 
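Concretely, the bookmark dictionaries passed between source and sink are plain mappings from bookmark name to a revision identifier; the names and (shortened, invented) hashes below are purely illustrative::

    {'stable':  '4e04f2a19b4e',     # bookmark name -> revision id
     'default': '9d3d4b2e77aa'}

The conversion driver in ``convcmd.py`` (see the hunk further down) translates the source revision ids through the rev map before handing the result to the sink's ``putbookmarks``.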
+ """ + pass class commandline(object): def __init__(self, ui, command): @@ -240,7 +254,7 @@ def postrun(self): pass - def _cmdline(self, cmd, *args, **kwargs): + def _cmdline(self, cmd, closestdin, *args, **kwargs): cmdline = [self.command, cmd] + list(args) for k, v in kwargs.iteritems(): if len(k) == 1: @@ -257,16 +271,23 @@ cmdline = [util.shellquote(arg) for arg in cmdline] if not self.ui.debugflag: cmdline += ['2>', util.nulldev] - cmdline += ['<', util.nulldev] + if closestdin: + cmdline += ['<', util.nulldev] cmdline = ' '.join(cmdline) return cmdline def _run(self, cmd, *args, **kwargs): - cmdline = self._cmdline(cmd, *args, **kwargs) + return self._dorun(util.popen, cmd, True, *args, **kwargs) + + def _run2(self, cmd, *args, **kwargs): + return self._dorun(util.popen2, cmd, False, *args, **kwargs) + + def _dorun(self, openfunc, cmd, closestdin, *args, **kwargs): + cmdline = self._cmdline(cmd, closestdin, *args, **kwargs) self.ui.debug('running: %s\n' % (cmdline,)) self.prerun() try: - return util.popen(cmdline) + return openfunc(cmdline) finally: self.postrun() @@ -322,8 +343,9 @@ self._argmax = self._argmax / 2 - 1 return self._argmax - def limit_arglist(self, arglist, cmd, *args, **kwargs): - limit = self.getargmax() - len(self._cmdline(cmd, *args, **kwargs)) + def limit_arglist(self, arglist, cmd, closestdin, *args, **kwargs): + cmdlen = len(self._cmdline(cmd, closestdin, *args, **kwargs)) + limit = self.getargmax() - cmdlen bytes = 0 fl = [] for fn in arglist: @@ -339,7 +361,7 @@ yield fl def xargs(self, arglist, cmd, *args, **kwargs): - for l in self.limit_arglist(arglist, cmd, *args, **kwargs): + for l in self.limit_arglist(arglist, cmd, True, *args, **kwargs): self.run0(cmd, *(list(args) + l), **kwargs) class mapfile(dict): diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/convert/convcmd.py --- a/hgext/convert/convcmd.py Tue Apr 19 13:33:43 2011 -0500 +++ b/hgext/convert/convcmd.py Wed Apr 20 12:44:32 2011 -0500 @@ -378,6 +378,16 @@ if tagsparents: self.map[tagsparents[0][0]] = nrev + bookmarks = self.source.getbookmarks() + cbookmarks = {} + for k in bookmarks: + v = bookmarks[k] + if self.map.get(v, SKIPREV) != SKIPREV: + cbookmarks[k] = self.map[v] + + if c and cbookmarks: + self.dest.putbookmarks(cbookmarks) + self.writeauthormap() finally: self.cleanup() diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/convert/git.py --- a/hgext/convert/git.py Tue Apr 19 13:33:43 2011 -0500 +++ b/hgext/convert/git.py Wed Apr 20 12:44:32 2011 -0500 @@ -17,19 +17,27 @@ # cannot remove environment variable. Just assume none have # both issues. 
if hasattr(os, 'unsetenv'): - def gitopen(self, s): + def gitopen(self, s, noerr=False): prevgitdir = os.environ.get('GIT_DIR') os.environ['GIT_DIR'] = self.path try: - return util.popen(s, 'rb') + if noerr: + (stdin, stdout, stderr) = util.popen3(s) + return stdout + else: + return util.popen(s, 'rb') finally: if prevgitdir is None: del os.environ['GIT_DIR'] else: os.environ['GIT_DIR'] = prevgitdir else: - def gitopen(self, s): - return util.popen('GIT_DIR=%s %s' % (self.path, s), 'rb') + def gitopen(self, s, noerr=False): + if noerr: + (sin, so, se) = util.popen3('GIT_DIR=%s %s' % (self.path, s)) + return stdout + else: + util.popen('GIT_DIR=%s %s' % (self.path, s), 'rb') def gitread(self, s): fh = self.gitopen(s) @@ -168,3 +176,30 @@ raise util.Abort(_('cannot read changes in %s') % version) return changes + + def getbookmarks(self): + bookmarks = {} + + # Interesting references in git are prefixed + prefix = 'refs/heads/' + prefixlen = len(prefix) + + # factor two commands + gitcmd = { 'remote/': 'git ls-remote --heads origin', + '': 'git show-ref'} + + # Origin heads + for reftype in gitcmd: + try: + fh = self.gitopen(gitcmd[reftype], noerr=True) + for line in fh: + line = line.strip() + rev, name = line.split(None, 1) + if not name.startswith(prefix): + continue + name = '%s%s' % (reftype, name[prefixlen:]) + bookmarks[name] = rev + except: + pass + + return bookmarks diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/convert/hg.py --- a/hgext/convert/hg.py Tue Apr 19 13:33:43 2011 -0500 +++ b/hgext/convert/hg.py Wed Apr 20 12:44:32 2011 -0500 @@ -21,7 +21,7 @@ import os, time, cStringIO from mercurial.i18n import _ from mercurial.node import bin, hex, nullid -from mercurial import hg, util, context, error +from mercurial import hg, util, context, bookmarks, error from common import NoRepo, commit, converter_source, converter_sink @@ -214,6 +214,16 @@ def setfilemapmode(self, active): self.filemapmode = active + def putbookmarks(self, updatedbookmark): + if not len(updatedbookmark): + return + + self.ui.status(_("updating bookmarks\n")) + for bookmark in updatedbookmark: + self.repo._bookmarks[bookmark] = bin(updatedbookmark[bookmark]) + bookmarks.write(self.repo) + + class mercurial_source(converter_source): def __init__(self, ui, path, rev=None): converter_source.__init__(self, ui, path, rev) @@ -374,3 +384,6 @@ return hex(self.repo.lookup(rev)) except error.RepoError: return None + + def getbookmarks(self): + return bookmarks.listbookmarks(self.repo) diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/convert/monotone.py --- a/hgext/convert/monotone.py Tue Apr 19 13:33:43 2011 -0500 +++ b/hgext/convert/monotone.py Wed Apr 20 12:44:32 2011 -0500 @@ -19,6 +19,8 @@ self.ui = ui self.path = path + self.automatestdio = False + self.rev = rev norepo = NoRepo(_("%s does not look like a monotone repository") % path) @@ -64,18 +66,103 @@ checktool('mtn', abort=False) - # test if there are any revisions - self.rev = None - try: - self.getheads() - except: - raise norepo - self.rev = rev + def mtnrun(self, *args, **kwargs): + if self.automatestdio: + return self.mtnrunstdio(*args, **kwargs) + else: + return self.mtnrunsingle(*args, **kwargs) - def mtnrun(self, *args, **kwargs): + def mtnrunsingle(self, *args, **kwargs): kwargs['d'] = self.path return self.run0('automate', *args, **kwargs) + def mtnrunstdio(self, *args, **kwargs): + # Prepare the command in automate stdio format + command = [] + for k, v in kwargs.iteritems(): + command.append("%s:%s" % (len(k), k)) + if v: + command.append("%s:%s" % (len(v), 
v)) + if command: + command.insert(0, 'o') + command.append('e') + + command.append('l') + for arg in args: + command += "%s:%s" % (len(arg), arg) + command.append('e') + command = ''.join(command) + + self.ui.debug("mtn: sending '%s'\n" % command) + self.mtnwritefp.write(command) + self.mtnwritefp.flush() + + return self.mtnstdioreadcommandoutput(command) + + def mtnstdioreadpacket(self): + read = None + commandnbr = '' + while read != ':': + read = self.mtnreadfp.read(1) + if not read: + raise util.Abort(_('bad mtn packet - no end of commandnbr')) + commandnbr += read + commandnbr = commandnbr[:-1] + + stream = self.mtnreadfp.read(1) + if stream not in 'mewptl': + raise util.Abort(_('bad mtn packet - bad stream type %s' % stream)) + + read = self.mtnreadfp.read(1) + if read != ':': + raise util.Abort(_('bad mtn packet - no divider before size')) + + read = None + lengthstr = '' + while read != ':': + read = self.mtnreadfp.read(1) + if not read: + raise util.Abort(_('bad mtn packet - no end of packet size')) + lengthstr += read + try: + length = long(lengthstr[:-1]) + except TypeError: + raise util.Abort(_('bad mtn packet - bad packet size %s') + % lengthstr) + + read = self.mtnreadfp.read(length) + if len(read) != length: + raise util.Abort(_("bad mtn packet - unable to read full packet " + "read %s of %s") % (len(read), length)) + + return (commandnbr, stream, length, read) + + def mtnstdioreadcommandoutput(self, command): + retval = [] + while True: + commandnbr, stream, length, output = self.mtnstdioreadpacket() + self.ui.debug('mtn: read packet %s:%s:%s\n' % + (commandnbr, stream, length)) + + if stream == 'l': + # End of command + if output != '0': + raise util.Abort(_("mtn command '%s' returned %s") % + (command, output)) + break + elif stream in 'ew': + # Error, warning output + self.ui.warn(_('%s error:\n') % self.command) + self.ui.warn(output) + elif stream == 'p': + # Progress messages + self.ui.debug('mtn: ' + output) + elif stream == 'm': + # Main stream - command output + retval.append(output) + + return ''.join(retval) + def mtnloadmanifest(self, rev): if self.manifest_rev == rev: return @@ -204,14 +291,18 @@ return data, attr def getcommit(self, rev): - certs = self.mtngetcerts(rev) + extra = {} + certs = self.mtngetcerts(rev) + if certs.get('suspend') == certs["branch"]: + extra['close'] = '1' return commit( author=certs["author"], date=util.datestr(util.strdate(certs["date"], "%Y-%m-%dT%H:%M:%S")), desc=certs["changelog"], rev=rev, parents=self.mtnrun("parents", rev).splitlines(), - branch=certs["branch"]) + branch=certs["branch"], + extra=extra) def gettags(self): tags = {} @@ -225,3 +316,43 @@ # This function is only needed to support --filemap # ... 
and we don't support that raise NotImplementedError() + + def before(self): + # Check if we have a new enough version to use automate stdio + version = 0.0 + try: + versionstr = self.mtnrunsingle("interface_version") + version = float(versionstr) + except Exception: + raise util.Abort(_("unable to determine mtn automate interface " + "version")) + + if version >= 12.0: + self.automatestdio = True + self.ui.debug("mtn automate version %s - using automate stdio\n" % + version) + + # launch the long-running automate stdio process + self.mtnwritefp, self.mtnreadfp = self._run2('automate', 'stdio', + '-d', self.path) + # read the headers + read = self.mtnreadfp.readline() + if read != 'format-version: 2\n': + raise util.Abort(_('mtn automate stdio header unexpected: %s') + % read) + while read != '\n': + read = self.mtnreadfp.readline() + if not read: + raise util.Abort(_("failed to reach end of mtn automate " + "stdio headers")) + else: + self.ui.debug("mtn automate version %s - not using automate stdio " + "(automate >= 12.0 - mtn >= 0.46 is needed)\n" % version) + + def after(self): + if self.automatestdio: + self.mtnwritefp.close() + self.mtnwritefp = None + self.mtnreadfp.close() + self.mtnreadfp = None + diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/convert/subversion.py --- a/hgext/convert/subversion.py Tue Apr 19 13:33:43 2011 -0500 +++ b/hgext/convert/subversion.py Wed Apr 20 12:44:32 2011 -0500 @@ -41,6 +41,15 @@ class SvnPathNotFound(Exception): pass +def revsplit(rev): + """Parse a revision string and return (uuid, path, revnum).""" + url, revnum = rev.rsplit('@', 1) + parts = url.split('/', 1) + mod = '' + if len(parts) > 1: + mod = '/' + parts[1] + return parts[0][4:], mod, int(revnum) + def geturl(path): try: return svn.client.url_from_path(svn.core.svn_path_canonicalize(path)) @@ -259,6 +268,7 @@ except ValueError: raise util.Abort(_('svn: revision %s is not an integer') % rev) + self.trunkname = self.ui.config('convert', 'svn.trunk', 'trunk').strip('/') self.startrev = self.ui.config('convert', 'svn.startrev', default=0) try: self.startrev = int(self.startrev) @@ -285,7 +295,7 @@ def setrevmap(self, revmap): lastrevs = {} for revid in revmap.iterkeys(): - uuid, module, revnum = self.revsplit(revid) + uuid, module, revnum = revsplit(revid) lastrevnum = lastrevs.setdefault(module, revnum) if revnum > lastrevnum: lastrevs[module] = revnum @@ -380,7 +390,7 @@ files, self.removed, copies = self.expandpaths(rev, paths, parents) else: # Perform a full checkout on roots - uuid, module, revnum = self.revsplit(rev) + uuid, module, revnum = revsplit(rev) entries = svn.client.ls(self.baseurl + urllib.quote(module), optrev(revnum), True, self.ctx) files = [n for n, e in entries.iteritems() @@ -402,7 +412,7 @@ def getcommit(self, rev): if rev not in self.commits: - uuid, module, revnum = self.revsplit(rev) + uuid, module, revnum = revsplit(rev) self.module = module self.reparent(module) # We assume that: @@ -529,16 +539,6 @@ def revnum(self, rev): return int(rev.split('@')[-1]) - def revsplit(self, rev): - url, revnum = rev.rsplit('@', 1) - revnum = int(revnum) - parts = url.split('/', 1) - uuid = parts.pop(0)[4:] - mod = '' - if parts: - mod = '/' + parts[0] - return uuid, mod, revnum - def latest(self, path, stop=0): """Find the latest revid affecting path, up to stop. 
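(As an aside, the module-level ``revsplit`` introduced at the top of this file takes over from the method removed just above.) A quick worked example, with an invented repository UUID::

    >>> revsplit('svn:a2147622-4a9f-4db4-a8d3-13562ff547b2/proj/trunk@42')
    ('a2147622-4a9f-4db4-a8d3-13562ff547b2', '/proj/trunk', 42)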
It may return a revision in a different module, since a branch may be moved without @@ -605,7 +605,7 @@ changed, removed = set(), set() copies = {} - new_module, revnum = self.revsplit(rev)[1:] + new_module, revnum = revsplit(rev)[1:] if new_module != self.module: self.module = new_module self.reparent(self.module) @@ -622,7 +622,7 @@ continue # Copy sources not in parent revisions cannot be # represented, ignore their origin for now - pmodule, prevnum = self.revsplit(parents[0])[1:] + pmodule, prevnum = revsplit(parents[0])[1:] if ent.copyfrom_rev < prevnum: continue copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule) @@ -633,7 +633,7 @@ copies[self.recode(entrypath)] = self.recode(copyfrom_path) elif kind == 0: # gone, but had better be a deleted *file* self.ui.debug("gone from %s\n" % ent.copyfrom_rev) - pmodule, prevnum = self.revsplit(parents[0])[1:] + pmodule, prevnum = revsplit(parents[0])[1:] parentpath = pmodule + "/" + entrypath fromkind = self._checkpath(entrypath, prevnum, pmodule) @@ -659,7 +659,7 @@ if ent.action == 'R' and parents: # If a directory is replacing a file, mark the previous # file as deleted - pmodule, prevnum = self.revsplit(parents[0])[1:] + pmodule, prevnum = revsplit(parents[0])[1:] pkind = self._checkpath(entrypath, prevnum, pmodule) if pkind == svn.core.svn_node_file: removed.add(self.recode(entrypath)) @@ -681,7 +681,7 @@ continue # Copy sources not in parent revisions cannot be # represented, ignore their origin for now - pmodule, prevnum = self.revsplit(parents[0])[1:] + pmodule, prevnum = revsplit(parents[0])[1:] if ent.copyfrom_rev < prevnum: continue copyfrompath = self.getrelpath(ent.copyfrom_path, pmodule) @@ -736,7 +736,7 @@ # ent.copyfrom_rev may not be the actual last revision previd = self.latest(newpath, ent.copyfrom_rev) if previd is not None: - prevmodule, prevnum = self.revsplit(previd)[1:] + prevmodule, prevnum = revsplit(previd)[1:] if prevnum >= self.startrev: parents = [previd] self.ui.note( @@ -761,9 +761,8 @@ author = author and self.recode(author) or '' try: branch = self.module.split("/")[-1] - trunkname = self.ui.config('convert', 'svn.trunk', 'trunk') - if branch == trunkname.strip('/'): - branch = '' + if branch == self.trunkname: + branch = None except IndexError: branch = None @@ -834,7 +833,7 @@ raise IOError() mode = '' try: - new_module, revnum = self.revsplit(rev)[1:] + new_module, revnum = revsplit(rev)[1:] if self.module != new_module: self.module = new_module self.reparent(self.module) @@ -944,6 +943,7 @@ class svn_sink(converter_sink, commandline): commit_re = re.compile(r'Committed revision (\d+).', re.M) + uuid_re = re.compile(r'Repository UUID:\s*(\S+)', re.M) def prerun(self): if self.wc: @@ -964,8 +964,6 @@ def __init__(self, ui, path): - if svn is None: - raise MissingTool(_('Could not load Subversion python bindings')) converter_sink.__init__(self, ui, path) commandline.__init__(self, ui, 'svn') self.delete = [] @@ -1012,8 +1010,8 @@ fp.close() util.set_flags(hook, False, True) - xport = transport.SvnRaTransport(url=geturl(path)) - self.uuid = svn.ra.get_uuid(xport.ra) + output = self.run0('info') + self.uuid = self.uuid_re.search(output).group(1).strip() def wjoin(self, *names): return os.path.join(self.wc, *names) diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/eol.py --- a/hgext/eol.py Tue Apr 19 13:33:43 2011 -0500 +++ b/hgext/eol.py Wed Apr 20 12:44:32 2011 -0500 @@ -73,11 +73,13 @@ only need to these filters until you have prepared a ``.hgeol`` file. 
The ``win32text.forbid*`` hooks provided by the win32text extension -have been unified into a single hook named ``eol.hook``. The hook will -lookup the expected line endings from the ``.hgeol`` file, which means -you must migrate to a ``.hgeol`` file first before using the hook. -Remember to enable the eol extension in the repository where you -install the hook. +have been unified into a single hook named ``eol.checkheadshook``. The +hook will lookup the expected line endings from the ``.hgeol`` file, +which means you must migrate to a ``.hgeol`` file first before using +the hook. ``eol.checkheadshook`` only checks heads, intermediate +invalid revisions will be pushed. To forbid them completely, use the +``eol.checkallhook`` hook. These hooks are best used as +``pretxnchangegroup`` hooks. See :hg:`help patterns` for more information about the glob patterns used. @@ -127,36 +129,119 @@ 'cleverdecode:': tocrlf } +class eolfile(object): + def __init__(self, ui, root, data): + self._decode = {'LF': 'to-lf', 'CRLF': 'to-crlf', 'BIN': 'is-binary'} + self._encode = {'LF': 'to-lf', 'CRLF': 'to-crlf', 'BIN': 'is-binary'} -def hook(ui, repo, node, hooktype, **kwargs): - """verify that files have expected EOLs""" + self.cfg = config.config() + # Our files should not be touched. The pattern must be + # inserted first override a '** = native' pattern. + self.cfg.set('patterns', '.hg*', 'BIN') + # We can then parse the user's patterns. + self.cfg.parse('.hgeol', data) + + isrepolf = self.cfg.get('repository', 'native') != 'CRLF' + self._encode['NATIVE'] = isrepolf and 'to-lf' or 'to-crlf' + iswdlf = ui.config('eol', 'native', os.linesep) in ('LF', '\n') + self._decode['NATIVE'] = iswdlf and 'to-lf' or 'to-crlf' + + include = [] + exclude = [] + for pattern, style in self.cfg.items('patterns'): + key = style.upper() + if key == 'BIN': + exclude.append(pattern) + else: + include.append(pattern) + # This will match the files for which we need to care + # about inconsistent newlines. + self.match = match.match(root, '', [], include, exclude) + + def setfilters(self, ui): + for pattern, style in self.cfg.items('patterns'): + key = style.upper() + try: + ui.setconfig('decode', pattern, self._decode[key]) + ui.setconfig('encode', pattern, self._encode[key]) + except KeyError: + ui.warn(_("ignoring unknown EOL style '%s' from %s\n") + % (style, self.cfg.source('patterns', pattern))) + + def checkrev(self, repo, ctx, files): + failed = [] + for f in (files or ctx.files()): + if f not in ctx: + continue + for pattern, style in self.cfg.items('patterns'): + if not match.match(repo.root, '', [pattern])(f): + continue + target = self._encode[style.upper()] + data = ctx[f].data() + if (target == "to-lf" and "\r\n" in data + or target == "to-crlf" and singlelf.search(data)): + failed.append((str(ctx), target, f)) + break + return failed + +def parseeol(ui, repo, nodes): + try: + for node in nodes: + try: + if node is None: + # Cannot use workingctx.data() since it would load + # and cache the filters before we configure them. 
+ data = repo.wfile('.hgeol').read() + else: + data = repo[node]['.hgeol'].data() + return eolfile(ui, repo.root, data) + except (IOError, LookupError): + pass + except error.ParseError, inst: + ui.warn(_("warning: ignoring .hgeol file due to parse error " + "at %s: %s\n") % (inst.args[1], inst.args[0])) + return None + +def _checkhook(ui, repo, node, headsonly): + # Get revisions to check and touched files at the same time files = set() + revs = set() for rev in xrange(repo[node].rev(), len(repo)): - files.update(repo[rev].files()) - tip = repo['tip'] - for f in files: - if f not in tip: - continue - for pattern, target in ui.configitems('encode'): - if match.match(repo.root, '', [pattern])(f): - data = tip[f].data() - if target == "to-lf" and "\r\n" in data: - raise util.Abort(_("%s should not have CRLF line endings") - % f) - elif target == "to-crlf" and singlelf.search(data): - raise util.Abort(_("%s should not have LF line endings") - % f) - # Ignore other rules for this file - break + revs.add(rev) + if headsonly: + ctx = repo[rev] + files.update(ctx.files()) + for pctx in ctx.parents(): + revs.discard(pctx.rev()) + failed = [] + for rev in revs: + ctx = repo[rev] + eol = parseeol(ui, repo, [ctx.node()]) + if eol: + failed.extend(eol.checkrev(repo, ctx, files)) + if failed: + eols = {'to-lf': 'CRLF', 'to-crlf': 'LF'} + msgs = [] + for node, target, f in failed: + msgs.append(_(" %s in %s should not have %s line endings") % + (f, node, eols[target])) + raise util.Abort(_("end-of-line check failed:\n") + "\n".join(msgs)) + +def checkallhook(ui, repo, node, hooktype, **kwargs): + """verify that files have expected EOLs""" + _checkhook(ui, repo, node, False) + +def checkheadshook(ui, repo, node, hooktype, **kwargs): + """verify that files have expected EOLs""" + _checkhook(ui, repo, node, True) + +# "checkheadshook" used to be called "hook" +hook = checkheadshook def preupdate(ui, repo, hooktype, parent1, parent2): #print "preupdate for %s: %s -> %s" % (repo.root, parent1, parent2) - try: - repo.readhgeol(parent1) - except error.ParseError, inst: - ui.warn(_("warning: ignoring .hgeol file due to parse error " - "at %s: %s\n") % (inst.args[1], inst.args[0])) + repo.loadeol([parent1]) return False def uisetup(ui): @@ -184,66 +269,15 @@ class eolrepo(repo.__class__): - _decode = {'LF': 'to-lf', 'CRLF': 'to-crlf', 'BIN': 'is-binary'} - _encode = {'LF': 'to-lf', 'CRLF': 'to-crlf', 'BIN': 'is-binary'} - - def readhgeol(self, node=None, data=None): - if data is None: - try: - if node is None: - data = self.wfile('.hgeol').read() - else: - data = self[node]['.hgeol'].data() - except (IOError, LookupError): - return None - - if self.ui.config('eol', 'native', os.linesep) in ('LF', '\n'): - self._decode['NATIVE'] = 'to-lf' - else: - self._decode['NATIVE'] = 'to-crlf' - - eol = config.config() - # Our files should not be touched. The pattern must be - # inserted first override a '** = native' pattern. - eol.set('patterns', '.hg*', 'BIN') - # We can then parse the user's patterns. 
- eol.parse('.hgeol', data) - - if eol.get('repository', 'native') == 'CRLF': - self._encode['NATIVE'] = 'to-crlf' - else: - self._encode['NATIVE'] = 'to-lf' - - for pattern, style in eol.items('patterns'): - key = style.upper() - try: - self.ui.setconfig('decode', pattern, self._decode[key]) - self.ui.setconfig('encode', pattern, self._encode[key]) - except KeyError: - self.ui.warn(_("ignoring unknown EOL style '%s' from %s\n") - % (style, eol.source('patterns', pattern))) - - include = [] - exclude = [] - for pattern, style in eol.items('patterns'): - key = style.upper() - if key == 'BIN': - exclude.append(pattern) - else: - include.append(pattern) - - # This will match the files for which we need to care - # about inconsistent newlines. - return match.match(self.root, '', [], include, exclude) + def loadeol(self, nodes): + eol = parseeol(self.ui, self, nodes) + if eol is None: + return None + eol.setfilters(self.ui) + return eol.match def _hgcleardirstate(self): - try: - self._eolfile = self.readhgeol() or self.readhgeol('tip') - except error.ParseError, inst: - ui.warn(_("warning: ignoring .hgeol file due to parse error " - "at %s: %s\n") % (inst.args[1], inst.args[0])) - self._eolfile = None - + self._eolfile = self.loadeol([None, 'tip']) if not self._eolfile: self._eolfile = util.never return diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/extdiff.py --- a/hgext/extdiff.py Tue Apr 19 13:33:43 2011 -0500 +++ b/hgext/extdiff.py Wed Apr 20 12:44:32 2011 -0500 @@ -121,12 +121,12 @@ msg = _('cannot specify --rev and --change at the same time') raise util.Abort(msg) elif change: - node2 = repo.lookup(change) + node2 = cmdutil.revsingle(repo, change, None).node() node1a, node1b = repo.changelog.parents(node2) else: node1a, node2 = cmdutil.revpair(repo, revs) if not revs: - node1b = repo.dirstate.parents()[1] + node1b = repo.dirstate.p2() else: node1b = nullid @@ -187,14 +187,14 @@ # Handle bogus modifies correctly by checking if the files exist if len(common) == 1: common_file = util.localpath(common.pop()) - dir1a = os.path.join(dir1a, common_file) + dir1a = os.path.join(tmproot, dir1a, common_file) label1a = common_file + rev1a - if not os.path.isfile(os.path.join(tmproot, dir1a)): + if not os.path.isfile(dir1a): dir1a = os.devnull if do3way: - dir1b = os.path.join(dir1b, common_file) + dir1b = os.path.join(tmproot, dir1b, common_file) label1b = common_file + rev1b - if not os.path.isfile(os.path.join(tmproot, dir1b)): + if not os.path.isfile(dir1b): dir1b = os.devnull dir2 = os.path.join(dir2root, dir2, common_file) label2 = common_file + rev2 diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/graphlog.py --- a/hgext/graphlog.py Tue Apr 19 13:33:43 2011 -0500 +++ b/hgext/graphlog.py Wed Apr 20 12:44:32 2011 -0500 @@ -324,6 +324,7 @@ except TypeError, e: if len(args) > wrapfn.func_code.co_argcount: raise util.Abort(_('--graph option allows at most one file')) + raise return orig(*args, **kwargs) entry = extensions.wrapcommand(table, cmd, graph) entry[1].append(('G', 'graph', None, _("show the revision DAG"))) diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/hgcia.py --- a/hgext/hgcia.py Tue Apr 19 13:33:43 2011 -0500 +++ b/hgext/hgcia.py Wed Apr 20 12:44:32 2011 -0500 @@ -75,7 +75,7 @@ def fileelems(self): n = self.ctx.node() - f = self.cia.repo.status(self.ctx.parents()[0].node(), n) + f = self.cia.repo.status(self.ctx.p1().node(), n) url = self.url or '' elems = [] for path in f[0]: diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/keyword.py --- a/hgext/keyword.py Tue Apr 19 13:33:43 2011 -0500 +++ 
b/hgext/keyword.py Wed Apr 20 12:44:32 2011 -0500 @@ -60,11 +60,11 @@ control run :hg:`kwdemo`. See :hg:`help templates` for a list of available templates and filters. -Three additional date template filters are provided:: +Three additional date template filters are provided: - utcdate "2006/09/18 15:13:13" - svnutcdate "2006-09-18 15:13:13Z" - svnisodate "2006-09-18 08:13:13 -700 (Mon, 18 Sep 2006)" +:``utcdate``: "2006/09/18 15:13:13" +:``svnutcdate``: "2006-09-18 15:13:13Z" +:``svnisodate``: "2006-09-18 08:13:13 -700 (Mon, 18 Sep 2006)" The default template mappings (view with :hg:`kwdemo -d`) can be replaced with customized keywords and templates. Again, run @@ -109,11 +109,26 @@ } # date like in cvs' $Date -utcdate = lambda x: util.datestr((x[0], 0), '%Y/%m/%d %H:%M:%S') +def utcdate(text): + ''':utcdate: Date. Returns a UTC-date in this format: "2009/08/18 11:00:13". + ''' + return util.datestr((text[0], 0), '%Y/%m/%d %H:%M:%S') # date like in svn's $Date -svnisodate = lambda x: util.datestr(x, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)') +def svnisodate(text): + ''':svnisodate: Date. Returns a date in this format: "2009-08-18 13:00:13 + +0200 (Tue, 18 Aug 2009)". + ''' + return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)') # date like in svn's $Id -svnutcdate = lambda x: util.datestr((x[0], 0), '%Y-%m-%d %H:%M:%SZ') +def svnutcdate(text): + ''':svnutcdate: Date. Returns a UTC-date in this format: "2009-08-18 + 11:00:13Z". + ''' + return util.datestr((text[0], 0), '%Y-%m-%d %H:%M:%SZ') + +templatefilters.filters.update({'utcdate': utcdate, + 'svnisodate': svnisodate, + 'svnutcdate': svnutcdate}) # make keyword tools accessible kwtools = {'templater': None, 'hgcmd': ''} @@ -176,9 +191,6 @@ for k, v in kwmaps) else: self.templates = _defaultkwmaps(self.ui) - templatefilters.filters.update({'utcdate': utcdate, - 'svnisodate': svnisodate, - 'svnutcdate': svnutcdate}) @util.propertycache def escape(self): diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/mq.py --- a/hgext/mq.py Tue Apr 19 13:33:43 2011 -0500 +++ b/hgext/mq.py Wed Apr 20 12:44:32 2011 -0500 @@ -899,7 +899,7 @@ else: p.write("# HG changeset patch\n") p.write("# Parent " - + hex(repo[None].parents()[0].node()) + "\n") + + hex(repo[None].p1().node()) + "\n") if user: p.write("# User " + user + "\n") if date: @@ -1054,7 +1054,7 @@ heads += ls if not heads: heads = [nullid] - if repo.dirstate.parents()[0] not in heads and not exact: + if repo.dirstate.p1() not in heads and not exact: self.ui.status(_("(working directory not at a head)\n")) if not self.series: @@ -1148,7 +1148,7 @@ ret = self.apply(repo, s, list, all_files=all_files) except: self.ui.warn(_('cleaning up working directory...')) - node = repo.dirstate.parents()[0] + node = repo.dirstate.p1() hg.revert(repo, node, None) # only remove unknown files that we know we touched or # created while patching @@ -1899,7 +1899,7 @@ With -g/--git, patches imported with --rev will use the git diff format. See the diffs help topic for information on why this is important for preserving rename/copy information and permission - changes. + changes. Use :hg:`qfinish` to remove changesets from mq control. To import a patch from standard input, pass - as the patch file. 
When importing from standard input, a patch name must be specified diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/notify.py --- a/hgext/notify.py Tue Apr 19 13:33:43 2011 -0500 +++ b/hgext/notify.py Wed Apr 20 12:44:32 2011 -0500 @@ -249,7 +249,7 @@ def diff(self, ctx, ref=None): maxdiff = int(self.ui.config('notify', 'maxdiff', 300)) - prev = ctx.parents()[0].node() + prev = ctx.p1().node() ref = ref and ref.node() or ctx.node() chunks = patch.diff(self.repo, prev, ref, opts=patch.diffopts(self.ui)) difflines = ''.join(chunks).splitlines() diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/rebase.py --- a/hgext/rebase.py Tue Apr 19 13:33:43 2011 -0500 +++ b/hgext/rebase.py Wed Apr 20 12:44:32 2011 -0500 @@ -15,7 +15,7 @@ ''' from mercurial import hg, util, repair, merge, cmdutil, commands -from mercurial import extensions, ancestor, copies, patch +from mercurial import extensions, copies, patch from mercurial.commands import templateopts from mercurial.node import nullrev from mercurial.lock import release @@ -90,7 +90,8 @@ contf = opts.get('continue') abortf = opts.get('abort') collapsef = opts.get('collapse', False) - extrafn = opts.get('extrafn') + collapsemsg = cmdutil.logmessage(opts) + extrafn = opts.get('extrafn') # internal, used by e.g. hgsubversion keepf = opts.get('keep', False) keepbranchesf = opts.get('keepbranches', False) detachf = opts.get('detach', False) @@ -98,6 +99,10 @@ # other extensions keepopen = opts.get('keepopen', False) + if collapsemsg and not collapsef: + raise util.Abort( + _('message can only be specified with collapse')) + if contf or abortf: if contf and abortf: raise util.Abort(_('cannot use both abort and continue')) @@ -109,6 +114,8 @@ if srcf or basef or destf: raise util.Abort( _('abort and continue do not allow specifying revisions')) + if opts.get('tool', False): + ui.warn(_('tool option will be ignored\n')) (originalwd, target, state, skipped, collapsef, keepf, keepbranchesf, external) = restorestatus(repo) @@ -138,8 +145,7 @@ external = checkexternal(repo, state, targetancestors) if keepbranchesf: - if extrafn: - raise util.Abort(_('cannot use both keepbranches and extrafn')) + assert not extrafn, 'cannot use both keepbranches and extrafn' def extrafn(ctx, extra): extra['branch'] = ctx.branch() @@ -163,10 +169,14 @@ if len(repo.parents()) == 2: repo.ui.debug('resuming interrupted rebase\n') else: - stats = rebasenode(repo, rev, p1, p2, state) - if stats and stats[3] > 0: - raise util.Abort(_('unresolved conflicts (see hg ' - 'resolve, then hg rebase --continue)')) + try: + ui.setconfig('ui', 'forcemerge', opts.get('tool', '')) + stats = rebasenode(repo, rev, p1, state) + if stats and stats[3] > 0: + raise util.Abort(_('unresolved conflicts (see hg ' + 'resolve, then hg rebase --continue)')) + finally: + ui.setconfig('ui', 'forcemerge', '') updatedirstate(repo, rev, target, p2) if not collapsef: newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn) @@ -190,11 +200,14 @@ if collapsef and not keepopen: p1, p2 = defineparents(repo, min(state), target, state, targetancestors) - commitmsg = 'Collapsed revision' - for rebased in state: - if rebased not in skipped and state[rebased] != nullmerge: - commitmsg += '\n* %s' % repo[rebased].description() - commitmsg = ui.edit(commitmsg, repo.ui.username()) + if collapsemsg: + commitmsg = collapsemsg + else: + commitmsg = 'Collapsed revision' + for rebased in state: + if rebased not in skipped and state[rebased] != nullmerge: + commitmsg += '\n* %s' % repo[rebased].description() + commitmsg = ui.edit(commitmsg, 
repo.ui.username()) newrev = concludenode(repo, rev, p1, external, commitmsg=commitmsg, extrafn=extrafn) @@ -221,25 +234,6 @@ finally: release(lock, wlock) -def rebasemerge(repo, rev, first=False): - 'return the correct ancestor' - oldancestor = ancestor.ancestor - - def newancestor(a, b, pfunc): - if b == rev: - return repo[rev].parents()[0].rev() - return oldancestor(a, b, pfunc) - - if not first: - ancestor.ancestor = newancestor - else: - repo.ui.debug("first revision, do not change ancestor\n") - try: - stats = merge.update(repo, rev, True, True, False) - return stats - finally: - ancestor.ancestor = oldancestor - def checkexternal(repo, state, targetancestors): """Check whether one or more external revisions need to be taken in consideration. In the latter case, abort. @@ -293,7 +287,7 @@ repo.dirstate.invalidate() raise -def rebasenode(repo, rev, p1, p2, state): +def rebasenode(repo, rev, p1, state): 'Rebase a single revision' # Merge phase # Update to target and merge it with local @@ -304,9 +298,10 @@ repo.ui.debug(" already in target\n") repo.dirstate.write() repo.ui.debug(" merge against %d:%s\n" % (repo[rev].rev(), repo[rev])) - first = repo[rev].rev() == repo[min(state)].rev() - stats = rebasemerge(repo, rev, first) - return stats + base = None + if repo[rev].rev() != repo[min(state)].rev(): + base = repo[rev].p1().node() + return merge.update(repo, rev, True, True, False, base) def defineparents(repo, rev, target, state, targetancestors): 'Return the new parent relationship of the revision that will be rebased' @@ -354,6 +349,8 @@ 'Update rebased mq patches - finalize and then import them' mqrebase = {} mq = repo.mq + original_series = mq.full_series[:] + for p in mq.applied: rev = repo[p.node].rev() if rev in state: @@ -371,6 +368,15 @@ repo.ui.debug('import mq patch %d (%s)\n' % (state[rev], name)) mq.qimport(repo, (), patchname=name, git=isgit, rev=[str(state[rev])]) + + # Restore missing guards + for s in original_series: + pname = mq.guard_re.split(s, 1)[0] + if pname in mq.full_series: + repo.ui.debug('restoring guard for patch %s' % (pname)) + mq.full_series.remove(pname) + mq.full_series.append(s) + mq.series_dirty = True mq.save_dirty() def storestatus(repo, originalwd, target, state, collapse, keep, keepbranches, @@ -475,9 +481,10 @@ if src: commonbase = repo[src].ancestor(repo[dest]) + samebranch = repo[src].branch() == repo[dest].branch() if commonbase == repo[src]: raise util.Abort(_('source is ancestor of destination')) - if commonbase == repo[dest]: + if samebranch and commonbase == repo[dest]: raise util.Abort(_('source is descendant of destination')) source = repo[src].rev() if detach: @@ -565,10 +572,15 @@ ('d', 'dest', '', _('rebase onto the specified changeset'), _('REV')), ('', 'collapse', False, _('collapse the rebased changesets')), + ('m', 'message', '', + _('use text as collapse commit message'), _('TEXT')), + ('l', 'logfile', '', + _('read collapse commit message from file'), _('FILE')), ('', 'keep', False, _('keep original changesets')), ('', 'keepbranches', False, _('keep original branch names')), ('', 'detach', False, _('force detaching of source from its original ' 'branch')), + ('t', 'tool', '', _('specify merge tool')), ('c', 'continue', False, _('continue an interrupted rebase')), ('a', 'abort', False, _('abort an interrupted rebase'))] + templateopts, diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/record.py --- a/hgext/record.py Tue Apr 19 13:33:43 2011 -0500 +++ b/hgext/record.py Wed Apr 20 12:44:32 2011 -0500 @@ -324,10 +324,12 @@ for i, chunk 
in enumerate(h.hunks): if skipfile is None and skipall is None: chunk.pretty(ui) - msg = (total == 1 - and (_('record this change to %r?') % chunk.filename()) - or (_('record change %d/%d to %r?') % - (pos - len(h.hunks) + i, total, chunk.filename()))) + if total == 1: + msg = _('record this change to %r?') % chunk.filename() + else: + idx = pos - len(h.hunks) + i + msg = _('record change %d/%d to %r?') % (idx, total, + chunk.filename()) r, skipfile, skipall = prompt(skipfile, skipall, msg) if r: if fixoffset: @@ -467,7 +469,7 @@ # 3a. apply filtered patch to clean repo (clean) if backups: - hg.revert(repo, repo.dirstate.parents()[0], + hg.revert(repo, repo.dirstate.p1(), lambda key: key in backups) # 3b. (apply) @@ -533,6 +535,9 @@ "record": (record, commands.table['^commit|ci'][1], # same options as commit _('hg record [OPTION]... [FILE]...')), + "qrecord": + (qrecord, {}, # placeholder until mq is available + _('hg qrecord [OPTION]... PATCH [FILE]...')), } diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/relink.py --- a/hgext/relink.py Tue Apr 19 13:33:43 2011 -0500 +++ b/hgext/relink.py Wed Apr 20 12:44:32 2011 -0500 @@ -38,9 +38,9 @@ """ if not hasattr(util, 'samefile') or not hasattr(util, 'samedevice'): raise util.Abort(_('hardlinks are not supported on this system')) - src = hg.repository( - hg.remoteui(repo, opts), - ui.expandpath(origin or 'default-relink', origin or 'default')) + src = hg.repository(hg.remoteui(repo, opts), + ui.expandpath(origin or 'default-relink', + origin or 'default')) if not src.local(): raise util.Abort(_('must specify local origin repository')) ui.status(_('relinking %s to %s\n') % (src.store.path, repo.store.path)) @@ -172,8 +172,8 @@ ui.progress(_('relinking'), None) - ui.status(_('relinked %d files (%d bytes reclaimed)\n') % - (relinked, savedbytes)) + ui.status(_('relinked %d files (%s reclaimed)\n') % + (relinked, util.bytecount(savedbytes))) cmdtable = { 'relink': ( diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/schemes.py --- a/hgext/schemes.py Tue Apr 19 13:33:43 2011 -0500 +++ b/hgext/schemes.py Wed Apr 20 12:44:32 2011 -0500 @@ -40,8 +40,9 @@ same name. """ -import re -from mercurial import hg, templater +import os, re +from mercurial import extensions, hg, templater, url as urlmod, util +from mercurial.i18n import _ class ShortRepository(object): @@ -58,6 +59,7 @@ return '' % self.scheme def instance(self, ui, url, create): + # Should this use urlmod.url(), or is manual parsing better? 
url = url.split('://', 1)[1] parts = url.split('/', self.parts) if len(parts) > self.parts: @@ -69,6 +71,12 @@ url = ''.join(self.templater.process(self.url, context)) + tail return hg._lookup(url).instance(ui, url, create) +def hasdriveletter(orig, path): + for scheme in schemes: + if path.startswith(scheme + ':'): + return False + return orig(path) + schemes = { 'py': 'http://hg.python.org/', 'bb': 'https://bitbucket.org/', @@ -81,4 +89,10 @@ schemes.update(dict(ui.configitems('schemes'))) t = templater.engine(lambda x: x) for scheme, url in schemes.items(): + if (os.name == 'nt' and len(scheme) == 1 and scheme.isalpha() + and os.path.exists('%s:\\' % scheme)): + raise util.Abort(_('custom scheme %s:// conflicts with drive ' + 'letter %s:\\\n') % (scheme, scheme.upper())) hg.schemes[scheme] = ShortRepository(url, scheme, t) + + extensions.wrapfunction(urlmod, 'hasdriveletter', hasdriveletter) diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/transplant.py --- a/hgext/transplant.py Tue Apr 19 13:33:43 2011 -0500 +++ b/hgext/transplant.py Wed Apr 20 12:44:32 2011 -0500 @@ -17,7 +17,7 @@ import os, tempfile from mercurial import bundlerepo, cmdutil, hg, merge, match from mercurial import patch, revlog, util, error -from mercurial import revset +from mercurial import revset, templatekw class transplantentry(object): def __init__(self, lnode, rnode): @@ -177,12 +177,11 @@ lock.release() wlock.release() - def filter(self, filter, changelog, patchfile): + def filter(self, filter, node, changelog, patchfile): '''arbitrarily rewrite changeset before applying it''' self.ui.status(_('filtering %s\n') % patchfile) user, date, msg = (changelog[1], changelog[2], changelog[4]) - fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-') fp = os.fdopen(fd, 'w') fp.write("# HG changeset patch\n") @@ -194,7 +193,9 @@ try: util.system('%s %s %s' % (filter, util.shellquote(headerfile), util.shellquote(patchfile)), - environ={'HGUSER': changelog[1]}, + environ={'HGUSER': changelog[1], + 'HGREVISION': revlog.hex(node), + }, onerr=util.Abort, errprefix=_('filter failed')) user, date, msg = self.parselog(file(headerfile))[1:4] finally: @@ -209,7 +210,7 @@ date = "%d %d" % (time, timezone) extra = {'transplant_source': node} if filter: - (user, date, message) = self.filter(filter, cl, patchfile) + (user, date, message) = self.filter(filter, node, cl, patchfile) if log: # we don't translate messages inserted into commits @@ -236,7 +237,7 @@ seriespath = os.path.join(self.path, 'series') if os.path.exists(seriespath): os.unlink(seriespath) - p1 = repo.dirstate.parents()[0] + p1 = repo.dirstate.p1() p2 = node self.log(user, date, message, p1, p2, merge=merge) self.ui.write(str(inst) + '\n') @@ -345,6 +346,8 @@ message = [] node = revlog.nullid inmsg = False + user = None + date = None for line in fp.read().splitlines(): if inmsg: message.append(line) @@ -359,6 +362,8 @@ elif not line.startswith('# '): inmsg = True message.append(line) + if None in (user, date): + raise util.Abort(_("filter corrupted changeset (no user or date)")) return (node, user, date, '\n'.join(message), parents) def log(self, user, date, message, p1, p2, merge=False): @@ -547,8 +552,8 @@ if source: sourcerepo = ui.expandpath(source) source = hg.repository(ui, sourcerepo) - source, incoming, bundle = bundlerepo.getremotechanges(ui, repo, source, - force=True) + source, common, incoming, bundle = bundlerepo.getremotechanges(ui, repo, + source, force=True) else: source = repo @@ -607,8 +612,15 @@ cs.add(r) return [r for r in s if r in cs] +def 
kwtransplanted(repo, ctx, **args): + """:transplanted: String. The node identifier of the transplanted + changeset if any.""" + n = ctx.extra().get('transplant_source') + return n and revlog.hex(n) or '' + def extsetup(ui): revset.symbols['transplanted'] = revsettransplanted + templatekw.keywords['transplanted'] = kwtransplanted cmdtable = { "transplant": @@ -632,4 +644,4 @@ } # tell hggettext to extract docstrings from these functions: -i18nfunctions = [revsettransplanted] +i18nfunctions = [revsettransplanted, kwtransplanted] diff -r 3c753f9a2fbc -r ac1c75a7c6b5 hgext/zeroconf/Zeroconf.py --- a/hgext/zeroconf/Zeroconf.py Tue Apr 19 13:33:43 2011 -0500 +++ b/hgext/zeroconf/Zeroconf.py Wed Apr 20 12:44:32 2011 -0500 @@ -237,14 +237,14 @@ """Class accessor""" try: return _CLASSES[clazz] - except: + except KeyError: return "?(%s)" % (clazz) def getType(self, type): """Type accessor""" try: return _TYPES[type] - except: + except KeyError: return "?(%s)" % (type) def toString(self, hdr, other): @@ -360,7 +360,7 @@ """String representation""" try: return socket.inet_ntoa(self.address) - except: + except Exception: return self.address class DNSHinfo(DNSRecord): @@ -790,7 +790,7 @@ """Adds an entry""" try: list = self.cache[entry.key] - except: + except KeyError: list = self.cache[entry.key] = [] list.append(entry) @@ -799,7 +799,7 @@ try: list = self.cache[entry.key] list.remove(entry) - except: + except KeyError: pass def get(self, entry): @@ -808,7 +808,7 @@ try: list = self.cache[entry.key] return list[list.index(entry)] - except: + except KeyError: return None def getByDetails(self, name, type, clazz): @@ -821,7 +821,7 @@ """Returns a list of entries whose key matches the name.""" try: return self.cache[name] - except: + except KeyError: return [] def entries(self): @@ -829,7 +829,7 @@ def add(x, y): return x+y try: return reduce(add, self.cache.values()) - except: + except Exception: return [] @@ -869,10 +869,10 @@ for socket in rr: try: self.readers[socket].handle_read() - except: + except Exception: if not globals()['_GLOBAL_DONE']: traceback.print_exc() - except: + except Exception: pass def getReaders(self): @@ -988,7 +988,7 @@ callback = lambda x: self.listener.removeService(x, self.type, record.alias) self.list.append(callback) return - except: + except Exception: if not expired: self.services[record.alias.lower()] = record callback = lambda x: self.listener.addService(x, self.type, record.alias) @@ -1117,7 +1117,7 @@ result[key] = value self.properties = result - except: + except Exception: traceback.print_exc() self.properties = None @@ -1255,7 +1255,7 @@ try: self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) - except: + except Exception: # SO_REUSEADDR should be equivalent to SO_REUSEPORT for # multicast UDP sockets (p 731, "TCP/IP Illustrated, # Volume 2"), but some BSD-derived systems require @@ -1270,7 +1270,7 @@ self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1) try: self.socket.bind(self.group) - except: + except Exception: # Some versions of linux raise an exception even though # the SO_REUSE* options have been set, so ignore it # @@ -1370,7 +1370,7 @@ self.servicetypes[info.type]-=1 else: del self.servicetypes[info.type] - except: + except KeyError: pass now = currentTimeMillis() nextTime = now @@ -1455,7 +1455,7 @@ try: self.listeners.remove(listener) self.notifyAll() - except: + except Exception: pass def updateRecord(self, now, rec): @@ -1528,7 +1528,7 @@ out.addAnswer(msg, 
DNSText(question.name, _TYPE_TXT, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.text)) if question.type == _TYPE_SRV: out.addAdditionalAnswer(DNSAddress(service.server, _TYPE_A, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.address)) - except: + except Exception: traceback.print_exc() if out is not None and out.answers: @@ -1541,7 +1541,7 @@ #temp = DNSIncoming(out.packet()) try: self.socket.sendto(out.packet(), 0, (addr, port)) - except: + except Exception: # Ignore this, it may be a temporary loss of network connection pass diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/ancestor.py --- a/mercurial/ancestor.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/ancestor.py Wed Apr 20 12:44:32 2011 -0500 @@ -9,9 +9,10 @@ def ancestor(a, b, pfunc): """ - return a minimal-distance ancestor of nodes a and b, or None if there is no - such ancestor. Note that there can be several ancestors with the same - (minimal) distance, and the one returned is arbitrary. + Returns the common ancestor of a and b that is furthest from a + root (as measured by longest path) or None if no ancestor is + found. If there are multiple common ancestors at the same + distance, the first one found is returned. pfunc must return a list of parent vertices for a given vertex """ @@ -22,6 +23,7 @@ a, b = sorted([a, b]) # find depth from root of all ancestors + # depth is stored as a negative for heapq parentcache = {} visit = [a, b] depth = {} @@ -39,6 +41,7 @@ if p not in depth: visit.append(p) if visit[-1] == vertex: + # -(maximum distance of parents + 1) depth[vertex] = min([depth[p] for p in pl]) - 1 visit.pop() diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/archival.py --- a/mercurial/archival.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/archival.py Wed Apr 20 12:44:32 2011 -0500 @@ -9,7 +9,7 @@ from node import hex import cmdutil import util, encoding -import cStringIO, os, stat, tarfile, time, zipfile +import cStringIO, os, tarfile, time, zipfile import zlib, gzip def tidyprefix(dest, kind, prefix): @@ -172,10 +172,10 @@ # unzip will not honor unix file modes unless file creator is # set to unix (id 3). 
i.create_system = 3 - ftype = stat.S_IFREG + ftype = 0x8000 # UNX_IFREG in unzip source code if islink: mode = 0777 - ftype = stat.S_IFLNK + ftype = 0xa000 # UNX_IFLNK in unzip source code i.external_attr = (mode | ftype) << 16L self.z.writestr(i, data) diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/bdiff.c --- a/mercurial/bdiff.c Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/bdiff.c Wed Apr 20 12:44:32 2011 -0500 @@ -49,7 +49,7 @@ #include "util.h" struct line { - int h, len, n, e; + int hash, len, n, e; const char *l; }; @@ -63,9 +63,10 @@ struct hunk *next; }; -int splitlines(const char *a, int len, struct line **lr) +static int splitlines(const char *a, int len, struct line **lr) { - int h, i; + unsigned hash; + int i; const char *p, *b = a; const char * const plast = a + len - 1; struct line *l; @@ -81,14 +82,14 @@ return -1; /* build the line array and calculate hashes */ - h = 0; + hash = 0; for (p = a; p < a + len; p++) { /* Leonid Yuriev's hash */ - h = (h * 1664525) + *p + 1013904223; + hash = (hash * 1664525) + (unsigned char)*p + 1013904223; if (*p == '\n' || p == plast) { - l->h = h; - h = 0; + l->hash = hash; + hash = 0; l->len = p - b + 1; l->l = b; l->n = INT_MAX; @@ -98,14 +99,15 @@ } /* set up a sentinel */ - l->h = l->len = 0; + l->hash = 0; + l->len = 0; l->l = a + len; return i - 1; } -int inline cmp(struct line *a, struct line *b) +static inline int cmp(struct line *a, struct line *b) { - return a->h != b->h || a->len != b->len || memcmp(a->l, b->l, a->len); + return a->hash != b->hash || a->len != b->len || memcmp(a->l, b->l, a->len); } static int equatelines(struct line *a, int an, struct line *b, int bn) @@ -138,7 +140,7 @@ /* add lines to the hash table chains */ for (i = bn - 1; i >= 0; i--) { /* find the equivalence class */ - for (j = b[i].h & buckets; h[j].pos != INT_MAX; + for (j = b[i].hash & buckets; h[j].pos != INT_MAX; j = (j + 1) & buckets) if (!cmp(b + i, b + h[j].pos)) break; @@ -156,7 +158,7 @@ /* match items in a to their equivalence class in b */ for (i = 0; i < an; i++) { /* find the equivalence class */ - for (j = a[i].h & buckets; h[j].pos != INT_MAX; + for (j = a[i].hash & buckets; h[j].pos != INT_MAX; j = (j + 1) & buckets) if (!cmp(a + i, b + h[j].pos)) break; diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/bookmarks.py --- a/mercurial/bookmarks.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/bookmarks.py Wed Apr 20 12:44:32 2011 -0500 @@ -101,13 +101,7 @@ if current == mark: return - refs = repo._bookmarks - - # do not update if we do update to a rev equal to the current bookmark - if (mark and mark not in refs and - current and refs[current] == repo.changectx('.').node()): - return - if mark not in refs: + if mark not in repo._bookmarks: mark = '' if not valid(mark): raise util.Abort(_("bookmark '%s' contains illegal " @@ -122,6 +116,15 @@ wlock.release() repo._bookmarkcurrent = mark +def updatecurrentbookmark(repo, oldnode, curbranch): + try: + update(repo, oldnode, repo.branchtags()[curbranch]) + except KeyError: + if curbranch == "default": # no default branch! 
+ update(repo, oldnode, repo.lookup("tip")) + else: + raise util.Abort(_("branch %s not found") % curbranch) + def update(repo, parents, node): marks = repo._bookmarks update = False @@ -163,6 +166,28 @@ finally: w.release() +def updatefromremote(ui, repo, remote): + ui.debug("checking for updated bookmarks\n") + rb = remote.listkeys('bookmarks') + changed = False + for k in rb.keys(): + if k in repo._bookmarks: + nr, nl = rb[k], repo._bookmarks[k] + if nr in repo: + cr = repo[nr] + cl = repo[nl] + if cl.rev() >= cr.rev(): + continue + if cr in cl.descendants(): + repo._bookmarks[k] = cr.node() + changed = True + ui.status(_("updating bookmark %s\n") % k) + else: + ui.warn(_("not updating divergent" + " bookmark %s\n") % k) + if changed: + write(repo) + def diff(ui, repo, remote): ui.status(_("searching for changed bookmarks\n")) diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/bundlerepo.py --- a/mercurial/bundlerepo.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/bundlerepo.py Wed Apr 20 12:44:32 2011 -0500 @@ -15,7 +15,7 @@ from i18n import _ import os, struct, tempfile, shutil import changegroup, util, mdiff, discovery -import localrepo, changelog, manifest, filelog, revlog, error +import localrepo, changelog, manifest, filelog, revlog, error, url class bundlerevlog(revlog.revlog): def __init__(self, opener, indexfile, bundle, @@ -274,9 +274,9 @@ cwd = os.path.join(cwd,'') if parentpath.startswith(cwd): parentpath = parentpath[len(cwd):] - path = util.drop_scheme('file', path) - if path.startswith('bundle:'): - path = util.drop_scheme('bundle', path) + u = url.url(path) + path = u.localpath() + if u.scheme == 'bundle': s = path.split("+", 1) if len(s) == 1: repopath, bundlename = parentpath, s[0] @@ -286,15 +286,17 @@ repopath, bundlename = parentpath, path return bundlerepository(ui, repopath, bundlename) -def getremotechanges(ui, repo, other, revs=None, bundlename=None, force=False): - tmp = discovery.findcommonincoming(repo, other, heads=revs, force=force) +def getremotechanges(ui, repo, other, revs=None, bundlename=None, + force=False, usecommon=False): + tmp = discovery.findcommonincoming(repo, other, heads=revs, force=force, + commononly=usecommon) common, incoming, rheads = tmp if not incoming: try: os.unlink(bundlename) except: pass - return other, None, None + return other, None, None, None bundle = None if bundlename or not other.local(): @@ -303,7 +305,9 @@ if revs is None and other.capable('changegroupsubset'): revs = rheads - if revs is None: + if usecommon: + cg = other.getbundle('incoming', common=common, heads=revs) + elif revs is None: cg = other.changegroup(incoming, "incoming") else: cg = other.changegroupsubset(incoming, revs, 'incoming') @@ -315,5 +319,5 @@ if not other.local(): # use the created uncompressed bundlerepo other = bundlerepository(ui, repo.root, fname) - return (other, incoming, bundle) + return (other, common, incoming, bundle) diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/changegroup.py --- a/mercurial/changegroup.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/changegroup.py Wed Apr 20 12:44:32 2011 -0500 @@ -49,15 +49,6 @@ "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()), } -def collector(cl, mmfs, files): - # Gather information about changeset nodes going out in a bundle. - # We want to gather manifests needed and filelogs affected. 
- def collect(node): - c = cl.read(node) - files.update(c[3]) - mmfs.setdefault(c[0], node) - return collect - # hgweb uses this list to communicate its preferred type bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN'] @@ -203,3 +194,18 @@ if version != '10': raise util.Abort(_('%s: unknown bundle version %s') % (fname, version)) return unbundle10(fh, alg) + +class bundle10(object): + def __init__(self, lookup): + self._lookup = lookup + def close(self): + return closechunk() + def fileheader(self, fname): + return chunkheader(len(fname)) + fname + def revchunk(self, revlog, node='', p1='', p2='', prefix='', data=''): + linknode = self._lookup(revlog, node) + meta = node + p1 + p2 + linknode + prefix + l = len(meta) + len(data) + yield chunkheader(l) + yield meta + yield data diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/cmdutil.py --- a/mercurial/cmdutil.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/cmdutil.py Wed Apr 20 12:44:32 2011 -0500 @@ -8,7 +8,7 @@ from node import hex, nullid, nullrev, short from i18n import _ import os, sys, errno, re, glob, tempfile -import util, templater, patch, error, encoding, templatekw +import util, scmutil, templater, patch, error, encoding, templatekw import match as matchmod import similar, revset, subrepo @@ -72,7 +72,7 @@ return p def bail_if_changed(repo): - if repo.dirstate.parents()[1] != nullid: + if repo.dirstate.p2() != nullid: raise util.Abort(_('outstanding uncommitted merge')) modified, added, removed, deleted = repo.status()[:4] if modified or added or removed or deleted: @@ -122,12 +122,12 @@ def revpair(repo, revs): if not revs: - return repo.dirstate.parents()[0], None + return repo.dirstate.p1(), None l = revrange(repo, revs) if len(l) == 0: - return repo.dirstate.parents()[0], None + return repo.dirstate.p1(), None if len(l) == 1: return repo.lookup(l[0]), None @@ -230,7 +230,7 @@ def make_file(repo, pat, node=None, total=None, seqno=None, revwidth=None, mode='wb', pathname=None): - writable = 'w' in mode or 'a' in mode + writable = mode not in ('r', 'rb') if not pat or pat == '-': fp = writable and sys.stdout or sys.stdin @@ -435,6 +435,8 @@ src = repo.wjoin(abssrc) state = repo.dirstate[abstarget] + scmutil.checkportable(ui, abstarget) + # check for collisions prevsrc = targets.get(abstarget) if prevsrc is not None: diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/commands.py --- a/mercurial/commands.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/commands.py Wed Apr 20 12:44:32 2011 -0500 @@ -5,7 +5,7 @@ # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. 
-from node import hex, nullid, nullrev, short +from node import hex, bin, nullid, nullrev, short from lock import release from i18n import _, gettext import os, re, sys, difflib, time, tempfile @@ -13,7 +13,7 @@ import patch, help, mdiff, url, encoding, templatekw, discovery import archival, changegroup, cmdutil, sshserver, hbisect, hgweb, hgweb.server import merge as mergemod -import minirst, revset +import minirst, revset, templatefilters import dagparser # Commands start here, listed alphabetically @@ -126,8 +126,12 @@ lastfunc = funcmap[-1] funcmap[-1] = lambda x: "%s:%s" % (lastfunc(x), x[1]) + def bad(x, y): + raise util.Abort("%s: %s" % (x, y)) + ctx = cmdutil.revsingle(repo, opts.get('rev')) m = cmdutil.match(repo, pats, opts) + m.bad = bad follow = not opts.get('no_follow') for abs in ctx.walk(m): fctx = ctx[abs] @@ -303,7 +307,8 @@ return 0 def bisect(ui, repo, rev=None, extra=None, command=None, - reset=None, good=None, bad=None, skip=None, noupdate=None): + reset=None, good=None, bad=None, skip=None, extend=None, + noupdate=None): """subdivision search of changesets This command helps to find changesets which introduce problems. To @@ -326,6 +331,17 @@ Returns 0 on success. """ + def extendbisectrange(nodes, good): + # bisect is incomplete when it ends on a merge node and + # one of the parent was not checked. + parents = repo[nodes[0]].parents() + if len(parents) > 1: + side = good and state['bad'] or state['good'] + num = len(set(i.node() for i in parents) & set(side)) + if num == 1: + return parents[0].ancestor(parents[1]) + return None + def print_result(nodes, good): displayer = cmdutil.show_changeset(ui, repo, {}) if len(nodes) == 1: @@ -336,14 +352,12 @@ ui.write(_("The first bad revision is:\n")) displayer.show(repo[nodes[0]]) parents = repo[nodes[0]].parents() - if len(parents) > 1: - side = good and state['bad'] or state['good'] - num = len(set(i.node() for i in parents) & set(side)) - if num == 1: - common = parents[0].ancestor(parents[1]) - ui.write(_('Not all ancestors of this changeset have been' - ' checked.\nTo check the other ancestors, start' - ' from the common ancestor, %s.\n' % common)) + extendnode = extendbisectrange(nodes, good) + if extendnode is not None: + ui.write(_('Not all ancestors of this changeset have been' + ' checked.\nUse bisect --extend to continue the ' + 'bisection from\nthe common ancestor, %s.\n') + % short(extendnode.node())) else: # multiple possible revisions if good: @@ -376,7 +390,7 @@ bad = True else: reset = True - elif extra or good + bad + skip + reset + bool(command) > 1: + elif extra or good + bad + skip + reset + extend + bool(command) > 1: raise util.Abort(_('incompatible arguments')) if reset: @@ -440,6 +454,18 @@ # actually bisect nodes, changesets, good = hbisect.bisect(repo.changelog, state) + if extend: + if not changesets: + extendnode = extendbisectrange(nodes, good) + if extendnode is not None: + ui.write(_("Extending search to changeset %d:%s\n" + % (extendnode.rev(), short(extendnode.node())))) + if noupdate: + return + cmdutil.bail_if_changed(repo) + return hg.clean(repo, extendnode.node()) + raise util.Abort(_("nothing to extend")) + if changesets == 0: print_result(nodes, good) else: @@ -481,9 +507,10 @@ if rename: if rename not in marks: - raise util.Abort(_("a bookmark of this name does not exist")) + raise util.Abort(_("bookmark '%s' does not exist") % rename) if mark in marks and not force: - raise util.Abort(_("a bookmark of the same name already exists")) + raise util.Abort(_("bookmark '%s' already exists 
" + "(use -f to force)") % mark) if mark is None: raise util.Abort(_("new bookmark name required")) marks[mark] = marks[rename] @@ -497,7 +524,7 @@ if mark is None: raise util.Abort(_("bookmark name required")) if mark not in marks: - raise util.Abort(_("a bookmark of this name does not exist")) + raise util.Abort(_("bookmark '%s' does not exist") % mark) if mark == repo._bookmarkcurrent: bookmarks.setcurrent(repo, None) del marks[mark] @@ -512,7 +539,8 @@ raise util.Abort(_("bookmark names cannot consist entirely of " "whitespace")) if mark in marks and not force: - raise util.Abort(_("a bookmark of the same name already exists")) + raise util.Abort(_("bookmark '%s' already exists " + "(use -f to force)") % mark) if ((mark in repo.branchtags() or mark == repo.dirstate.branch()) and not force): raise util.Abort( @@ -570,7 +598,7 @@ """ if opts.get('clean'): - label = repo[None].parents()[0].branch() + label = repo[None].p1().branch() repo.dirstate.setbranch(label) ui.status(_('reset working directory to branch %s\n') % label) elif label: @@ -868,7 +896,12 @@ node = cmdutil.commit(ui, repo, commitfunc, pats, opts) if not node: - ui.status(_("nothing changed\n")) + stat = repo.status(match=cmdutil.match(repo, pats, opts)) + if stat[3]: + ui.status(_("nothing changed (%d missing files, see 'hg status')\n") + % len(stat[3])) + else: + ui.status(_("nothing changed\n")) return 1 ctx = repo[node] @@ -1175,6 +1208,7 @@ if len(items) > 1 or items and sections: raise util.Abort(_('only one config item permitted')) for section, name, value in ui.walkconfig(untrusted=untrusted): + value = str(value).replace('\n', '\\n') sectname = section + '.' + name if values: for v in values: @@ -1191,6 +1225,81 @@ ui.configsource(section, name, untrusted)) ui.write('%s=%s\n' % (sectname, value)) +def debugknown(ui, repopath, *ids, **opts): + """test whether node ids are known to a repo + + Every ID must be a full-length hex node id string. Returns a list of 0s and 1s + indicating unknown/known. + """ + repo = hg.repository(ui, repopath) + if not repo.capable('known'): + raise util.Abort("known() not supported by target repository") + flags = repo.known([bin(s) for s in ids]) + ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags]))) + +def debugbundle(ui, bundlepath, all=None, **opts): + """lists the contents of a bundle""" + f = url.open(ui, bundlepath) + try: + gen = changegroup.readbundle(f, bundlepath) + if all: + ui.write("format: id, p1, p2, cset, len(delta)\n") + + def showchunks(named): + ui.write("\n%s\n" % named) + while 1: + chunkdata = gen.parsechunk() + if not chunkdata: + break + node = chunkdata['node'] + p1 = chunkdata['p1'] + p2 = chunkdata['p2'] + cs = chunkdata['cs'] + delta = chunkdata['data'] + ui.write("%s %s %s %s %s\n" % + (hex(node), hex(p1), hex(p2), + hex(cs), len(delta))) + + showchunks("changelog") + showchunks("manifest") + while 1: + fname = gen.chunk() + if not fname: + break + showchunks(fname) + else: + while 1: + chunkdata = gen.parsechunk() + if not chunkdata: + break + node = chunkdata['node'] + ui.write("%s\n" % hex(node)) + finally: + f.close() + +def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts): + """retrieves a bundle from a repo + + Every ID must be a full-length hex node id string. Saves the bundle to the + given file. 
+ """ + repo = hg.repository(ui, repopath) + if not repo.capable('getbundle'): + raise util.Abort("getbundle() not supported by target repository") + args = {} + if common: + args['common'] = [bin(s) for s in common] + if head: + args['heads'] = [bin(s) for s in head] + bundle = repo.getbundle('debug', **args) + + bundletype = opts.get('type', 'bzip2').lower() + btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'} + bundletype = btypes.get(bundletype) + if bundletype not in changegroup.bundletypes: + raise util.Abort(_('unknown bundle type specified with --type')) + changegroup.writebundle(bundle, bundlepath, bundletype) + def debugpushkey(ui, repopath, namespace, *keyinfo): '''access the pushkey key/value protocol @@ -1214,7 +1323,7 @@ def debugrevspec(ui, repo, expr): '''parse and apply a revision specification''' if ui.verbose: - tree = revset.parse(expr) + tree = revset.parse(expr)[0] ui.note(tree, "\n") func = revset.match(expr) for c in func(repo, range(len(repo))): @@ -1238,11 +1347,15 @@ finally: wlock.release() -def debugstate(ui, repo, nodates=None): +def debugstate(ui, repo, nodates=None, datesort=None): """show the contents of the current dirstate""" timestr = "" showdate = not nodates - for file_, ent in sorted(repo.dirstate._map.iteritems()): + if datesort: + keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename + else: + keyfunc = None # sort by filename + for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc): if showdate: if ent[3] == -1: # Pad or slice to locale representation @@ -1457,45 +1570,6 @@ ui.write(_(" (templates seem to have been installed incorrectly)\n")) problems += 1 - # patch - ui.status(_("Checking patch...\n")) - patchproblems = 0 - a = "1\n2\n3\n4\n" - b = "1\n2\n3\ninsert\n4\n" - fa = writetemp(a) - d = mdiff.unidiff(a, None, b, None, os.path.basename(fa), - os.path.basename(fa)) - fd = writetemp(d) - - files = {} - try: - patch.patch(fd, ui, cwd=os.path.dirname(fa), files=files) - except util.Abort, e: - ui.write(_(" patch call failed:\n")) - ui.write(" " + str(e) + "\n") - patchproblems += 1 - else: - if list(files) != [os.path.basename(fa)]: - ui.write(_(" unexpected patch output!\n")) - patchproblems += 1 - a = open(fa).read() - if a != b: - ui.write(_(" patch test failed!\n")) - patchproblems += 1 - - if patchproblems: - if ui.config('ui', 'patch'): - ui.write(_(" (Current patch tool may be incompatible with patch," - " or misconfigured. 
Please check your configuration" - " file)\n")) - else: - ui.write(_(" Internal patcher failure, please report this error" - " to http://mercurial.selenic.com/wiki/BugTracker\n")) - problems += patchproblems - - os.unlink(fa) - os.unlink(fd) - # editor ui.status(_("Checking commit editor...\n")) editor = ui.geteditor() @@ -1555,6 +1629,21 @@ line = fmt % (abs, m.rel(abs), m.exact(abs) and 'exact' or '') ui.write("%s\n" % line.rstrip()) +def debugwireargs(ui, repopath, *vals, **opts): + repo = hg.repository(hg.remoteui(ui, opts), repopath) + for opt in remoteopts: + del opts[opt[1]] + args = {} + for k, v in opts.iteritems(): + if v: + args[k] = v + # run twice to check that we don't mess up the stream for the next command + res1 = repo.debugwireargs(*vals, **args) + res2 = repo.debugwireargs(*vals, **args) + ui.write("%s\n" % res1) + if res1 != res2: + ui.warn("%s\n" % res2) + def diff(ui, repo, *pats, **opts): """diff repository (or selected files) @@ -1595,8 +1684,8 @@ msg = _('cannot specify --rev and --change at the same time') raise util.Abort(msg) elif change: - node2 = repo.lookup(change) - node1 = repo[node2].parents()[0].node() + node2 = cmdutil.revsingle(repo, change, None).node() + node1 = repo[node2].p1().node() else: node1, node2 = cmdutil.revpair(repo, revs) @@ -1782,6 +1871,10 @@ datefunc = ui.quiet and util.shortdate or util.datestr found = False filerevmatches = {} + def binary(): + flog = getfile(fn) + return util.binary(flog.read(ctx.filenode(fn))) + if opts.get('all'): iter = difflinestates(pstates, states) else: @@ -1808,9 +1901,12 @@ after = l.line[l.colend:] ui.write(sep.join(cols)) if before is not None: - ui.write(sep + before) - ui.write(match, label='grep.match') - ui.write(after) + if not opts.get('text') and binary(): + ui.write(sep + " Binary file matches") + else: + ui.write(sep + before) + ui.write(match, label='grep.match') + ui.write(after) ui.write(eol) found = True return found @@ -1823,7 +1919,7 @@ def prep(ctx, fns): rev = ctx.rev() - pctx = ctx.parents()[0] + pctx = ctx.p1() parent = pctx.rev() matches.setdefault(rev, {}) matches.setdefault(parent, {}) @@ -1858,7 +1954,7 @@ for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep): rev = ctx.rev() - parent = ctx.parents()[0].rev() + parent = ctx.p1().rev() for fn in sorted(revfiles.get(rev, [])): states = matches[rev][fn] copy = copies.get(rev, {}).get(fn) @@ -1951,7 +2047,7 @@ displayer.show(ctx) displayer.close() -def help_(ui, name=None, with_version=False, unknowncmd=False): +def help_(ui, name=None, with_version=False, unknowncmd=False, full=True): """show help for a given topic or a help overview With no arguments, print a list of commands with short help messages. @@ -1962,7 +2058,7 @@ Returns 0 if successful. 
""" option_lists = [] - textwidth = ui.termwidth() - 2 + textwidth = min(ui.termwidth(), 80) - 2 def addglobalopts(aliases): if ui.verbose: @@ -1974,6 +2070,8 @@ if name == 'shortlist': msg = _('use "hg help" for the full list of commands ' 'or "hg -v" for details') + elif name and not full: + msg = _('use "hg help %s" to show the full help text' % name) elif aliases: msg = _('use "hg -v help%s" to show builtin aliases and ' 'global options') % (name and " " + name or "") @@ -2012,7 +2110,7 @@ ui.write('hg %s\n' % aliases[0]) # aliases - if not ui.quiet and len(aliases) > 1: + if full and not ui.quiet and len(aliases) > 1: ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:])) # description @@ -2024,7 +2122,7 @@ doc = _('shell alias for::\n\n %s') % entry[0].definition[1:] else: doc = _('alias for: hg %s\n\n%s') % (entry[0].definition, doc) - if ui.quiet: + if ui.quiet or not full: doc = doc.splitlines()[0] keep = ui.verbose and ['verbose'] or [] formatted, pruned = minirst.format(doc, textwidth, keep=keep) @@ -2141,6 +2239,8 @@ 'extensions\n')) help.addtopichook('revsets', revset.makedoc) + help.addtopichook('templates', templatekw.makedoc) + help.addtopichook('templates', templatefilters.makedoc) if name and name != 'shortlist': i = None @@ -2265,64 +2365,86 @@ hexfunc = ui.debugflag and hex or short default = not (num or id or branch or tags or bookmarks) output = [] - revs = [] + if source: source, branches = hg.parseurl(ui.expandpath(source)) repo = hg.repository(ui, source) revs, checkout = hg.addbranchrevs(repo, repo, branches, None) if not repo.local(): + if num or branch or tags: + raise util.Abort( + _("can't query remote revision number, branch, or tags")) if not rev and revs: rev = revs[0] if not rev: rev = "tip" - if num or branch or tags or bookmarks: - raise util.Abort(_("can't query remote revision number," - " branch, tags, or bookmarks")) - output = [hexfunc(repo.lookup(rev))] - elif not rev: - ctx = repo[None] - parents = ctx.parents() - changed = False - if default or id or num: - changed = util.any(repo.status()) - if default or id: - output = ["%s%s" % ('+'.join([hexfunc(p.node()) for p in parents]), - (changed) and "+" or "")] - if num: - output.append("%s%s" % ('+'.join([str(p.rev()) for p in parents]), - (changed) and "+" or "")) - else: - ctx = cmdutil.revsingle(repo, rev) + + remoterev = repo.lookup(rev) if default or id: - output = [hexfunc(ctx.node())] - if num: - output.append(str(ctx.rev())) - - if repo.local() and default and not ui.quiet: - b = ctx.branch() - if b != 'default': - output.append("(%s)" % b) - - # multiple tags for a single parent separated by '/' - t = "/".join(ctx.tags()) - if t: - output.append(t) - - # multiple bookmarks for a single parent separated by '/' - bm = '/'.join(ctx.bookmarks()) - if bm: - output.append(bm) - - if branch: - output.append(ctx.branch()) - - if tags: - output.extend(ctx.tags()) - - if bookmarks: - output.extend(ctx.bookmarks()) + output = [hexfunc(remoterev)] + + def getbms(): + bms = [] + + if 'bookmarks' in repo.listkeys('namespaces'): + hexremoterev = hex(remoterev) + bms = [bm for bm, bmr in repo.listkeys('bookmarks').iteritems() + if bmr == hexremoterev] + + return bms + + if bookmarks: + output.extend(getbms()) + elif default and not ui.quiet: + # multiple bookmarks for a single parent separated by '/' + bm = '/'.join(getbms()) + if bm: + output.append(bm) + else: + if not rev: + ctx = repo[None] + parents = ctx.parents() + changed = "" + if default or id or num: + changed = util.any(repo.status()) and "+" 
or "" + if default or id: + output = ["%s%s" % + ('+'.join([hexfunc(p.node()) for p in parents]), changed)] + if num: + output.append("%s%s" % + ('+'.join([str(p.rev()) for p in parents]), changed)) + else: + ctx = cmdutil.revsingle(repo, rev) + if default or id: + output = [hexfunc(ctx.node())] + if num: + output.append(str(ctx.rev())) + + if default and not ui.quiet: + b = ctx.branch() + if b != 'default': + output.append("(%s)" % b) + + # multiple tags for a single parent separated by '/' + t = '/'.join(ctx.tags()) + if t: + output.append(t) + + # multiple bookmarks for a single parent separated by '/' + bm = '/'.join(ctx.bookmarks()) + if bm: + output.append(bm) + else: + if branch: + output.append(ctx.branch()) + + if tags: + output.extend(ctx.tags()) + + if bookmarks: + output.extend(ctx.bookmarks()) ui.write("%s\n" % ' '.join(output)) @@ -2711,7 +2833,7 @@ ``--tool`` can be used to specify the merge tool used for file merges. It overrides the HGMERGE environment variable and your - configuration files. + configuration files. See :hg:`help merge-tools` for options. If no revision is specified, the working directory's parent is a head revision, and the current branch contains exactly one other @@ -2742,7 +2864,7 @@ '(run \'hg heads .\' to see heads)') % (branch, len(bheads))) - parent = repo.dirstate.parents()[0] + parent = repo.dirstate.p1() if len(bheads) == 1: if len(repo.heads()) > 1: raise util.Abort(_( @@ -2891,7 +3013,13 @@ else: ui.status(_("not updating, since new heads added\n")) if modheads > 1: - ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n")) + currentbranchheads = len(repo.branchheads()) + if currentbranchheads == modheads: + ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n")) + elif currentbranchheads > 1: + ui.status(_("(run 'hg heads .' to see heads, 'hg merge' to merge)\n")) + else: + ui.status(_("(run 'hg heads' to see heads)\n")) else: ui.status(_("(run 'hg update' to get a working copy)\n")) @@ -2938,6 +3066,7 @@ raise util.Abort(err) modheads = repo.pull(other, heads=revs, force=opts.get('force')) + bookmarks.updatefromremote(ui, repo, other) if checkout: checkout = str(repo.changelog.rev(other.lookup(checkout))) repo._subtoppath = source @@ -3265,8 +3394,9 @@ directory, the reverted files will thus appear modified afterwards. - If a file has been deleted, it is restored. If the executable mode - of a file was changed, it is reset. + If a file has been deleted, it is restored. Files scheduled for + addition are just unscheduled and left as they are. If the + executable mode of a file was changed, it is reset. If names are given, all files matching the names are reverted. If no arguments are given, no files are reverted. 
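
Illustrative aside, not part of the changeset: the revert behaviour documented in the help-text hunk above is the same machinery that the record extension drives through hg.revert() elsewhere in this patch. A minimal Python sketch under that assumption; the repository path and file name below are hypothetical.

    # Sketch only: revert a single file to the first dirstate parent,
    # mirroring the hg.revert(repo, repo.dirstate.p1(), ...) call in record.py.
    from mercurial import hg, ui as uimod

    myui = uimod.ui()
    repo = hg.repository(myui, '/path/to/repo')          # hypothetical path
    parent = repo.dirstate.p1()                          # p1() helper added by this patch
    hg.revert(repo, parent, lambda f: f == 'hello.py')   # restore only hello.py
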
@@ -3645,7 +3775,7 @@ raise util.Abort(msg) elif change: node2 = repo.lookup(change) - node1 = repo[node2].parents()[0].node() + node1 = repo[node2].p1().node() else: node1, node2 = cmdutil.revpair(repo, revs) @@ -3955,19 +4085,16 @@ ui.write("%s\n" % t) continue - try: - hn = hexfunc(n) - r = "%5d:%s" % (repo.changelog.rev(n), hn) - except error.LookupError: - r = " ?:%s" % hn - else: - spaces = " " * (30 - encoding.colwidth(t)) - if ui.verbose: - if repo.tagtype(t) == 'local': - tagtype = " local" - else: - tagtype = "" - ui.write("%s%s %s%s\n" % (t, spaces, r, tagtype)) + hn = hexfunc(n) + r = "%5d:%s" % (repo.changelog.rev(n), hn) + spaces = " " * (30 - encoding.colwidth(t)) + + if ui.verbose: + if repo.tagtype(t) == 'local': + tagtype = " local" + else: + tagtype = "" + ui.write("%s%s %s%s\n" % (t, spaces, r, tagtype)) def tip(ui, repo, **opts): """show the tip revision @@ -3998,15 +4125,16 @@ fnames = (fname1,) + fnames lock = repo.lock() + wc = repo['.'] try: for fname in fnames: f = url.open(ui, fname) gen = changegroup.readbundle(f, fname) modheads = repo.addchangegroup(gen, 'unbundle', 'bundle:' + fname, lock=lock) + bookmarks.updatecurrentbookmark(repo, wc.node(), wc.branch()) finally: lock.release() - return postincoming(ui, repo, modheads, opts.get('update'), None) def update(ui, repo, node=None, rev=None, clean=False, date=None, check=False): @@ -4053,7 +4181,7 @@ if rev and node: raise util.Abort(_("please specify just one revision")) - if not rev: + if rev is None or rev == '': rev = node # if we defined a bookmark, we have to remember the original bookmark name @@ -4070,7 +4198,7 @@ raise util.Abort(_("uncommitted local changes")) if date: - if rev: + if rev is not None: raise util.Abort(_("you can't specify a revision and a date")) rev = cmdutil.finddate(ui, repo, date) @@ -4269,6 +4397,7 @@ ('g', 'good', False, _('mark changeset good')), ('b', 'bad', False, _('mark changeset bad')), ('s', 'skip', False, _('skip testing changeset')), + ('e', 'extend', False, _('extend the bisect range')), ('c', 'command', '', _('use command to check changeset state'), _('CMD')), ('U', 'noupdate', False, _('do not update to target'))], @@ -4359,6 +4488,11 @@ ('n', 'new-file', None, _('add new file at each rev')), ], _('[OPTION]... 
TEXT')), + "debugbundle": + (debugbundle, + [('a', 'all', None, _('show all details')), + ], + _('FILE')), "debugcheckstate": (debugcheckstate, [], ''), "debugcommands": (debugcommands, [], _('[COMMAND]')), "debugcomplete": @@ -4379,12 +4513,20 @@ _('[-e] DATE [RANGE]')), "debugdata": (debugdata, [], _('FILE REV')), "debugfsinfo": (debugfsinfo, [], _('[PATH]')), + "debuggetbundle": + (debuggetbundle, + [('H', 'head', [], _('id of head node'), _('ID')), + ('C', 'common', [], _('id of common node'), _('ID')), + ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE')), + ], + _('REPO FILE [-H|-C ID]...')), "debugignore": (debugignore, [], ''), "debugindex": (debugindex, [('f', 'format', 0, _('revlog format'), _('FORMAT'))], _('FILE')), "debugindexdot": (debugindexdot, [], _('FILE')), "debuginstall": (debuginstall, [], ''), + "debugknown": (debugknown, [], _('REPO ID...')), "debugpushkey": (debugpushkey, [], _('REPO NAMESPACE [KEY OLD NEW]')), "debugrebuildstate": (debugrebuildstate, @@ -4402,7 +4544,8 @@ (debugsetparents, [], _('REV1 [REV2]')), "debugstate": (debugstate, - [('', 'nodates', None, _('do not display the saved mtime'))], + [('', 'nodates', None, _('do not display the saved mtime')), + ('', 'datesort', None, _('sort by saved mtime'))], _('[OPTION]...')), "debugsub": (debugsub, @@ -4410,6 +4553,12 @@ _('revision to check'), _('REV'))], _('[-r REV] [REV]')), "debugwalk": (debugwalk, walkopts, _('[OPTION]... [FILE]...')), + "debugwireargs": + (debugwireargs, + [('', 'three', '', 'three'), + ('', 'four', '', 'four'), + ] + remoteopts, + _('REPO [OPTIONS]... [ONE [TWO]]')), "^diff": (diff, [('r', 'rev', [], @@ -4435,6 +4584,7 @@ (grep, [('0', 'print0', None, _('end fields with NUL')), ('', 'all', None, _('print all revisions that match')), + ('a', 'text', None, _('treat all files as text')), ('f', 'follow', None, _('follow changeset history,' ' or file history across copies and renames')), @@ -4743,6 +4893,7 @@ } norepo = ("clone init version help debugcommands debugcomplete" - " debugdate debuginstall debugfsinfo debugpushkey") + " debugdate debuginstall debugfsinfo debugpushkey debugwireargs" + " debugknown debuggetbundle debugbundle") optionalrepo = ("identify paths serve showconfig debugancestor debugdag" " debugdata debugindex debugindexdot") diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/config.py --- a/mercurial/config.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/config.py Wed Apr 20 12:44:32 2011 -0500 @@ -138,5 +138,5 @@ def read(self, path, fp=None, sections=None, remap=None): if not fp: - fp = open(path) + fp = util.posixfile(path) self.parse(path, fp.read(), sections, remap, self.read) diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/context.py --- a/mercurial/context.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/context.py Wed Apr 20 12:44:32 2011 -0500 @@ -7,7 +7,7 @@ from node import nullid, nullrev, short, hex from i18n import _ -import ancestor, bdiff, error, util, subrepo, patch, encoding +import ancestor, bdiff, error, util, scmutil, subrepo, patch, encoding import os, errno, stat propertycache = util.propertycache @@ -402,6 +402,15 @@ return [filectx(self._repo, p, fileid=n, filelog=l) for p, n, l in pl if n != nullid] + def p1(self): + return self.parents()[0] + + def p2(self): + p = self.parents() + if len(p) == 2: + return p[1] + return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog) + def children(self): # hard for renames c = self._filelog.children(self._filenode) @@ -792,6 +801,7 @@ try: rejected = [] for f in list: + 
scmutil.checkportable(ui, join(f)) p = self._repo.wjoin(f) try: st = os.lstat(p) diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/copies.py --- a/mercurial/copies.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/copies.py Wed Apr 20 12:44:32 2011 -0500 @@ -93,7 +93,7 @@ return {}, {} # avoid silly behavior for parent -> working dir - if c2.node() is None and c1.node() == repo.dirstate.parents()[0]: + if c2.node() is None and c1.node() == repo.dirstate.p1(): return repo.dirstate.copies(), {} limit = _findlimit(repo, c1.rev(), c2.rev()) diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/dirstate.py --- a/mercurial/dirstate.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/dirstate.py Wed Apr 20 12:44:32 2011 -0500 @@ -49,6 +49,7 @@ self._rootdir = os.path.join(root, '') self._dirty = False self._dirtypl = False + self._lastnormaltime = None self._ui = ui @propertycache @@ -202,6 +203,12 @@ def parents(self): return [self._validate(p) for p in self._pl] + def p1(self): + return self._validate(self._pl[0]) + + def p2(self): + return self._validate(self._pl[1]) + def branch(self): return encoding.tolocal(self._branch) @@ -236,6 +243,7 @@ "_ignore"): if a in self.__dict__: delattr(self, a) + self._lastnormaltime = None self._dirty = False def copy(self, source, dest): @@ -261,9 +269,7 @@ def _addpath(self, f, check=False): oldstate = self[f] if check or oldstate == "r": - if '\r' in f or '\n' in f: - raise util.Abort( - _("'\\n' and '\\r' disallowed in filenames: %r") % f) + util.checkfilename(f) if f in self._dirs: raise util.Abort(_('directory %r already in dirstate') % f) # shadows @@ -281,9 +287,15 @@ self._dirty = True self._addpath(f) s = os.lstat(self._join(f)) - self._map[f] = ('n', s.st_mode, s.st_size, int(s.st_mtime)) + mtime = int(s.st_mtime) + self._map[f] = ('n', s.st_mode, s.st_size, mtime) if f in self._copymap: del self._copymap[f] + if mtime > self._lastnormaltime: + # Remember the most recent modification timeslot for status(), + # to make sure we won't miss future size-preserving file content + # modifications that happen within the same timeslot. + self._lastnormaltime = mtime def normallookup(self, f): '''Mark a file normal, but possibly dirty.''' @@ -397,6 +409,7 @@ delattr(self, "_dirs") self._copymap = {} self._pl = [nullid, nullid] + self._lastnormaltime = None self._dirty = True def rebuild(self, parent, files): @@ -444,6 +457,7 @@ write(f) st.write(cs.getvalue()) st.rename() + self._lastnormaltime = None self._dirty = self._dirtypl = False def _dirignore(self, f): @@ -680,6 +694,7 @@ # lines are an expansion of "islink => checklink" # where islink means "is this a link?" and checklink # means "can we check links?". + mtime = int(st.st_mtime) if (size >= 0 and (size != st.st_size or ((mode ^ st.st_mode) & 0100 and self._checkexec)) @@ -687,9 +702,15 @@ or size == -2 # other parent or fn in self._copymap): madd(fn) - elif (time != int(st.st_mtime) + elif (mtime != time and (mode & lnkkind != lnkkind or self._checklink)): ladd(fn) + elif mtime == self._lastnormaltime: + # fn may have been changed in the same timeslot without + # changing its size. This can happen if we quickly do + # multiple commits in a single transaction. + # Force lookup, so we don't miss such a racy file change. 
+ ladd(fn) elif listclean: cadd(fn) elif state == 'm': diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/discovery.py --- a/mercurial/discovery.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/discovery.py Wed Apr 20 12:44:32 2011 -0500 @@ -9,9 +9,10 @@ from i18n import _ import util, error -def findcommonincoming(repo, remote, heads=None, force=False): - """Return a tuple (common, missing roots, heads) used to identify - missing nodes from remote. +def findcommonincoming(repo, remote, heads=None, force=False, commononly=False): + """Return a tuple (common, missing, heads) used to identify missing nodes + from remote. "missing" is either a boolean indicating if any nodes are missing + (when commononly=True), or else a list of the root nodes of the missing set. If a list of heads is specified, return only nodes which are heads or ancestors of these heads. @@ -36,6 +37,13 @@ # and start by examining the heads repo.ui.status(_("searching for changes\n")) + if commononly: + myheads = repo.heads() + known = remote.known(myheads) + if util.all(known): + hasincoming = set(heads).difference(set(myheads)) and True + return myheads, hasincoming, heads + unknown = [] for h in heads: if h not in m: diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/dispatch.py --- a/mercurial/dispatch.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/dispatch.py Wed Apr 20 12:44:32 2011 -0500 @@ -90,7 +90,7 @@ except error.CommandError, inst: if inst.args[0]: ui.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1])) - commands.help_(ui, inst.args[0]) + commands.help_(ui, inst.args[0], full=False) else: ui.warn(_("hg: %s\n") % inst.args[1]) commands.help_(ui, 'shortlist') diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/help.py --- a/mercurial/help.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/help.py Wed Apr 20 12:44:32 2011 -0500 @@ -86,7 +86,7 @@ return loader -helptable = [ +helptable = sorted([ (["config", "hgrc"], _("Configuration Files"), loaddoc('config')), (["dates"], _("Date Formats"), loaddoc('dates')), (["patterns"], _("File Name Patterns"), loaddoc('patterns')), @@ -106,7 +106,7 @@ (["subrepo", "subrepos"], _("Subrepositories"), loaddoc('subrepos')), (["hgweb"], _("Configuring hgweb"), loaddoc('hgweb')), (["glossary"], _("Glossary"), loaddoc('glossary')), -] +]) # Map topics to lists of callable taking the current topic help and # returning the updated version @@ -115,3 +115,19 @@ def addtopichook(topic, rewriter): helphooks.setdefault(topic, []).append(rewriter) + +def makeitemsdoc(topic, doc, marker, items): + """Extract docstring from the items key to function mapping, build a + single documentation block and use it to overwrite the marker in doc + """ + entries = [] + for name in sorted(items): + text = (items[name].__doc__ or '').rstrip() + if not text: + continue + text = gettext(text) + lines = text.splitlines() + lines[1:] = [(' ' + l.strip()) for l in lines[1:]] + entries.append('\n'.join(lines)) + entries = '\n\n'.join(entries) + return doc.replace(marker, entries) diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/help/dates.txt --- a/mercurial/help/dates.txt Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/help/dates.txt Wed Apr 20 12:44:32 2011 -0500 @@ -30,7 +30,7 @@ The log command also accepts date ranges: -- ``<{datetime}`` - at or before a given date/time -- ``>{datetime}`` - on or after a given date/time -- ``{datetime} to {datetime}`` - a date range, inclusive -- ``-{days}`` - within a given number of days of today +- ``<DATE`` - at or before a given date/time +- ``>DATE`` - on or after a given date/time +- ``DATE to DATE`` - a date range,
inclusive +- ``-DAYS`` - within a given number of days of today diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/help/environment.txt --- a/mercurial/help/environment.txt Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/help/environment.txt Wed Apr 20 12:44:32 2011 -0500 @@ -59,6 +59,14 @@ Equivalent options set via command line flags or environment variables are not overridden. +HGPLAINEXCEPT + This is a comma-separated list of features to preserve when + HGPLAIN is enabled. Currently the only value supported is "i18n", + which preserves internationalization in plain mode. + + Setting HGPLAINEXCEPT to anything (even an empty string) will + enable plain mode. + HGUSER This is the string used as the author of a commit. If not set, available values will be considered in this order: diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/help/templates.txt --- a/mercurial/help/templates.txt Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/help/templates.txt Wed Apr 20 12:44:32 2011 -0500 @@ -23,52 +23,7 @@ keywords depends on the exact context of the templater. These keywords are usually available for templating a log-like command: -:author: String. The unmodified author of the changeset. - -:branch: String. The name of the branch on which the changeset was - committed. - -:branches: List of strings. The name of the branch on which the - changeset was committed. Will be empty if the branch name was - default. - -:children: List of strings. The children of the changeset. - -:date: Date information. The date when the changeset was committed. - -:desc: String. The text of the changeset description. - -:diffstat: String. Statistics of changes with the following format: - "modified files: +added/-removed lines" - -:files: List of strings. All files modified, added, or removed by this - changeset. - -:file_adds: List of strings. Files added by this changeset. - -:file_copies: List of strings. Files copied in this changeset with - their sources. - -:file_copies_switch: List of strings. Like "file_copies" but displayed - only if the --copied switch is set. - -:file_mods: List of strings. Files modified by this changeset. - -:file_dels: List of strings. Files removed by this changeset. - -:node: String. The changeset identification hash, as a 40 hexadecimal - digit string. - -:parents: List of strings. The parents of the changeset. - -:rev: Integer. The repository-local changeset revision number. - -:tags: List of strings. Any tags associated with the changeset. - -:latesttag: String. Most recent global tag in the ancestors of this - changeset. - -:latesttagdistance: Integer. Longest path to the latest tag. +.. keywordsmarker The "date" keyword does not produce human-readable output. If you want to use a date in your output, you can use a filter to process @@ -82,82 +37,4 @@ List of filters: -:addbreaks: Any text. Add an XHTML "
" tag before the end of - every line except the last. - -:age: Date. Returns a human-readable date/time difference between the - given date/time and the current date/time. - -:basename: Any text. Treats the text as a path, and returns the last - component of the path after splitting by the path separator - (ignoring trailing separators). For example, "foo/bar/baz" becomes - "baz" and "foo/bar//" becomes "bar". - -:stripdir: Treat the text as path and strip a directory level, if - possible. For example, "foo" and "foo/bar" becomes "foo". - -:date: Date. Returns a date in a Unix date format, including the - timezone: "Mon Sep 04 15:13:13 2006 0700". - -:domain: Any text. Finds the first string that looks like an email - address, and extracts just the domain component. Example: ``User - `` becomes ``example.com``. - -:email: Any text. Extracts the first string that looks like an email - address. Example: ``User `` becomes - ``user@example.com``. - -:escape: Any text. Replaces the special XML/XHTML characters "&", "<" - and ">" with XML entities. - -:hex: Any text. Convert a binary Mercurial node identifier into - its long hexadecimal representation. - -:fill68: Any text. Wraps the text to fit in 68 columns. - -:fill76: Any text. Wraps the text to fit in 76 columns. - -:firstline: Any text. Returns the first line of text. - -:nonempty: Any text. Returns '(none)' if the string is empty. - -:hgdate: Date. Returns the date as a pair of numbers: "1157407993 - 25200" (Unix timestamp, timezone offset). - -:isodate: Date. Returns the date in ISO 8601 format: "2009-08-18 13:00 - +0200". - -:isodatesec: Date. Returns the date in ISO 8601 format, including - seconds: "2009-08-18 13:00:13 +0200". See also the rfc3339date - filter. - -:localdate: Date. Converts a date to local date. - -:obfuscate: Any text. Returns the input text rendered as a sequence of - XML entities. - -:person: Any text. Returns the text before an email address. - -:rfc822date: Date. Returns a date using the same format used in email - headers: "Tue, 18 Aug 2009 13:00:13 +0200". - -:rfc3339date: Date. Returns a date using the Internet date format - specified in RFC 3339: "2009-08-18T13:00:13+02:00". - -:short: Changeset hash. Returns the short form of a changeset hash, - i.e. a 12 hexadecimal digit string. - -:shortdate: Date. Returns a date like "2006-09-18". - -:stringify: Any type. Turns the value into text by converting values into - text and concatenating them. - -:strip: Any text. Strips all leading and trailing whitespace. - -:tabindent: Any text. Returns the text, with every line except the - first starting with a tab character. - -:urlescape: Any text. Escapes all "special" characters. For example, - "foo bar" becomes "foo%20bar". - -:user: Any text. Returns the user portion of an email address. +.. 
filtersmarker diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/hg.py --- a/mercurial/hg.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/hg.py Wed Apr 20 12:44:32 2011 -0500 @@ -9,7 +9,7 @@ from i18n import _ from lock import release from node import hex, nullid, nullrev, short -import localrepo, bundlerepo, httprepo, sshrepo, statichttprepo +import localrepo, bundlerepo, httprepo, sshrepo, statichttprepo, bookmarks import lock, util, extensions, error, encoding, node import cmdutil, discovery, url import merge as mergemod @@ -17,7 +17,7 @@ import errno, os, shutil def _local(path): - path = util.expandpath(util.drop_scheme('file', path)) + path = util.expandpath(url.localpath(path)) return (os.path.isfile(path) and bundlerepo or localrepo) def addbranchrevs(lrepo, repo, branches, revs): @@ -51,13 +51,15 @@ revs.append(hashbranch) return revs, revs[0] -def parseurl(url, branches=None): +def parseurl(path, branches=None): '''parse url#branch, returning (url, (branch, branches))''' - if '#' not in url: - return url, (None, branches or []) - url, branch = url.split('#', 1) - return url, (branch, branches or []) + u = url.url(path) + branch = None + if u.fragment: + branch = u.fragment + u.fragment = None + return str(u), (branch, branches or []) schemes = { 'bundle': bundlerepo, @@ -69,11 +71,8 @@ } def _lookup(path): - scheme = 'file' - if path: - c = path.find(':') - if c > 0: - scheme = path[:c] + u = url.url(path) + scheme = u.scheme or 'file' thing = schemes.get(scheme) or schemes['file'] try: return thing(path) @@ -103,15 +102,6 @@ '''return default destination of clone if none is given''' return os.path.basename(os.path.normpath(source)) -def localpath(path): - if path.startswith('file://localhost/'): - return path[16:] - if path.startswith('file://'): - return path[7:] - if path.startswith('file:'): - return path[5:] - return path - def share(ui, source, dest=None, update=True): '''create a shared repository''' @@ -143,7 +133,7 @@ if not os.path.isdir(root): os.mkdir(root) - os.mkdir(roothg) + util.makedir(roothg, notindexed=True) requirements = '' try: @@ -231,8 +221,8 @@ else: dest = ui.expandpath(dest) - dest = localpath(dest) - source = localpath(source) + dest = url.localpath(dest) + source = url.localpath(source) if os.path.exists(dest): if not os.path.isdir(dest): @@ -258,7 +248,7 @@ abspath = origsource copy = False if src_repo.cancopy() and islocal(dest): - abspath = os.path.abspath(util.drop_scheme('file', origsource)) + abspath = os.path.abspath(url.localpath(origsource)) copy = not pull and not rev if copy: @@ -281,7 +271,7 @@ dir_cleanup.dir_ = hgdir try: dest_path = hgdir - os.mkdir(dest_path) + util.makedir(dest_path, notindexed=True) except OSError, inst: if inst.errno == errno.EEXIST: dir_cleanup.close() @@ -366,6 +356,21 @@ dest_repo.ui.status(_("updating to branch %s\n") % bn) _update(dest_repo, uprev) + # clone all bookmarks + if dest_repo.local() and src_repo.capable("pushkey"): + rb = src_repo.listkeys('bookmarks') + for k, n in rb.iteritems(): + try: + m = dest_repo.lookup(n) + dest_repo._bookmarks[k] = m + except: + pass + if rb: + bookmarks.write(dest_repo) + elif src_repo.local() and dest_repo.capable("pushkey"): + for k, n in src_repo._bookmarks.iteritems(): + dest_repo.pushkey('bookmarks', k, '', hex(n)) + return src_repo, dest_repo finally: release(src_lock, dest_lock) @@ -421,14 +426,19 @@ if revs: revs = [other.lookup(rev) for rev in revs] - other, incoming, bundle = bundlerepo.getremotechanges(ui, repo, other, revs, - opts["bundle"], opts["force"]) - if 
incoming is None: + usecommon = other.capable('getbundle') + other, common, incoming, bundle = bundlerepo.getremotechanges(ui, repo, other, + revs, opts["bundle"], opts["force"], + usecommon=usecommon) + if not incoming: ui.status(_("no changes found\n")) return subreporecurse() try: - chlist = other.changelog.nodesbetween(incoming, revs)[0] + if usecommon: + chlist = other.changelog.findmissing(common, revs) + else: + chlist = other.changelog.nodesbetween(incoming, revs)[0] displayer = cmdutil.show_changeset(ui, other, opts, buffered) # XXX once graphlog extension makes it into core, diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/hgweb/common.py --- a/mercurial/hgweb/common.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/hgweb/common.py Wed Apr 20 12:44:32 2011 -0500 @@ -73,10 +73,29 @@ def __init__(self, code, message=None, headers=[]): if message is None: message = _statusmessage(code) - Exception.__init__(self, code, message) + Exception.__init__(self) self.code = code self.message = message self.headers = headers + def __str__(self): + return self.message + +class continuereader(object): + def __init__(self, f, write): + self.f = f + self._write = write + self.continued = False + + def read(self, amt=-1): + if not self.continued: + self.continued = True + self._write('HTTP/1.1 100 Continue\r\n\r\n') + return self.f.read(amt) + + def __getattr__(self, attr): + if attr in ('close', 'readline', 'readlines', '__iter__'): + return getattr(self.f, attr) + raise AttributeError() def _statusmessage(code): from BaseHTTPServer import BaseHTTPRequestHandler diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/hgweb/hgweb_mod.py --- a/mercurial/hgweb/hgweb_mod.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/hgweb/hgweb_mod.py Wed Apr 20 12:44:32 2011 -0500 @@ -17,6 +17,7 @@ perms = { 'changegroup': 'pull', 'changegroupsubset': 'pull', + 'getbundle': 'pull', 'stream_out': 'pull', 'listkeys': 'pull', 'unbundle': 'push', @@ -125,7 +126,11 @@ self.check_perm(req, perms[cmd]) return protocol.call(self.repo, req, cmd) except ErrorResponse, inst: - if cmd == 'unbundle': + # A client that sends unbundle without 100-continue will + # break if we respond early. + if (cmd == 'unbundle' and + req.env.get('HTTP_EXPECT', + '').lower() != '100-continue'): req.drain() req.respond(inst, protocol.HGTYPE) return '0\n%s\n' % inst.message @@ -228,6 +233,7 @@ port = req.env["SERVER_PORT"] port = port != default_port and (":" + port) or "" urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port) + logourl = self.config("web", "logourl", "http://mercurial.selenic.com/") staticurl = self.config("web", "staticurl") or req.url + 'static/' if not staticurl.endswith('/'): staticurl += '/' @@ -267,6 +273,7 @@ tmpl = templater.templater(mapfile, defaults={"url": req.url, + "logourl": logourl, "staticurl": staticurl, "urlbase": urlbase, "repo": self.reponame, diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/hgweb/hgwebdir_mod.py --- a/mercurial/hgweb/hgwebdir_mod.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/hgweb/hgwebdir_mod.py Wed Apr 20 12:44:32 2011 -0500 @@ -6,10 +6,10 @@ # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. 
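The continuereader added to hgweb/common.py above (and wired into server.py and wsgicgi.py further down) delays the "HTTP/1.1 100 Continue" interim response until the application actually reads the request body; the hgweb_mod change is the other half, declining to answer an unbundle early when the client sent Expect: 100-continue. A rough standalone illustration of the same wrapper pattern, with a byte buffer standing in for the socket (these names are illustrative, not hgweb's API):

import io

class DeferredContinueReader(object):
    """Send a 100 Continue only when the body is first read."""
    def __init__(self, body, sendline):
        self._body = body
        self._sendline = sendline
        self._continued = False

    def read(self, amt=-1):
        if not self._continued:
            self._continued = True
            self._sendline(b'HTTP/1.1 100 Continue\r\n\r\n')
        return self._body.read(amt)

wire = []
reader = DeferredContinueReader(io.BytesIO(b'HG10 bundle data'), wire.append)
# nothing goes on the wire until the server decides to consume the body
assert wire == []
reader.read(4)
assert wire == [b'HTTP/1.1 100 Continue\r\n\r\n']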
-import os, re, time, urlparse +import os, re, time from mercurial.i18n import _ from mercurial import ui, hg, util, templater -from mercurial import error, encoding +from mercurial import error, encoding, url from common import ErrorResponse, get_mtime, staticfile, paritygen, \ get_contact, HTTP_OK, HTTP_NOT_FOUND, HTTP_SERVER_ERROR from hgweb_mod import hgweb @@ -40,9 +40,10 @@ def urlrepos(prefix, roothead, paths): """yield url paths and filesystem paths from a list of repo paths - >>> list(urlrepos('hg', '/opt', ['/opt/r', '/opt/r/r', '/opt'])) + >>> conv = lambda seq: [(v, util.pconvert(p)) for v,p in seq] + >>> conv(urlrepos('hg', '/opt', ['/opt/r', '/opt/r/r', '/opt'])) [('hg/r', '/opt/r'), ('hg/r/r', '/opt/r/r'), ('hg', '/opt')] - >>> list(urlrepos('', '/opt', ['/opt/r', '/opt/r/r', '/opt'])) + >>> conv(urlrepos('', '/opt', ['/opt/r', '/opt/r/r', '/opt'])) [('r', '/opt/r'), ('r/r', '/opt/r/r'), ('', '/opt')] """ for path in paths: @@ -76,7 +77,10 @@ if not os.path.exists(self.conf): raise util.Abort(_('config file %s not found!') % self.conf) u.readconfig(self.conf, remap=map, trust=True) - paths = u.configitems('hgweb-paths') + paths = [] + for name, ignored in u.configitems('hgweb-paths'): + for path in u.configlist('hgweb-paths', name): + paths.append((name, path)) elif isinstance(self.conf, (list, tuple)): paths = self.conf elif isinstance(self.conf, dict): @@ -247,6 +251,9 @@ # update time with local timezone try: r = hg.repository(self.ui, path) + except IOError: + u.warn(_('error accessing repository at %s\n') % path) + continue except error.RepoError: u.warn(_('error accessing repository at %s\n') % path) continue @@ -340,6 +347,7 @@ start = url[-1] == '?' and '&' or '?' sessionvars = webutil.sessionvars(vars, start) + logourl = config('web', 'logourl', 'http://mercurial.selenic.com/') staticurl = config('web', 'staticurl') or url + 'static/' if not staticurl.endswith('/'): staticurl += '/' @@ -349,22 +357,15 @@ "footer": footer, "motd": motd, "url": url, + "logourl": logourl, "staticurl": staticurl, "sessionvars": sessionvars}) return tmpl def updatereqenv(self, env): - def splitnetloc(netloc): - if ':' in netloc: - return netloc.split(':', 1) - else: - return (netloc, None) - if self._baseurl is not None: - urlcomp = urlparse.urlparse(self._baseurl) - host, port = splitnetloc(urlcomp[1]) - path = urlcomp[2] - env['SERVER_NAME'] = host - if port: - env['SERVER_PORT'] = port - env['SCRIPT_NAME'] = path + u = url.url(self._baseurl) + env['SERVER_NAME'] = u.host + if u.port: + env['SERVER_PORT'] = u.port + env['SCRIPT_NAME'] = '/' + u.path diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/hgweb/protocol.py --- a/mercurial/hgweb/protocol.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/hgweb/protocol.py Wed Apr 20 12:44:32 2011 -0500 @@ -22,7 +22,7 @@ if k == '*': star = {} for key in self.req.form.keys(): - if key not in keys: + if key != 'cmd' and key not in keys: star[key] = self.req.form[key][0] data['*'] = star else: diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/hgweb/server.py --- a/mercurial/hgweb/server.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/hgweb/server.py Wed Apr 20 12:44:32 2011 -0500 @@ -8,6 +8,7 @@ import os, sys, errno, urllib, BaseHTTPServer, socket, SocketServer, traceback from mercurial import util, error +from mercurial.hgweb import common from mercurial.i18n import _ def _splitURI(uri): @@ -111,6 +112,9 @@ env['SERVER_PROTOCOL'] = self.request_version env['wsgi.version'] = (1, 0) env['wsgi.url_scheme'] = self.url_scheme + if env.get('HTTP_EXPECT', 
'').lower() == '100-continue': + self.rfile = common.continuereader(self.rfile, self.wfile.write) + env['wsgi.input'] = self.rfile env['wsgi.errors'] = _error_logger(self) env['wsgi.multithread'] = isinstance(self.server, diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/hgweb/webcommands.py --- a/mercurial/hgweb/webcommands.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/hgweb/webcommands.py Wed Apr 20 12:44:32 2011 -0500 @@ -108,10 +108,11 @@ revcount = web.maxchanges if 'revcount' in req.form: revcount = int(req.form.get('revcount', [revcount])[0]) + revcount = max(revcount, 1) tmpl.defaults['sessionvars']['revcount'] = revcount lessvars = copy.copy(tmpl.defaults['sessionvars']) - lessvars['revcount'] = revcount / 2 + lessvars['revcount'] = max(revcount / 2, 1) lessvars['rev'] = query morevars = copy.copy(tmpl.defaults['sessionvars']) morevars['revcount'] = revcount * 2 @@ -220,10 +221,11 @@ revcount = shortlog and web.maxshortchanges or web.maxchanges if 'revcount' in req.form: revcount = int(req.form.get('revcount', [revcount])[0]) + revcount = max(revcount, 1) tmpl.defaults['sessionvars']['revcount'] = revcount lessvars = copy.copy(tmpl.defaults['sessionvars']) - lessvars['revcount'] = revcount / 2 + lessvars['revcount'] = max(revcount / 2, 1) morevars = copy.copy(tmpl.defaults['sessionvars']) morevars['revcount'] = revcount * 2 @@ -393,14 +395,11 @@ def bookmarks(web, req, tmpl): i = web.repo._bookmarks.items() - i.reverse() parity = paritygen(web.stripecount) - def entries(notip=False, limit=0, **map): + def entries(limit=0, **map): count = 0 - for k, n in i: - if notip and k == "tip": - continue + for k, n in sorted(i): if limit > 0 and count >= limit: continue count = count + 1 @@ -411,9 +410,8 @@ return tmpl("bookmarks", node=hex(web.repo.changelog.tip()), - entries=lambda **x: entries(False, 0, **x), - entriesnotip=lambda **x: entries(True, 0, **x), - latestentry=lambda **x: entries(True, 1, **x)) + entries=lambda **x: entries(0, **x), + latestentry=lambda **x: entries(1, **x)) def branches(web, req, tmpl): tips = (web.repo[n] for t, n in web.repo.branchtags().iteritems()) @@ -464,6 +462,15 @@ node=hex(n), date=web.repo[n].date()) + def bookmarks(**map): + parity = paritygen(web.stripecount) + b = web.repo._bookmarks.items() + for k, n in sorted(b)[:10]: # limit to 10 bookmarks + yield {'parity': parity.next(), + 'bookmark': k, + 'date': web.repo[n].date(), + 'node': hex(n)} + def branches(**map): parity = paritygen(web.stripecount) @@ -508,6 +515,7 @@ owner=get_contact(web.config) or "unknown", lastchange=tip.date(), tags=tagentries, + bookmarks=bookmarks, branches=branches, shortlog=changelist, node=tip.hex(), @@ -624,10 +632,11 @@ revcount = web.maxshortchanges if 'revcount' in req.form: revcount = int(req.form.get('revcount', [revcount])[0]) + revcount = max(revcount, 1) tmpl.defaults['sessionvars']['revcount'] = revcount lessvars = copy.copy(tmpl.defaults['sessionvars']) - lessvars['revcount'] = revcount / 2 + lessvars['revcount'] = max(revcount / 2, 1) morevars = copy.copy(tmpl.defaults['sessionvars']) morevars['revcount'] = revcount * 2 @@ -725,10 +734,11 @@ revcount = web.maxshortchanges if 'revcount' in req.form: revcount = int(req.form.get('revcount', [revcount])[0]) + revcount = max(revcount, 1) tmpl.defaults['sessionvars']['revcount'] = revcount lessvars = copy.copy(tmpl.defaults['sessionvars']) - lessvars['revcount'] = revcount / 2 + lessvars['revcount'] = max(revcount / 2, 1) morevars = copy.copy(tmpl.defaults['sessionvars']) morevars['revcount'] = revcount * 2 
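The webcommands hunks above clamp the user-supplied revcount to at least 1, and keep the "less" navigation link from dropping to 0, since ?revcount=0 or a negative value previously produced empty or nonsensical pages. A small helper in the same spirit (a hypothetical name, not part of the patch) shows the intent behind the repeated max() calls:

def clampedrevcount(form, default):
    """Return a sane revision count from a parsed query string."""
    try:
        revcount = int(form.get('revcount', [default])[0])
    except ValueError:
        revcount = default
    return max(revcount, 1)

form = {'revcount': ['0']}            # e.g. from ?revcount=0
revcount = clampedrevcount(form, 60)
lessvars = {'revcount': max(revcount // 2, 1)}
morevars = {'revcount': revcount * 2}
print((revcount, lessvars, morevars))  # (1, {'revcount': 1}, {'revcount': 2})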
diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/hgweb/wsgicgi.py --- a/mercurial/hgweb/wsgicgi.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/hgweb/wsgicgi.py Wed Apr 20 12:44:32 2011 -0500 @@ -10,6 +10,7 @@ import os, sys from mercurial import util +from mercurial.hgweb import common def launch(application): util.set_binary(sys.stdin) @@ -23,7 +24,11 @@ if environ['PATH_INFO'].startswith(scriptname): environ['PATH_INFO'] = environ['PATH_INFO'][len(scriptname):] - environ['wsgi.input'] = sys.stdin + stdin = sys.stdin + if environ.get('HTTP_EXPECT', '').lower() == '100-continue': + stdin = common.continuereader(stdin, sys.stdout.write) + + environ['wsgi.input'] = stdin environ['wsgi.errors'] = sys.stderr environ['wsgi.version'] = (1, 0) environ['wsgi.multithread'] = False diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/httprepo.py --- a/mercurial/httprepo.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/httprepo.py Wed Apr 20 12:44:32 2011 -0500 @@ -9,7 +9,7 @@ from node import nullid from i18n import _ import changegroup, statichttprepo, error, url, util, wireproto -import os, urllib, urllib2, urlparse, zlib, httplib +import os, urllib, urllib2, zlib, httplib import errno, socket def zgenerator(f): @@ -28,13 +28,13 @@ self.path = path self.caps = None self.handler = None - scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path) - if query or frag: + u = url.url(path) + if u.query or u.fragment: raise util.Abort(_('unsupported URL component: "%s"') % - (query or frag)) + (u.query or u.fragment)) # urllib cannot handle URLs with embedded user or passwd - self._url, authinfo = url.getauthinfo(path) + self._url, authinfo = u.authinfo() self.ui = ui self.ui.debug('using %s\n' % self._url) @@ -52,10 +52,13 @@ # look up capabilities only when needed + def _fetchcaps(self): + self.caps = set(self._call('capabilities').split()) + def get_caps(self): if self.caps is None: try: - self.caps = set(self._call('capabilities').split()) + self._fetchcaps() except error.RepoError: self.caps = set() self.ui.debug('capabilities: %s\n' % @@ -73,8 +76,7 @@ data = args.pop('data', None) headers = args.pop('headers', {}) self.ui.debug("sending %s command\n" % cmd) - q = {"cmd": cmd} - q.update(args) + q = [('cmd', cmd)] + sorted(args.items()) qs = '?%s' % urllib.urlencode(q) cu = "%s%s" % (self._url, qs) req = urllib2.Request(cu, data, headers) @@ -196,7 +198,13 @@ inst = httpsrepository(ui, path) else: inst = httprepository(ui, path) - inst.between([(nullid, nullid)]) + try: + # Try to do useful work when checking compatibility. + # Usually saves a roundtrip since we want the caps anyway. + inst._fetchcaps() + except error.RepoError: + # No luck, try older compatibility check. + inst.between([(nullid, nullid)]) return inst except error.RepoError: ui.note('(falling back to static-http)\n') diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/i18n.py --- a/mercurial/i18n.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/i18n.py Wed Apr 20 12:44:32 2011 -0500 @@ -51,7 +51,13 @@ # An unknown encoding results in a LookupError. 
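In the httprepo hunk above, the query string is now built from [('cmd', cmd)] plus the remaining arguments in sorted order, so the same command always produces the same URL, which keeps tests and any HTTP-level caching deterministic. A quick sketch of the effect using the stdlib encoder (urlencode lives in urllib on Python 2 and urllib.parse on Python 3); the helper name is illustrative:

try:
    from urllib import urlencode          # Python 2
except ImportError:
    from urllib.parse import urlencode    # Python 3

def wirequery(cmd, **args):
    # fixed position for 'cmd', stable order for everything else
    return '?' + urlencode([('cmd', cmd)] + sorted(args.items()))

# argument order in the caller no longer changes the URL
assert wirequery('debugwireargs', two='b', one='a') == \
       wirequery('debugwireargs', one='a', two='b')
print(wirequery('debugwireargs', two='b', one='a'))
# ?cmd=debugwireargs&one=a&two=b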
return message -if 'HGPLAIN' in os.environ: +def _plain(): + if 'HGPLAIN' not in os.environ and 'HGPLAINEXCEPT' not in os.environ: + return False + exceptions = os.environ.get('HGPLAINEXCEPT', '').strip().split(',') + return 'i18n' not in exceptions + +if _plain(): _ = lambda message: message else: _ = gettext diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/localrepo.py --- a/mercurial/localrepo.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/localrepo.py Wed Apr 20 12:44:32 2011 -0500 @@ -20,7 +20,8 @@ propertycache = util.propertycache class localrepository(repo.repository): - capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey')) + capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey', + 'known', 'getbundle')) supportedformats = set(('revlogv1', 'parentdelta')) supported = supportedformats | set(('store', 'fncache', 'shared', 'dotencode')) @@ -46,7 +47,7 @@ if create: if not os.path.exists(path): util.makedirs(path) - os.mkdir(self.path) + util.makedir(self.path, notindexed=True) requirements = ["revlogv1"] if self.ui.configbool('format', 'usestore', True): os.mkdir(os.path.join(self.path, "store")) @@ -361,7 +362,12 @@ tags = {} for (name, (node, hist)) in alltags.iteritems(): if node != nullid: - tags[encoding.tolocal(name)] = node + try: + # ignore tags to unknown nodes + self.changelog.lookup(node) + tags[encoding.tolocal(name)] = node + except error.LookupError: + pass tags['tip'] = self.changelog.tip() tagtypes = dict([(encoding.tolocal(name), value) for (name, value) in tagtypes.iteritems()]) @@ -384,10 +390,7 @@ '''return a list of tags ordered by revision''' l = [] for t, n in self.tags().iteritems(): - try: - r = self.changelog.rev(n) - except: - r = -2 # sort to the beginning of the list if unknown + r = self.changelog.rev(n) l.append((r, t, n)) return [(t, n) for r, t, n in sorted(l)] @@ -521,7 +524,7 @@ if isinstance(key, int): return self.changelog.node(key) elif key == '.': - return self.dirstate.parents()[0] + return self.dirstate.p1() elif key == 'null': return nullid elif key == 'tip': @@ -558,6 +561,10 @@ repo = (remote and remote.local()) and remote or self return repo[key].branch() + def known(self, nodes): + nm = self.changelog.nodemap + return [(n in nm) for n in nodes] + def local(self): return True @@ -1014,10 +1021,7 @@ raise # update bookmarks, dirstate and mergestate - parents = (p1, p2) - if p2 == nullid: - parents = (p1,) - bookmarks.update(self, parents, ret) + bookmarks.update(self, p1, ret) for f in changes[0] + changes[1]: self.dirstate.normal(f) for f in changes[2]: @@ -1221,14 +1225,15 @@ modified, added, clean = [], [], [] for fn in mf2: if fn in mf1: - if (mf1.flags(fn) != mf2.flags(fn) or - (mf1[fn] != mf2[fn] and - (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))): + if (fn not in deleted and + (mf1.flags(fn) != mf2.flags(fn) or + (mf1[fn] != mf2[fn] and + (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))): modified.append(fn) elif listclean: clean.append(fn) del mf1[fn] - else: + elif fn not in deleted: added.append(fn) removed = mf1.keys() @@ -1320,20 +1325,24 @@ def pull(self, remote, heads=None, force=False): lock = self.lock() try: + usecommon = remote.capable('getbundle') tmp = discovery.findcommonincoming(self, remote, heads=heads, - force=force) + force=force, commononly=usecommon) common, fetch, rheads = tmp if not fetch: self.ui.status(_("no changes found\n")) result = 0 else: - if heads is None and fetch == [nullid]: + if heads is None and list(common) == [nullid]: self.ui.status(_("requesting all changes\n")) elif 
heads is None and remote.capable('changegroupsubset'): # issue1320, avoid a race if remote changed after discovery heads = rheads - if heads is None: + if usecommon: + cg = remote.getbundle('pull', common=common, + heads=heads or rheads) + elif heads is None: cg = remote.changegroup(fetch, 'pull') elif not remote.capable('changegroupsubset'): raise util.Abort(_("partial pull cannot be done because " @@ -1346,27 +1355,6 @@ finally: lock.release() - self.ui.debug("checking for updated bookmarks\n") - rb = remote.listkeys('bookmarks') - changed = False - for k in rb.keys(): - if k in self._bookmarks: - nr, nl = rb[k], self._bookmarks[k] - if nr in self: - cr = self[nr] - cl = self[nl] - if cl.rev() >= cr.rev(): - continue - if cr in cl.descendants(): - self._bookmarks[k] = cr.node() - changed = True - self.ui.status(_("updating bookmark %s\n") % k) - else: - self.ui.warn(_("not updating divergent" - " bookmark %s\n") % k) - if changed: - bookmarks.write(self) - return result def checkpush(self, force, revs): @@ -1446,7 +1434,7 @@ for node in nodes: self.ui.debug("%s\n" % hex(node)) - def changegroupsubset(self, bases, heads, source, extranodes=None): + def changegroupsubset(self, bases, heads, source): """Compute a changegroup consisting of all the nodes that are descendents of any of the bases and ancestors of any of the heads. Return a chunkbuffer object whose read() method will return @@ -1458,214 +1446,127 @@ Another wrinkle is doing the reverse, figuring out which changeset in the changegroup a particular filenode or manifestnode belongs to. - - The caller can specify some nodes that must be included in the - changegroup using the extranodes argument. It should be a dict - where the keys are the filenames (or 1 for the manifest), and the - values are lists of (node, linknode) tuples, where node is a wanted - node and linknode is the changelog node that should be transmitted as - the linkrev. """ - - # Set up some initial variables - # Make it easy to refer to self.changelog cl = self.changelog - # Compute the list of changesets in this changegroup. - # Some bases may turn out to be superfluous, and some heads may be - # too. nodesbetween will return the minimal set of bases and heads - # necessary to re-create the changegroup. if not bases: bases = [nullid] - msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads) + csets, bases, heads = cl.nodesbetween(bases, heads) + # We assume that all ancestors of bases are known + common = set(cl.ancestors(*[cl.rev(n) for n in bases])) + return self._changegroupsubset(common, csets, heads, source) + + def getbundle(self, source, heads=None, common=None): + """Like changegroupsubset, but returns the set difference between the + ancestors of heads and the ancestors common. + + If heads is None, use the local heads. If common is None, use [nullid]. - if extranodes is None: - # can we go through the fast path ? - heads.sort() - allheads = self.heads() - allheads.sort() - if heads == allheads: - return self._changegroup(msng_cl_lst, source) + The nodes in common might not all be known locally due to the way the + current discovery protocol works. 
+ """ + cl = self.changelog + if common: + nm = cl.nodemap + common = [n for n in common if n in nm] + else: + common = [nullid] + if not heads: + heads = cl.heads() + common, missing = cl.findcommonmissing(common, heads) + return self._changegroupsubset(common, missing, heads, source) + + def _changegroupsubset(self, commonrevs, csets, heads, source): + + cl = self.changelog + mf = self.manifest + mfs = {} # needed manifests + fnodes = {} # needed file nodes + changedfiles = set() + fstate = ['', {}] + count = [0] + + # can we go through the fast path ? + heads.sort() + if heads == sorted(self.heads()): + return self._changegroup(csets, source) # slow path self.hook('preoutgoing', throw=True, source=source) - - self.changegroupinfo(msng_cl_lst, source) - - # We assume that all ancestors of bases are known - commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases])) + self.changegroupinfo(csets, source) - # Make it easy to refer to self.manifest - mnfst = self.manifest - # We don't know which manifests are missing yet - msng_mnfst_set = {} - # Nor do we know which filenodes are missing. - msng_filenode_set = {} - - # A changeset always belongs to itself, so the changenode lookup - # function for a changenode is identity. - def identity(x): - return x + # filter any nodes that claim to be part of the known set + def prune(revlog, missing): + for n in missing: + if revlog.linkrev(revlog.rev(n)) not in commonrevs: + yield n - # A function generating function that sets up the initial environment - # the inner function. - def filenode_collector(changedfiles): - # This gathers information from each manifestnode included in the - # changegroup about which filenodes the manifest node references - # so we can include those in the changegroup too. - # - # It also remembers which changenode each filenode belongs to. It - # does this by assuming the a filenode belongs to the changenode - # the first manifest that references it belongs to. - def collect_msng_filenodes(mnfstnode): - r = mnfst.rev(mnfstnode) - if mnfst.deltaparent(r) in mnfst.parentrevs(r): - # If the previous rev is one of the parents, - # we only need to see a diff. - deltamf = mnfst.readdelta(mnfstnode) - # For each line in the delta - for f, fnode in deltamf.iteritems(): - # And if the file is in the list of files we care - # about. - if f in changedfiles: - # Get the changenode this manifest belongs to - clnode = msng_mnfst_set[mnfstnode] - # Create the set of filenodes for the file if - # there isn't one already. - ndset = msng_filenode_set.setdefault(f, {}) - # And set the filenode's changelog node to the - # manifest's if it hasn't been set already. - ndset.setdefault(fnode, clnode) - else: - # Otherwise we need a full manifest. - m = mnfst.read(mnfstnode) - # For every file in we care about. - for f in changedfiles: - fnode = m.get(f, None) - # If it's in the manifest - if fnode is not None: - # See comments above. 
- clnode = msng_mnfst_set[mnfstnode] - ndset = msng_filenode_set.setdefault(f, {}) - ndset.setdefault(fnode, clnode) - return collect_msng_filenodes + def lookup(revlog, x): + if revlog == cl: + c = cl.read(x) + changedfiles.update(c[3]) + mfs.setdefault(c[0], x) + count[0] += 1 + self.ui.progress(_('bundling'), count[0], unit=_('changesets')) + return x + elif revlog == mf: + clnode = mfs[x] + mdata = mf.readfast(x) + for f in changedfiles: + if f in mdata: + fnodes.setdefault(f, {}).setdefault(mdata[f], clnode) + count[0] += 1 + self.ui.progress(_('bundling'), count[0], + unit=_('manifests'), total=len(mfs)) + return mfs[x] + else: + self.ui.progress( + _('bundling'), count[0], item=fstate[0], + unit=_('files'), total=len(changedfiles)) + return fstate[1][x] - # If we determine that a particular file or manifest node must be a - # node that the recipient of the changegroup will already have, we can - # also assume the recipient will have all the parents. This function - # prunes them from the set of missing nodes. - def prune(revlog, missingnodes): - hasset = set() - # If a 'missing' filenode thinks it belongs to a changenode we - # assume the recipient must have, then the recipient must have - # that filenode. - for n in missingnodes: - clrev = revlog.linkrev(revlog.rev(n)) - if clrev in commonrevs: - hasset.add(n) - for n in hasset: - missingnodes.pop(n, None) - for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]): - missingnodes.pop(revlog.node(r), None) + bundler = changegroup.bundle10(lookup) - # Add the nodes that were explicitly requested. - def add_extra_nodes(name, nodes): - if not extranodes or name not in extranodes: - return - - for node, linknode in extranodes[name]: - if node not in nodes: - nodes[node] = linknode - - # Now that we have all theses utility functions to help out and - # logically divide up the task, generate the group. def gengroup(): - # The set of changed files starts empty. - changedfiles = set() - collect = changegroup.collector(cl, msng_mnfst_set, changedfiles) - # Create a changenode group generator that will call our functions # back to lookup the owning changenode and collect information. - group = cl.group(msng_cl_lst, identity, collect) - for cnt, chnk in enumerate(group): - yield chnk - # revlog.group yields three entries per node, so - # dividing by 3 gives an approximation of how many - # nodes have been processed. - self.ui.progress(_('bundling'), cnt / 3, - unit=_('changesets')) - changecount = cnt / 3 + for chunk in cl.group(csets, bundler): + yield chunk self.ui.progress(_('bundling'), None) - prune(mnfst, msng_mnfst_set) - add_extra_nodes(1, msng_mnfst_set) - msng_mnfst_lst = msng_mnfst_set.keys() - # Sort the manifestnodes by revision number. - msng_mnfst_lst.sort(key=mnfst.rev) # Create a generator for the manifestnodes that calls our lookup # and data collection functions back. - group = mnfst.group(msng_mnfst_lst, - lambda mnode: msng_mnfst_set[mnode], - filenode_collector(changedfiles)) - efiles = {} - for cnt, chnk in enumerate(group): - if cnt % 3 == 1: - mnode = chnk[:20] - efiles.update(mnfst.readdelta(mnode)) - yield chnk - # see above comment for why we divide by 3 - self.ui.progress(_('bundling'), cnt / 3, - unit=_('manifests'), total=changecount) + count[0] = 0 + for chunk in mf.group(prune(mf, mfs), bundler): + yield chunk self.ui.progress(_('bundling'), None) - efiles = len(efiles) - # These are no longer needed, dereference and toss the memory for - # them. 
- msng_mnfst_lst = None - msng_mnfst_set.clear() + mfs.clear() - if extranodes: - for fname in extranodes: - if isinstance(fname, int): - continue - msng_filenode_set.setdefault(fname, {}) - changedfiles.add(fname) # Go through all our files in order sorted by name. - for idx, fname in enumerate(sorted(changedfiles)): + count[0] = 0 + for fname in sorted(changedfiles): filerevlog = self.file(fname) if not len(filerevlog): raise util.Abort(_("empty or missing revlog for %s") % fname) - # Toss out the filenodes that the recipient isn't really - # missing. - missingfnodes = msng_filenode_set.pop(fname, {}) - prune(filerevlog, missingfnodes) - add_extra_nodes(fname, missingfnodes) - # If any filenodes are left, generate the group for them, - # otherwise don't bother. - if missingfnodes: - yield changegroup.chunkheader(len(fname)) - yield fname - # Sort the filenodes by their revision # (topological order) - nodeiter = list(missingfnodes) - nodeiter.sort(key=filerevlog.rev) - # Create a group generator and only pass in a changenode - # lookup function as we need to collect no information - # from filenodes. - group = filerevlog.group(nodeiter, - lambda fnode: missingfnodes[fnode]) - for chnk in group: - # even though we print the same progress on - # most loop iterations, put the progress call - # here so that time estimates (if any) can be updated - self.ui.progress( - _('bundling'), idx, item=fname, - unit=_('files'), total=efiles) - yield chnk + fstate[0] = fname + fstate[1] = fnodes.pop(fname, {}) + first = True + + for chunk in filerevlog.group(prune(filerevlog, fstate[1]), + bundler): + if first: + if chunk == bundler.close(): + break + count[0] += 1 + yield bundler.fileheader(fname) + first = False + yield chunk # Signal that no more groups are left. 
- yield changegroup.closechunk() + yield bundler.close() self.ui.progress(_('bundling'), None) - if msng_cl_lst: - self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source) + if csets: + self.hook('outgoing', node=hex(csets[0]), source=source) return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN') @@ -1683,75 +1584,75 @@ nodes is the set of nodes to send""" - self.hook('preoutgoing', throw=True, source=source) + cl = self.changelog + mf = self.manifest + mfs = {} + changedfiles = set() + fstate = [''] + count = [0] - cl = self.changelog - revset = set([cl.rev(n) for n in nodes]) + self.hook('preoutgoing', throw=True, source=source) self.changegroupinfo(nodes, source) - def identity(x): - return x + revset = set([cl.rev(n) for n in nodes]) def gennodelst(log): for r in log: if log.linkrev(r) in revset: yield log.node(r) - def lookuplinkrev_func(revlog): - def lookuplinkrev(n): - return cl.node(revlog.linkrev(revlog.rev(n))) - return lookuplinkrev + def lookup(revlog, x): + if revlog == cl: + c = cl.read(x) + changedfiles.update(c[3]) + mfs.setdefault(c[0], x) + count[0] += 1 + self.ui.progress(_('bundling'), count[0], unit=_('changesets')) + return x + elif revlog == mf: + count[0] += 1 + self.ui.progress(_('bundling'), count[0], + unit=_('manifests'), total=len(mfs)) + return cl.node(revlog.linkrev(revlog.rev(x))) + else: + self.ui.progress( + _('bundling'), count[0], item=fstate[0], + total=len(changedfiles), unit=_('files')) + return cl.node(revlog.linkrev(revlog.rev(x))) + + bundler = changegroup.bundle10(lookup) def gengroup(): '''yield a sequence of changegroup chunks (strings)''' # construct a list of all changed files - changedfiles = set() - mmfs = {} - collect = changegroup.collector(cl, mmfs, changedfiles) - for cnt, chnk in enumerate(cl.group(nodes, identity, collect)): - # revlog.group yields three entries per node, so - # dividing by 3 gives an approximation of how many - # nodes have been processed. 
- self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets')) - yield chnk - changecount = cnt / 3 + for chunk in cl.group(nodes, bundler): + yield chunk self.ui.progress(_('bundling'), None) - mnfst = self.manifest - nodeiter = gennodelst(mnfst) - efiles = {} - for cnt, chnk in enumerate(mnfst.group(nodeiter, - lookuplinkrev_func(mnfst))): - if cnt % 3 == 1: - mnode = chnk[:20] - efiles.update(mnfst.readdelta(mnode)) - # see above comment for why we divide by 3 - self.ui.progress(_('bundling'), cnt / 3, - unit=_('manifests'), total=changecount) - yield chnk - efiles = len(efiles) + count[0] = 0 + for chunk in mf.group(gennodelst(mf), bundler): + yield chunk self.ui.progress(_('bundling'), None) - for idx, fname in enumerate(sorted(changedfiles)): + count[0] = 0 + for fname in sorted(changedfiles): filerevlog = self.file(fname) if not len(filerevlog): raise util.Abort(_("empty or missing revlog for %s") % fname) - nodeiter = gennodelst(filerevlog) - nodeiter = list(nodeiter) - if nodeiter: - yield changegroup.chunkheader(len(fname)) - yield fname - lookup = lookuplinkrev_func(filerevlog) - for chnk in filerevlog.group(nodeiter, lookup): - self.ui.progress( - _('bundling'), idx, item=fname, - total=efiles, unit=_('files')) - yield chnk + fstate[0] = fname + first = True + for chunk in filerevlog.group(gennodelst(filerevlog), bundler): + if first: + if chunk == bundler.close(): + break + count[0] += 1 + yield bundler.fileheader(fname) + first = False + yield chunk + yield bundler.close() self.ui.progress(_('bundling'), None) - yield changegroup.closechunk() - if nodes: self.hook('outgoing', node=hex(nodes[0]), source=source) @@ -1915,10 +1816,6 @@ self.hook("incoming", node=hex(cl.node(i)), source=srctype, url=url) - # FIXME - why does this care about tip? 
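The changegroup rewrite above replaces the per-revlog lookup closures with a single lookup(revlog, x) callback handed to changegroup.bundle10; the same function resolves link nodes for the changelog, the manifest and each filelog, while mutable cells (count, fstate) carry progress state between calls. A stripped-down sketch of that dispatch pattern, using toy types rather than Mercurial's revlog API:

class ToyLog(object):
    def __init__(self, name):
        self.name = name

cl, mf = ToyLog('changelog'), ToyLog('manifest')
count = [0]        # mutable cell shared by every call, as in the patch
fstate = ['']      # name of the file currently being bundled

def lookup(revlog, node):
    count[0] += 1
    if revlog is cl:
        unit = 'changesets'
    elif revlog is mf:
        unit = 'manifests'
    else:
        unit = 'files (%s)' % fstate[0]
    print('bundling %d %s' % (count[0], unit))
    return node    # the real callback returns the linked changelog node

lookup(cl, 'n1')
lookup(mf, 'n2')
fstate[0] = 'a.txt'
lookup(ToyLog('filelog'), 'n3')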
- if newheads == oldheads: - bookmarks.update(self, self.dirstate.parents(), self['tip'].node()) - # never return 0 here: if newheads < oldheads: return newheads - oldheads - 1 @@ -2019,6 +1916,10 @@ def listkeys(self, namespace): return pushkey.list(self, namespace) + def debugwireargs(self, one, two, three=None, four=None): + '''used to test argument passing over the wire''' + return "%s %s %s %s" % (one, two, three, four) + # used to avoid circular references so destructors work def aftertrans(files): renamefiles = [tuple(t) for t in files] @@ -2028,7 +1929,7 @@ return a def instance(ui, path, create): - return localrepository(ui, util.drop_scheme('file', path), create) + return localrepository(ui, urlmod.localpath(path), create) def islocal(path): return True diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/manifest.py --- a/mercurial/manifest.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/manifest.py Wed Apr 20 12:44:32 2011 -0500 @@ -38,6 +38,13 @@ r = self.rev(node) return self.parse(mdiff.patchtext(self.revdiff(self.deltaparent(r), r))) + def readfast(self, node): + '''use the faster of readdelta or read''' + r = self.rev(node) + if self.deltaparent(r) in self.parentrevs(r): + return self.readdelta(node) + return self.read(node) + def read(self, node): if node == revlog.nullid: return manifestdict() # don't upset local cache diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/merge.py --- a/mercurial/merge.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/merge.py Wed Apr 20 12:44:32 2011 -0500 @@ -268,7 +268,7 @@ updated, merged, removed, unresolved = 0, 0, 0, 0 ms = mergestate(repo) - ms.reset(wctx.parents()[0].node()) + ms.reset(wctx.p1().node()) moves = [] action.sort(key=actionkey) substate = wctx.substate # prime @@ -286,7 +286,7 @@ fco = mctx[f2] if mctx == actx: # backwards, use working dir parent as ancestor if fcl.parents(): - fca = fcl.parents()[0] + fca = fcl.p1() else: fca = repo.filectx(f, fileid=nullrev) else: @@ -439,7 +439,7 @@ if f: repo.dirstate.forget(f) -def update(repo, node, branchmerge, force, partial): +def update(repo, node, branchmerge, force, partial, ancestor=None): """ Perform a merge between the working directory and the given node @@ -492,9 +492,12 @@ overwrite = force and not branchmerge pl = wc.parents() p1, p2 = pl[0], repo[node] - pa = p1.ancestor(p2) + if ancestor: + pa = repo[ancestor] + else: + pa = p1.ancestor(p2) + fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2) - fastforward = False ### check phase if not overwrite and len(pl) > 1: @@ -504,9 +507,7 @@ raise util.Abort(_("merging with a working directory ancestor" " has no effect")) elif pa == p1: - if p1.branch() != p2.branch(): - fastforward = True - else: + if p1.branch() == p2.branch(): raise util.Abort(_("nothing to merge (use 'hg update'" " or check 'hg heads')")) if not force and (wc.files() or wc.deleted()): @@ -551,7 +552,7 @@ if not partial: repo.dirstate.setparents(fp1, fp2) recordupdates(repo, action, branchmerge) - if not branchmerge and not fastforward: + if not branchmerge: repo.dirstate.setbranch(p2.branch()) finally: wlock.release() diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/osutil.c --- a/mercurial/osutil.c Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/osutil.c Wed Apr 20 12:44:32 2011 -0500 @@ -514,6 +514,22 @@ } #endif +#ifdef __APPLE__ +#include <ApplicationServices/ApplicationServices.h> + +static PyObject *isgui(PyObject *self) +{ + CFDictionaryRef dict = CGSessionCopyCurrentDictionary(); + + if (dict != NULL) { + CFRelease(dict); + return Py_True; + } else { + return Py_False; + } +} +#endif +
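The isgui() probe above only exists in the Darwin build of osutil, so Python callers have to treat it as optional. A hedged sketch of how such a probe can be consumed follows; the fallback-to-True policy is an assumption for illustration, not necessarily what Mercurial itself does:

def insidegui(osutil_module):
    """Best-effort check for a GUI session; assume one if we cannot tell."""
    probe = getattr(osutil_module, 'isgui', None)
    if probe is None:
        return True        # non-Darwin build: no way to ask, assume GUI
    try:
        return bool(probe())
    except Exception:
        return True

class _FakeOsutil(object):
    @staticmethod
    def isgui():
        return False

print(insidegui(_FakeOsutil()))   # False: e.g. a headless ssh session on a Mac
print(insidegui(object()))        # True: module without the probe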
static char osutil_doc[] = "Native operating system services."; static PyMethodDef methods[] = { @@ -524,6 +540,12 @@ "Open a file with POSIX-like semantics.\n" "On error, this function may raise either a WindowsError or an IOError."}, #endif +#ifdef __APPLE__ + { + "isgui", (PyCFunction)isgui, METH_NOARGS, + "Is a CoreGraphics session available?" + }, +#endif {NULL, NULL} }; diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/parser.py --- a/mercurial/parser.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/parser.py Wed Apr 20 12:44:32 2011 -0500 @@ -78,7 +78,9 @@ 'generate a parse tree from a message' self._iter = self._tokenizer(message) self._advance() - return self._parse() + res = self._parse() + token, value, pos = self.current + return res, pos def eval(self, tree): 'recursively evaluate a parse tree using node methods' if not isinstance(tree, tuple): diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/patch.py --- a/mercurial/patch.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/patch.py Wed Apr 20 12:44:32 2011 -0500 @@ -488,11 +488,6 @@ cand.sort(key=lambda x: abs(x - linenum)) return cand - def hashlines(self): - self.hash = {} - for x, s in enumerate(self.lines): - self.hash.setdefault(s, []).append(x) - def makerejlines(self, fname): base = os.path.basename(fname) yield "--- %s\n+++ %s\n" % (base, base) @@ -574,8 +569,10 @@ self.dirty = 1 return 0 - # ok, we couldn't match the hunk. Lets look for offsets and fuzz it - self.hashlines() + # ok, we couldn't match the hunk. Lets look for offsets and fuzz it + self.hash = {} + for x, s in enumerate(self.lines): + self.hash.setdefault(s, []).append(x) if h.hunk[-1][0] != ' ': # if the hunk tried to put something at the bottom of the file # override the start line and use eof here @@ -613,6 +610,12 @@ self.rej.append(horig) return -1 + def close(self): + if self.dirty: + self.writelines(self.fname, self.lines) + self.write_rej() + return len(self.rej) + class hunk(object): def __init__(self, desc, num, lr, context, create=False, remove=False): self.number = num @@ -680,6 +683,7 @@ del self.b[-1] self.lena -= 1 self.lenb -= 1 + self._fixnewline(lr) def read_context_hunk(self, lr): self.desc = lr.readline() @@ -782,9 +786,14 @@ self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena, self.startb, self.lenb) self.hunk[0] = self.desc + self._fixnewline(lr) - def fix_newline(self): - diffhelpers.fix_newline(self.hunk, self.a, self.b) + def _fixnewline(self, lr): + l = lr.readline() + if l.startswith('\ '): + diffhelpers.fix_newline(self.hunk, self.a, self.b) + else: + lr.push(l) def complete(self): return len(self.a) == self.lena and len(self.b) == self.lenb @@ -993,7 +1002,6 @@ maps filenames to gitpatch records. Unique event. 
""" changed = {} - current_hunk = None afile = "" bfile = "" state = None @@ -1011,11 +1019,6 @@ x = lr.readline() if not x: break - if current_hunk: - if x.startswith('\ '): - current_hunk.fix_newline() - yield 'hunk', current_hunk - current_hunk = None if (state == BFILE and ((not context and x[0] == '@') or ((context is not False) and x.startswith('***************')))): if context is None and x.startswith('***************'): @@ -1023,18 +1026,20 @@ gpatch = changed.get(bfile) create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD' remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE' - current_hunk = hunk(x, hunknum + 1, lr, context, create, remove) + h = hunk(x, hunknum + 1, lr, context, create, remove) hunknum += 1 if emitfile: emitfile = False - yield 'file', (afile, bfile, current_hunk) + yield 'file', (afile, bfile, h) + yield 'hunk', h elif state == BFILE and x.startswith('GIT binary patch'): - current_hunk = binhunk(changed[bfile]) + h = binhunk(changed[bfile]) hunknum += 1 if emitfile: emitfile = False - yield 'file', ('a/' + afile, 'b/' + bfile, current_hunk) - current_hunk.extract(lr) + yield 'file', ('a/' + afile, 'b/' + bfile, h) + h.extract(lr) + yield 'hunk', h elif x.startswith('diff --git'): # check for git diff, scanning the whole patch file if needed m = gitre.match(x) @@ -1083,12 +1088,6 @@ emitfile = True state = BFILE hunknum = 0 - if current_hunk: - if current_hunk.complete(): - yield 'hunk', current_hunk - else: - raise PatchError(_("malformed patch %s %s") % (afile, - current_hunk.desc)) def applydiff(ui, fp, changed, strip=1, eolmode='strict'): """Reads a patch from fp and tries to apply it. @@ -1114,14 +1113,6 @@ cwd = os.getcwd() opener = util.opener(cwd) - def closefile(): - if not current_file: - return 0 - if current_file.dirty: - current_file.writelines(current_file.fname, current_file.lines) - current_file.write_rej() - return len(current_file.rej) - for state, values in iterhunks(ui, fp): if state == 'hunk': if not current_file: @@ -1132,7 +1123,8 @@ if ret > 0: err = 1 elif state == 'file': - rejects += closefile() + if current_file: + rejects += current_file.close() afile, bfile, first_hunk = values try: current_file, missing = selectfile(afile, bfile, @@ -1157,13 +1149,14 @@ else: raise util.Abort(_('unsupported parser state: %s') % state) - rejects += closefile() + if current_file: + rejects += current_file.close() if rejects: return -1 return err -def externalpatch(patcher, patchname, ui, strip, cwd, files): +def _externalpatch(patcher, patchname, ui, strip, cwd, files): """use to apply to the working directory. 
returns whether patch was applied with fuzz factor.""" @@ -1247,7 +1240,7 @@ files = {} try: if patcher: - return externalpatch(patcher, patchname, ui, strip, cwd, files) + return _externalpatch(patcher, patchname, ui, strip, cwd, files) return internalpatch(patchname, ui, strip, cwd, files, eolmode) except PatchError, err: raise util.Abort(str(err)) @@ -1331,7 +1324,7 @@ opts = mdiff.defaultopts if not node1 and not node2: - node1 = repo.dirstate.parents()[0] + node1 = repo.dirstate.p1() def lrugetfilectx(): cache = {} diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/posix.py --- a/mercurial/posix.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/posix.py Wed Apr 20 12:44:32 2011 -0500 @@ -7,7 +7,7 @@ from i18n import _ import osutil -import os, sys, errno, stat, getpass, pwd, grp +import os, sys, errno, stat, getpass, pwd, grp, tempfile posixfile = open nulldev = '/dev/null' @@ -108,6 +108,50 @@ # Turn off all +x bits os.chmod(f, s & 0666) +def checkexec(path): + """ + Check whether the given path is on a filesystem with UNIX-like exec flags + + Requires a directory (like /foo/.hg) + """ + + # VFAT on some Linux versions can flip mode but it doesn't persist + # a FS remount. Frequently we can detect it if files are created + # with exec bit on. + + try: + EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH + fh, fn = tempfile.mkstemp(dir=path, prefix='hg-checkexec-') + try: + os.close(fh) + m = os.stat(fn).st_mode & 0777 + new_file_has_exec = m & EXECFLAGS + os.chmod(fn, m ^ EXECFLAGS) + exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m) + finally: + os.unlink(fn) + except (IOError, OSError): + # we don't care, the user probably won't be able to commit anyway + return False + return not (new_file_has_exec or exec_flags_cannot_flip) + +def checklink(path): + """check whether the given path is on a symlink-capable filesystem""" + # mktemp is not racy because symlink creation will fail if the + # file already exists + name = tempfile.mktemp(dir=path, prefix='hg-checklink-') + try: + os.symlink(".", name) + os.unlink(name) + return True + except (OSError, AttributeError): + return False + +def checkosfilename(path): + '''Check that the base-relative path is a valid filename on this platform. 
+ Returns None if the path is ok, or a UI string describing the problem.''' + pass # on posix platforms, every path is ok + def set_binary(fd): pass diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/repair.py --- a/mercurial/repair.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/repair.py Wed Apr 20 12:44:32 2011 -0500 @@ -11,9 +11,9 @@ from i18n import _ import os -def _bundle(repo, bases, heads, node, suffix, extranodes=None, compress=True): +def _bundle(repo, bases, heads, node, suffix, compress=True): """create a bundle with the specified revisions as a backup""" - cg = repo.changegroupsubset(bases, heads, 'strip', extranodes) + cg = repo.changegroupsubset(bases, heads, 'strip') backupdir = repo.join("strip-backup") if not os.path.isdir(backupdir): os.mkdir(backupdir) @@ -33,40 +33,26 @@ return sorted(files) -def _collectextranodes(repo, files, link): - """return the nodes that have to be saved before the strip""" - def collectone(cl, revlog): - extra = [] - startrev = count = len(revlog) +def _collectbrokencsets(repo, files, striprev): + """return the changesets which will be broken by the truncation""" + s = set() + def collectone(revlog): + links = (revlog.linkrev(i) for i in revlog) # find the truncation point of the revlog - for i in xrange(count): - lrev = revlog.linkrev(i) - if lrev >= link: - startrev = i + 1 + for lrev in links: + if lrev >= striprev: break + # see if any revision after this point has a linkrev + # less than striprev (those will be broken by strip) + for lrev in links: + if lrev < striprev: + s.add(lrev) - # see if any revision after that point has a linkrev less than link - # (we have to manually save these guys) - for i in xrange(startrev, count): - node = revlog.node(i) - lrev = revlog.linkrev(i) - if lrev < link: - extra.append((node, cl.node(lrev))) - - return extra + collectone(repo.manifest) + for fname in files: + collectone(repo.file(fname)) - extranodes = {} - cl = repo.changelog - extra = collectone(cl, repo.manifest) - if extra: - extranodes[1] = extra - for fname in files: - f = repo.file(fname) - extra = collectone(cl, f) - if extra: - extranodes[fname] = extra - - return extranodes + return s def strip(ui, repo, node, backup="all"): cl = repo.changelog @@ -82,28 +68,26 @@ # the list of heads and bases of the set of interesting revisions. # (head = revision in the set that has no descendant in the set; # base = revision in the set that has no ancestor in the set) - tostrip = set((striprev,)) - saveheads = set() - savebases = [] + tostrip = set(cl.descendants(striprev)) + tostrip.add(striprev) + + files = _collectfiles(repo, striprev) + saverevs = _collectbrokencsets(repo, files, striprev) + + # compute heads + saveheads = set(saverevs) for r in xrange(striprev + 1, len(cl)): - parents = cl.parentrevs(r) - if parents[0] in tostrip or parents[1] in tostrip: - # r is a descendant of striprev - tostrip.add(r) - # if this is a merge and one of the parents does not descend - # from striprev, mark that parent as a savehead. 
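The new _collectbrokencsets above finds, for each revlog, revisions that sit after the truncation point but link to a changeset below striprev; those changesets would lose data when the revlog is truncated, so strip has to bundle them first. A toy run of the same scan over a plain list of linkrevs (illustrative only, not the repair module's API):

def brokencsets(linkrevs, striprev):
    """Changeset revs that truncating this revlog would damage."""
    broken = set()
    it = iter(linkrevs)
    # find the first revision at or above the strip point ...
    for lrev in it:
        if lrev >= striprev:
            break
    # ... then anything after it that still links below striprev is at risk
    for lrev in it:
        if lrev < striprev:
            broken.add(lrev)
    return broken

# filelog revisions linked to changesets 0, 1, 5, 2, 6: stripping changeset 4
# truncates this filelog at the '5' entry, orphaning the data for changeset 2
print(brokencsets([0, 1, 5, 2, 6], striprev=4))   # {2}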
- if parents[1] != nullrev: - for p in parents: - if p not in tostrip and p > striprev: - saveheads.add(p) - else: - # if no parents of this revision will be stripped, mark it as - # a savebase - if parents[0] < striprev and parents[1] < striprev: - savebases.append(cl.node(r)) + if r not in tostrip: + saverevs.add(r) + saveheads.difference_update(cl.parentrevs(r)) + saveheads.add(r) + saveheads = [cl.node(r) for r in saveheads] - saveheads.difference_update(parents) - saveheads.add(r) + # compute base nodes + if saverevs: + descendants = set(cl.descendants(*saverevs)) + saverevs.difference_update(descendants) + savebases = [cl.node(r) for r in saverevs] bm = repo._bookmarks updatebm = [] @@ -112,20 +96,15 @@ if rev in tostrip: updatebm.append(m) - saveheads = [cl.node(r) for r in saveheads] - files = _collectfiles(repo, striprev) - - extranodes = _collectextranodes(repo, files, striprev) - # create a changegroup for all the branches we need to keep backupfile = None if backup == "all": backupfile = _bundle(repo, [node], cl.heads(), node, 'backup') repo.ui.status(_("saved backup bundle to %s\n") % backupfile) - if saveheads or extranodes: + if saveheads or savebases: # do not compress partial bundle if we remove it from disk later chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp', - extranodes=extranodes, compress=keeppartialbundle) + compress=keeppartialbundle) mfst = repo.manifest @@ -149,7 +128,7 @@ tr.abort() raise - if saveheads or extranodes: + if saveheads or savebases: ui.note(_("adding branch\n")) f = open(chgrpfile, "rb") gen = changegroup.readbundle(f, chgrpfile) diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/revlog.py --- a/mercurial/revlog.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/revlog.py Wed Apr 20 12:44:32 2011 -0500 @@ -399,11 +399,12 @@ yield i break - def findmissing(self, common=None, heads=None): - """Return the ancestors of heads that are not ancestors of common. + def findcommonmissing(self, common=None, heads=None): + """Return a tuple of the ancestors of common and the ancestors of heads + that are not ancestors of common. - More specifically, return a list of nodes N such that every N - satisfies the following constraints: + More specifically, the second element is a list of nodes N such that + every N satisfies the following constraints: 1. N is an ancestor of some node in 'heads' 2. N is not an ancestor of any node in 'common' @@ -441,7 +442,25 @@ visit.append(p) missing = list(missing) missing.sort() - return [self.node(r) for r in missing] + return has, [self.node(r) for r in missing] + + def findmissing(self, common=None, heads=None): + """Return the ancestors of heads that are not ancestors of common. + + More specifically, return a list of nodes N such that every N + satisfies the following constraints: + + 1. N is an ancestor of some node in 'heads' + 2. N is not an ancestor of any node in 'common' + + The list is sorted by revision number, meaning it is + topologically sorted. + + 'heads' and 'common' are both lists of node IDs. If heads is + not supplied, uses all of the revlog's heads. If common is not + supplied, uses nullid.""" + _common, missing = self.findcommonmissing(common, heads) + return missing def nodesbetween(self, roots=None, heads=None): """Return a topological path from 'roots' to 'heads'. 
@@ -1039,7 +1058,7 @@ self._cache = (node, curr, text) return node - def group(self, nodelist, lookup, infocollect=None, fullrev=False): + def group(self, nodelist, bundler): """Calculate a delta group, yielding a sequence of changegroup chunks (strings). @@ -1049,45 +1068,35 @@ guaranteed to have this parent as it has all history before these changesets. In the case firstparent is nullrev the changegroup starts with a full revision. - fullrev forces the insertion of the full revision, necessary - in the case of shallow clones where the first parent might - not exist at the reciever. """ - revs = [self.rev(n) for n in nodelist] + revs = sorted([self.rev(n) for n in nodelist]) # if we don't have any revisions touched by these changesets, bail if not revs: - yield changegroup.closechunk() + yield bundler.close() return # add the parent of the first rev p = self.parentrevs(revs[0])[0] revs.insert(0, p) - if p == nullrev: - fullrev = True # build deltas - for d in xrange(len(revs) - 1): - a, b = revs[d], revs[d + 1] + for r in xrange(len(revs) - 1): + a, b = revs[r], revs[r + 1] nb = self.node(b) - - if infocollect is not None: - infocollect(nb) + p1, p2 = self.parents(nb) + prefix = '' - p = self.parents(nb) - meta = nb + p[0] + p[1] + lookup(nb) - if fullrev: + if a == nullrev: d = self.revision(nb) - meta += mdiff.trivialdiffheader(len(d)) - fullrev = False + prefix = mdiff.trivialdiffheader(len(d)) else: d = self.revdiff(a, b) - yield changegroup.chunkheader(len(meta) + len(d)) - yield meta - yield d + for c in bundler.revchunk(self, nb, p1, p2, prefix, d): + yield c - yield changegroup.closechunk() + yield bundler.close() def addgroup(self, bundle, linkmapper, transaction): """ diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/revset.py --- a/mercurial/revset.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/revset.py Wed Apr 20 12:44:32 2011 -0500 @@ -6,10 +6,10 @@ # GNU General Public License version 2 or any later version. import re -import parser, util, error, discovery +import parser, util, error, discovery, help, hbisect import bookmarks as bookmarksmod import match as matchmod -from i18n import _, gettext +from i18n import _ elements = { "(": (20, ("group", 1, ")"), ("func", 1, ")")), @@ -123,7 +123,7 @@ x = repo[x].rev() if x == -1 and len(subset) == len(repo): return [-1] - if x in subset: + if len(subset) == len(repo) or x in subset: return [x] return [] @@ -156,9 +156,10 @@ return getset(repo, getset(repo, subset, x), y) def orset(repo, subset, x, y): - s = set(getset(repo, subset, x)) - s |= set(getset(repo, [r for r in subset if r not in s], y)) - return [r for r in subset if r in s] + xl = getset(repo, subset, x) + s = set(xl) + yl = getset(repo, [r for r in subset if r not in s], y) + return xl + yl def notset(repo, subset, x): s = set(getset(repo, subset, x)) @@ -174,6 +175,322 @@ # functions +def adds(repo, subset, x): + """``adds(pattern)`` + Changesets that add a file matching pattern. + """ + # i18n: "adds" is a keyword + pat = getstring(x, _("adds requires a pattern")) + return checkstatus(repo, subset, pat, 1) + +def ancestor(repo, subset, x): + """``ancestor(single, single)`` + Greatest common ancestor of the two changesets. 
+ """ + # i18n: "ancestor" is a keyword + l = getargs(x, 2, 2, _("ancestor requires two arguments")) + r = range(len(repo)) + a = getset(repo, r, l[0]) + b = getset(repo, r, l[1]) + if len(a) != 1 or len(b) != 1: + # i18n: "ancestor" is a keyword + raise error.ParseError(_("ancestor arguments must be single revisions")) + an = [repo[a[0]].ancestor(repo[b[0]]).rev()] + + return [r for r in an if r in subset] + +def ancestors(repo, subset, x): + """``ancestors(set)`` + Changesets that are ancestors of a changeset in set. + """ + args = getset(repo, range(len(repo)), x) + if not args: + return [] + s = set(repo.changelog.ancestors(*args)) | set(args) + return [r for r in subset if r in s] + +def author(repo, subset, x): + """``author(string)`` + Alias for ``user(string)``. + """ + # i18n: "author" is a keyword + n = getstring(x, _("author requires a string")).lower() + return [r for r in subset if n in repo[r].user().lower()] + +def bisected(repo, subset, x): + """``bisected(string)`` + Changesets marked in the specified bisect state (good, bad, skip). + """ + state = getstring(x, _("bisect requires a string")).lower() + if state not in ('good', 'bad', 'skip', 'unknown'): + raise ParseError(_('invalid bisect state')) + marked = set(repo.changelog.rev(n) for n in hbisect.load_state(repo)[state]) + return [r for r in subset if r in marked] + +def bookmark(repo, subset, x): + """``bookmark([name])`` + The named bookmark or all bookmarks. + """ + # i18n: "bookmark" is a keyword + args = getargs(x, 0, 1, _('bookmark takes one or no arguments')) + if args: + bm = getstring(args[0], + # i18n: "bookmark" is a keyword + _('the argument to bookmark must be a string')) + bmrev = bookmarksmod.listbookmarks(repo).get(bm, None) + if not bmrev: + raise util.Abort(_("bookmark '%s' does not exist") % bm) + bmrev = repo[bmrev].rev() + return [r for r in subset if r == bmrev] + bms = set([repo[r].rev() + for r in bookmarksmod.listbookmarks(repo).values()]) + return [r for r in subset if r in bms] + +def branch(repo, subset, x): + """``branch(string or set)`` + All changesets belonging to the given branch or the branches of the given + changesets. + """ + try: + b = getstring(x, '') + if b in repo.branchmap(): + return [r for r in subset if repo[r].branch() == b] + except error.ParseError: + # not a string, but another revspec, e.g. tip() + pass + + s = getset(repo, range(len(repo)), x) + b = set() + for r in s: + b.add(repo[r].branch()) + s = set(s) + return [r for r in subset if r in s or repo[r].branch() in b] + +def checkstatus(repo, subset, pat, field): + m = matchmod.match(repo.root, repo.getcwd(), [pat]) + s = [] + fast = (m.files() == [pat]) + for r in subset: + c = repo[r] + if fast: + if pat not in c.files(): + continue + else: + for f in c.files(): + if m(f): + break + else: + continue + files = repo.status(c.p1().node(), c.node())[field] + if fast: + if pat in files: + s.append(r) + else: + for f in files: + if m(f): + s.append(r) + break + return s + +def children(repo, subset, x): + """``children(set)`` + Child changesets of changesets in set. + """ + cs = set() + cl = repo.changelog + s = set(getset(repo, range(len(repo)), x)) + for r in xrange(0, len(repo)): + for p in cl.parentrevs(r): + if p in s: + cs.add(r) + return [r for r in subset if r in cs] + +def closed(repo, subset, x): + """``closed()`` + Changeset is closed. 
+ """ + # i18n: "closed" is a keyword + getargs(x, 0, 0, _("closed takes no arguments")) + return [r for r in subset if repo[r].extra().get('close')] + +def contains(repo, subset, x): + """``contains(pattern)`` + Revision contains pattern. + """ + # i18n: "contains" is a keyword + pat = getstring(x, _("contains requires a pattern")) + m = matchmod.match(repo.root, repo.getcwd(), [pat]) + s = [] + if m.files() == [pat]: + for r in subset: + if pat in repo[r]: + s.append(r) + else: + for r in subset: + for f in repo[r].manifest(): + if m(f): + s.append(r) + break + return s + +def date(repo, subset, x): + """``date(interval)`` + Changesets within the interval, see :hg:`help dates`. + """ + # i18n: "date" is a keyword + ds = getstring(x, _("date requires a string")) + dm = util.matchdate(ds) + return [r for r in subset if dm(repo[r].date()[0])] + +def descendants(repo, subset, x): + """``descendants(set)`` + Changesets which are descendants of changesets in set. + """ + args = getset(repo, range(len(repo)), x) + if not args: + return [] + s = set(repo.changelog.descendants(*args)) | set(args) + return [r for r in subset if r in s] + +def follow(repo, subset, x): + """``follow()`` + An alias for ``::.`` (ancestors of the working copy's first parent). + """ + # i18n: "follow" is a keyword + getargs(x, 0, 0, _("follow takes no arguments")) + p = repo['.'].rev() + s = set(repo.changelog.ancestors(p)) | set([p]) + return [r for r in subset if r in s] + +def getall(repo, subset, x): + """``all()`` + All changesets, the same as ``0:tip``. + """ + # i18n: "all" is a keyword + getargs(x, 0, 0, _("all takes no arguments")) + return subset + +def grep(repo, subset, x): + """``grep(regex)`` + Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')`` + to ensure special escape characters are handled correctly. + """ + try: + # i18n: "grep" is a keyword + gr = re.compile(getstring(x, _("grep requires a string"))) + except re.error, e: + raise error.ParseError(_('invalid match pattern: %s') % e) + l = [] + for r in subset: + c = repo[r] + for e in c.files() + [c.user(), c.description()]: + if gr.search(e): + l.append(r) + break + return l + +def hasfile(repo, subset, x): + """``file(pattern)`` + Changesets affecting files matched by pattern. + """ + # i18n: "file" is a keyword + pat = getstring(x, _("file requires a pattern")) + m = matchmod.match(repo.root, repo.getcwd(), [pat]) + s = [] + for r in subset: + for f in repo[r].files(): + if m(f): + s.append(r) + break + return s + +def head(repo, subset, x): + """``head()`` + Changeset is a named branch head. + """ + # i18n: "head" is a keyword + getargs(x, 0, 0, _("head takes no arguments")) + hs = set() + for b, ls in repo.branchmap().iteritems(): + hs.update(repo[h].rev() for h in ls) + return [r for r in subset if r in hs] + +def heads(repo, subset, x): + """``heads(set)`` + Members of set with no children in set. + """ + s = getset(repo, subset, x) + ps = set(parents(repo, subset, x)) + return [r for r in s if r not in ps] + +def keyword(repo, subset, x): + """``keyword(string)`` + Search commit message, user name, and names of changed files for + string. + """ + # i18n: "keyword" is a keyword + kw = getstring(x, _("keyword requires a string")).lower() + l = [] + for r in subset: + c = repo[r] + t = " ".join(c.files() + [c.user(), c.description()]) + if kw in t.lower(): + l.append(r) + return l + +def limit(repo, subset, x): + """``limit(set, n)`` + First n members of set. 
+ """ + # i18n: "limit" is a keyword + l = getargs(x, 2, 2, _("limit requires two arguments")) + try: + # i18n: "limit" is a keyword + lim = int(getstring(l[1], _("limit requires a number"))) + except ValueError: + # i18n: "limit" is a keyword + raise error.ParseError(_("limit expects a number")) + return getset(repo, subset, l[0])[:lim] + +def maxrev(repo, subset, x): + """``max(set)`` + Changeset with highest revision number in set. + """ + s = getset(repo, subset, x) + if s: + m = max(s) + if m in subset: + return [m] + return [] + +def merge(repo, subset, x): + """``merge()`` + Changeset is a merge changeset. + """ + # i18n: "merge" is a keyword + getargs(x, 0, 0, _("merge takes no arguments")) + cl = repo.changelog + return [r for r in subset if cl.parentrevs(r)[1] != -1] + +def minrev(repo, subset, x): + """``min(set)`` + Changeset with lowest revision number in set. + """ + s = getset(repo, subset, x) + if s: + m = min(s) + if m in subset: + return [m] + return [] + +def modifies(repo, subset, x): + """``modifies(pattern)`` + Changesets modifying files matched by pattern. + """ + # i18n: "modifies" is a keyword + pat = getstring(x, _("modifies requires a pattern")) + return checkstatus(repo, subset, pat, 0) + def node(repo, subset, x): """``id(string)`` Revision non-ambiguously specified by the given hex string prefix. @@ -188,26 +505,35 @@ rn = repo.changelog.rev(repo.changelog._partialmatch(n)) return [r for r in subset if r == rn] -def rev(repo, subset, x): - """``rev(number)`` - Revision with the given numeric identifier. +def outgoing(repo, subset, x): + """``outgoing([path])`` + Changesets not found in the specified destination repository, or the + default push location. """ - # i18n: "rev" is a keyword - l = getargs(x, 1, 1, _("rev requires one argument")) - try: - # i18n: "rev" is a keyword - l = int(getstring(l[0], _("rev requires a number"))) - except ValueError: - # i18n: "rev" is a keyword - raise error.ParseError(_("rev expects a number")) - return [r for r in subset if r == l] + import hg # avoid start-up nasties + # i18n: "outgoing" is a keyword + l = getargs(x, 0, 1, _("outgoing requires a repository path")) + # i18n: "outgoing" is a keyword + dest = l and getstring(l[0], _("outgoing requires a repository path")) or '' + dest = repo.ui.expandpath(dest or 'default-push', dest or 'default') + dest, branches = hg.parseurl(dest) + revs, checkout = hg.addbranchrevs(repo, repo, branches, []) + if revs: + revs = [repo.lookup(rev) for rev in revs] + other = hg.repository(hg.remoteui(repo, {}), dest) + repo.ui.pushbuffer() + o = discovery.findoutgoing(repo, other) + repo.ui.popbuffer() + cl = repo.changelog + o = set([cl.rev(r) for r in repo.changelog.nodesbetween(o, revs)[0]]) + return [r for r in subset if r in o] def p1(repo, subset, x): """``p1([set])`` First parent of changesets in set, or the working directory. """ if x is None: - p = repo[x].parents()[0].rev() + p = repo[x].p1().rev() return [r for r in subset if r == p] ps = set() @@ -248,247 +574,15 @@ ps.update(cl.parentrevs(r)) return [r for r in subset if r in ps] -def maxrev(repo, subset, x): - """``max(set)`` - Changeset with highest revision number in set. - """ - s = getset(repo, subset, x) - if s: - m = max(s) - if m in subset: - return [m] - return [] - -def minrev(repo, subset, x): - """``min(set)`` - Changeset with lowest revision number in set. 
- """ - s = getset(repo, subset, x) - if s: - m = min(s) - if m in subset: - return [m] - return [] - -def limit(repo, subset, x): - """``limit(set, n)`` - First n members of set. - """ - # i18n: "limit" is a keyword - l = getargs(x, 2, 2, _("limit requires two arguments")) - try: - # i18n: "limit" is a keyword - lim = int(getstring(l[1], _("limit requires a number"))) - except ValueError: - # i18n: "limit" is a keyword - raise error.ParseError(_("limit expects a number")) - return getset(repo, subset, l[0])[:lim] - -def children(repo, subset, x): - """``children(set)`` - Child changesets of changesets in set. - """ - cs = set() - cl = repo.changelog - s = set(getset(repo, range(len(repo)), x)) - for r in xrange(0, len(repo)): - for p in cl.parentrevs(r): - if p in s: - cs.add(r) - return [r for r in subset if r in cs] - -def branch(repo, subset, x): - """``branch(set)`` - All changesets belonging to the branches of changesets in set. - """ - s = getset(repo, range(len(repo)), x) - b = set() - for r in s: - b.add(repo[r].branch()) - s = set(s) - return [r for r in subset if r in s or repo[r].branch() in b] - -def ancestor(repo, subset, x): - """``ancestor(single, single)`` - Greatest common ancestor of the two changesets. - """ - # i18n: "ancestor" is a keyword - l = getargs(x, 2, 2, _("ancestor requires two arguments")) - r = range(len(repo)) - a = getset(repo, r, l[0]) - b = getset(repo, r, l[1]) - if len(a) != 1 or len(b) != 1: - # i18n: "ancestor" is a keyword - raise error.ParseError(_("ancestor arguments must be single revisions")) - an = [repo[a[0]].ancestor(repo[b[0]]).rev()] - - return [r for r in an if r in subset] - -def ancestors(repo, subset, x): - """``ancestors(set)`` - Changesets that are ancestors of a changeset in set. - """ - args = getset(repo, range(len(repo)), x) - if not args: - return [] - s = set(repo.changelog.ancestors(*args)) | set(args) - return [r for r in subset if r in s] - -def descendants(repo, subset, x): - """``descendants(set)`` - Changesets which are descendants of changesets in set. - """ - args = getset(repo, range(len(repo)), x) - if not args: - return [] - s = set(repo.changelog.descendants(*args)) | set(args) - return [r for r in subset if r in s] - -def follow(repo, subset, x): - """``follow()`` - An alias for ``::.`` (ancestors of the working copy's first parent). - """ - # i18n: "follow" is a keyword - getargs(x, 0, 0, _("follow takes no arguments")) - p = repo['.'].rev() - s = set(repo.changelog.ancestors(p)) | set([p]) - return [r for r in subset if r in s] - -def date(repo, subset, x): - """``date(interval)`` - Changesets within the interval, see :hg:`help dates`. - """ - # i18n: "date" is a keyword - ds = getstring(x, _("date requires a string")) - dm = util.matchdate(ds) - return [r for r in subset if dm(repo[r].date()[0])] - -def keyword(repo, subset, x): - """``keyword(string)`` - Search commit message, user name, and names of changed files for - string. - """ - # i18n: "keyword" is a keyword - kw = getstring(x, _("keyword requires a string")).lower() - l = [] - for r in subset: - c = repo[r] - t = " ".join(c.files() + [c.user(), c.description()]) - if kw in t.lower(): - l.append(r) - return l - -def grep(repo, subset, x): - """``grep(regex)`` - Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')`` - to ensure special escape characters are handled correctly. +def present(repo, subset, x): + """``present(set)`` + An empty set, if any revision in set isn't found; otherwise, + all revisions in set. 
""" try: - # i18n: "grep" is a keyword - gr = re.compile(getstring(x, _("grep requires a string"))) - except re.error, e: - raise error.ParseError(_('invalid match pattern: %s') % e) - l = [] - for r in subset: - c = repo[r] - for e in c.files() + [c.user(), c.description()]: - if gr.search(e): - l.append(r) - continue - return l - -def author(repo, subset, x): - """``author(string)`` - Alias for ``user(string)``. - """ - # i18n: "author" is a keyword - n = getstring(x, _("author requires a string")).lower() - return [r for r in subset if n in repo[r].user().lower()] - -def user(repo, subset, x): - """``user(string)`` - User name is string. - """ - return author(repo, subset, x) - -def hasfile(repo, subset, x): - """``file(pattern)`` - Changesets affecting files matched by pattern. - """ - # i18n: "file" is a keyword - pat = getstring(x, _("file requires a pattern")) - m = matchmod.match(repo.root, repo.getcwd(), [pat]) - s = [] - for r in subset: - for f in repo[r].files(): - if m(f): - s.append(r) - continue - return s - -def contains(repo, subset, x): - """``contains(pattern)`` - Revision contains pattern. - """ - # i18n: "contains" is a keyword - pat = getstring(x, _("contains requires a pattern")) - m = matchmod.match(repo.root, repo.getcwd(), [pat]) - s = [] - if m.files() == [pat]: - for r in subset: - if pat in repo[r]: - s.append(r) - continue - else: - for r in subset: - for f in repo[r].manifest(): - if m(f): - s.append(r) - continue - return s - -def checkstatus(repo, subset, pat, field): - m = matchmod.match(repo.root, repo.getcwd(), [pat]) - s = [] - fast = (m.files() == [pat]) - for r in subset: - c = repo[r] - if fast: - if pat not in c.files(): - continue - else: - for f in c.files(): - if m(f): - break - else: - continue - files = repo.status(c.p1().node(), c.node())[field] - if fast: - if pat in files: - s.append(r) - continue - else: - for f in files: - if m(f): - s.append(r) - continue - return s - -def modifies(repo, subset, x): - """``modifies(pattern)`` - Changesets modifying files matched by pattern. - """ - # i18n: "modifies" is a keyword - pat = getstring(x, _("modifies requires a pattern")) - return checkstatus(repo, subset, pat, 0) - -def adds(repo, subset, x): - """``adds(pattern)`` - Changesets that add a file matching pattern. - """ - # i18n: "adds" is a keyword - pat = getstring(x, _("adds requires a pattern")) - return checkstatus(repo, subset, pat, 1) + return getset(repo, subset, x) + except error.RepoLookupError: + return [] def removes(repo, subset, x): """``removes(pattern)`` @@ -498,33 +592,19 @@ pat = getstring(x, _("removes requires a pattern")) return checkstatus(repo, subset, pat, 2) -def merge(repo, subset, x): - """``merge()`` - Changeset is a merge changeset. - """ - # i18n: "merge" is a keyword - getargs(x, 0, 0, _("merge takes no arguments")) - cl = repo.changelog - return [r for r in subset if cl.parentrevs(r)[1] != -1] - -def closed(repo, subset, x): - """``closed()`` - Changeset is closed. +def rev(repo, subset, x): + """``rev(number)`` + Revision with the given numeric identifier. """ - # i18n: "closed" is a keyword - getargs(x, 0, 0, _("closed takes no arguments")) - return [r for r in subset if repo[r].extra().get('close')] - -def head(repo, subset, x): - """``head()`` - Changeset is a named branch head. 
- """ - # i18n: "head" is a keyword - getargs(x, 0, 0, _("head takes no arguments")) - hs = set() - for b, ls in repo.branchmap().iteritems(): - hs.update(repo[h].rev() for h in ls) - return [r for r in subset if r in hs] + # i18n: "rev" is a keyword + l = getargs(x, 1, 1, _("rev requires one argument")) + try: + # i18n: "rev" is a keyword + l = int(getstring(l[0], _("rev requires a number"))) + except ValueError: + # i18n: "rev" is a keyword + raise error.ParseError(_("rev expects a number")) + return [r for r in subset if r == l] def reverse(repo, subset, x): """``reverse(set)`` @@ -534,15 +614,13 @@ l.reverse() return l -def present(repo, subset, x): - """``present(set)`` - An empty set, if any revision in set isn't found; otherwise, - all revisions in set. +def roots(repo, subset, x): + """``roots(set)`` + Changesets with no parent changeset in set. """ - try: - return getset(repo, subset, x) - except error.RepoLookupError: - return [] + s = getset(repo, subset, x) + cs = set(children(repo, subset, x)) + return [r for r in s if r not in cs] def sort(repo, subset, x): """``sort(set[, [-]key...])`` @@ -599,53 +677,6 @@ l.sort() return [e[-1] for e in l] -def getall(repo, subset, x): - """``all()`` - All changesets, the same as ``0:tip``. - """ - # i18n: "all" is a keyword - getargs(x, 0, 0, _("all takes no arguments")) - return subset - -def heads(repo, subset, x): - """``heads(set)`` - Members of set with no children in set. - """ - s = getset(repo, subset, x) - ps = set(parents(repo, subset, x)) - return [r for r in s if r not in ps] - -def roots(repo, subset, x): - """``roots(set)`` - Changesets with no parent changeset in set. - """ - s = getset(repo, subset, x) - cs = set(children(repo, subset, x)) - return [r for r in s if r not in cs] - -def outgoing(repo, subset, x): - """``outgoing([path])`` - Changesets not found in the specified destination repository, or the - default push location. - """ - import hg # avoid start-up nasties - # i18n: "outgoing" is a keyword - l = getargs(x, 0, 1, _("outgoing requires a repository path")) - # i18n: "outgoing" is a keyword - dest = l and getstring(l[0], _("outgoing requires a repository path")) or '' - dest = repo.ui.expandpath(dest or 'default-push', dest or 'default') - dest, branches = hg.parseurl(dest) - revs, checkout = hg.addbranchrevs(repo, repo, branches, []) - if revs: - revs = [repo.lookup(rev) for rev in revs] - other = hg.repository(hg.remoteui(repo, {}), dest) - repo.ui.pushbuffer() - o = discovery.findoutgoing(repo, other) - repo.ui.popbuffer() - cl = repo.changelog - o = set([cl.rev(r) for r in repo.changelog.nodesbetween(o, revs)[0]]) - return [r for r in subset if r in o] - def tag(repo, subset, x): """``tag(name)`` The specified tag by name, or all tagged revisions if no name is given. @@ -657,6 +688,8 @@ tn = getstring(args[0], # i18n: "tag" is a keyword _('the argument to tag must be a string')) + if not repo.tags().get(tn, None): + raise util.Abort(_("tag '%s' does not exist") % tn) s = set([cl.rev(n) for t, n in repo.tagslist() if t == tn]) else: s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip']) @@ -665,23 +698,11 @@ def tagged(repo, subset, x): return tag(repo, subset, x) -def bookmark(repo, subset, x): - """``bookmark([name])`` - The named bookmark or all bookmarks. +def user(repo, subset, x): + """``user(string)`` + User name is string. 
""" - # i18n: "bookmark" is a keyword - args = getargs(x, 0, 1, _('bookmark takes one or no arguments')) - if args: - bm = getstring(args[0], - # i18n: "bookmark" is a keyword - _('the argument to bookmark must be a string')) - bmrev = bookmarksmod.listbookmarks(repo).get(bm, None) - if bmrev: - bmrev = repo[bmrev].rev() - return [r for r in subset if r == bmrev] - bms = set([repo[r].rev() - for r in bookmarksmod.listbookmarks(repo).values()]) - return [r for r in subset if r in bms] + return author(repo, subset, x) symbols = { "adds": adds, @@ -689,6 +710,7 @@ "ancestor": ancestor, "ancestors": ancestors, "author": author, + "bisected": bisected, "bookmark": bookmark, "branch": branch, "children": children, @@ -786,7 +808,7 @@ elif op == 'func': f = getstring(x[1], _("not a symbol")) wa, ta = optimize(x[2], small) - if f in "grep date user author keyword branch file outgoing": + if f in "grep date user author keyword branch file outgoing closed": w = 10 # slow elif f in "modifies adds removes": w = 30 # slower @@ -808,26 +830,16 @@ def match(spec): if not spec: raise error.ParseError(_("empty query")) - tree = parse(spec) + tree, pos = parse(spec) + if (pos != len(spec)): + raise error.ParseError("invalid token", pos) weight, tree = optimize(tree, True) def mfunc(repo, subset): return getset(repo, subset, tree) return mfunc def makedoc(topic, doc): - """Generate and include predicates help in revsets topic.""" - predicates = [] - for name in sorted(symbols): - text = symbols[name].__doc__ - if not text: - continue - text = gettext(text.rstrip()) - lines = text.splitlines() - lines[1:] = [(' ' + l.strip()) for l in lines[1:]] - predicates.append('\n'.join(lines)) - predicates = '\n\n'.join(predicates) - doc = doc.replace('.. predicatesmarker', predicates) - return doc + return help.makeitemsdoc(topic, doc, '.. predicatesmarker', symbols) # tell hggettext to extract docstrings from these functions: i18nfunctions = symbols.values() diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/scmutil.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/scmutil.py Wed Apr 20 12:44:32 2011 -0500 @@ -0,0 +1,27 @@ +# scmutil.py - Mercurial core utility functions +# +# Copyright Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from i18n import _ +import util, error +import os + +def checkportable(ui, f): + '''Check if filename f is portable and warn or abort depending on config''' + util.checkfilename(f) + val = ui.config('ui', 'portablefilenames', 'warn') + lval = val.lower() + abort = os.name == 'nt' or lval == 'abort' + bval = util.parsebool(val) + if abort or lval == 'warn' or bval: + msg = util.checkwinfilename(f) + if msg: + if abort: + raise util.Abort("%s: %r" % (msg, f)) + ui.warn(_("warning: %s: %r\n") % (msg, f)) + elif bval is None and lval != 'ignore': + raise error.ConfigError( + _("ui.portablefilenames value is invalid ('%s')") % val) diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/sshrepo.py --- a/mercurial/sshrepo.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/sshrepo.py Wed Apr 20 12:44:32 2011 -0500 @@ -6,8 +6,7 @@ # GNU General Public License version 2 or any later version. 
from i18n import _ -import util, error, wireproto -import re +import util, error, wireproto, url class remotelock(object): def __init__(self, repo): @@ -24,16 +23,16 @@ self._url = path self.ui = ui - m = re.match(r'^ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?$', path) - if not m: + u = url.url(path, parsequery=False, parsefragment=False) + if u.scheme != 'ssh' or not u.host or u.path is None: self._abort(error.RepoError(_("couldn't parse location %s") % path)) - self.user = m.group(2) - if self.user and ':' in self.user: + self.user = u.user + if u.passwd is not None: self._abort(error.RepoError(_("password in URL not supported"))) - self.host = m.group(3) - self.port = m.group(5) - self.path = m.group(7) or "." + self.host = u.host + self.port = u.port + self.path = u.path or "." sshcmd = self.ui.config("ui", "ssh", "ssh") remotecmd = self.ui.config("ui", "remotecmd", "hg") @@ -119,9 +118,24 @@ def _callstream(self, cmd, **args): self.ui.debug("sending %s command\n" % cmd) self.pipeo.write("%s\n" % cmd) - for k, v in sorted(args.iteritems()): + _func, names = wireproto.commands[cmd] + keys = names.split() + wireargs = {} + for k in keys: + if k == '*': + wireargs['*'] = args + break + else: + wireargs[k] = args[k] + del args[k] + for k, v in sorted(wireargs.iteritems()): self.pipeo.write("%s %d\n" % (k, len(v))) - self.pipeo.write(v) + if isinstance(v, dict): + for dk, dv in v.iteritems(): + self.pipeo.write("%s %d\n" % (dk, len(dv))) + self.pipeo.write(dv) + else: + self.pipeo.write(v) self.pipeo.flush() return self.pipei diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/sshserver.py --- a/mercurial/sshserver.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/sshserver.py Wed Apr 20 12:44:32 2011 -0500 @@ -27,21 +27,21 @@ def getargs(self, args): data = {} keys = args.split() - count = len(keys) for n in xrange(len(keys)): argline = self.fin.readline()[:-1] arg, l = argline.split() - val = self.fin.read(int(l)) if arg not in keys: raise util.Abort("unexpected parameter %r" % arg) if arg == '*': star = {} - for n in xrange(int(l)): + for k in xrange(int(l)): + argline = self.fin.readline()[:-1] arg, l = argline.split() val = self.fin.read(int(l)) star[arg] = val data['*'] = star else: + val = self.fin.read(int(l)) data[arg] = val return [data[k] for k in keys] diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/statichttprepo.py --- a/mercurial/statichttprepo.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/statichttprepo.py Wed Apr 20 12:44:32 2011 -0500 @@ -71,7 +71,7 @@ """return a function that opens files over http""" p = base def o(path, mode="r", atomictemp=None): - if 'a' in mode or 'w' in mode: + if mode not in ('r', 'rb'): raise IOError('Permission denied') f = "/".join((p, urllib.quote(path))) return httprangereader(f, urlopener) @@ -85,7 +85,8 @@ self.ui = ui self.root = path - self.path, authinfo = url.getauthinfo(path.rstrip('/') + "/.hg") + u = url.url(path.rstrip('/') + "/.hg") + self.path, authinfo = u.authinfo() opener = build_opener(ui, authinfo) self.opener = opener(self.path) diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/store.py --- a/mercurial/store.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/store.py Wed Apr 20 12:44:32 2011 -0500 @@ -14,6 +14,14 @@ # This avoids a collision between a file named foo and a dir named # foo.i or foo.d def encodedir(path): + ''' + >>> encodedir('data/foo.i') + 'data/foo.i' + >>> encodedir('data/foo.i/bla.i') + 'data/foo.i.hg/bla.i' + >>> encodedir('data/foo.i.hg/bla.i') + 'data/foo.i.hg.hg/bla.i' + ''' if not path.startswith('data/'): 
return path return (path @@ -22,6 +30,14 @@ .replace(".d/", ".d.hg/")) def decodedir(path): + ''' + >>> decodedir('data/foo.i') + 'data/foo.i' + >>> decodedir('data/foo.i.hg/bla.i') + 'data/foo.i/bla.i' + >>> decodedir('data/foo.i.hg.hg/bla.i') + 'data/foo.i.hg/bla.i' + ''' if not path.startswith('data/') or ".hg/" not in path: return path return (path @@ -30,6 +46,29 @@ .replace(".hg.hg/", ".hg/")) def _buildencodefun(): + ''' + >>> enc, dec = _buildencodefun() + + >>> enc('nothing/special.txt') + 'nothing/special.txt' + >>> dec('nothing/special.txt') + 'nothing/special.txt' + + >>> enc('HELLO') + '_h_e_l_l_o' + >>> dec('_h_e_l_l_o') + 'HELLO' + + >>> enc('hello:world?') + 'hello~3aworld~3f' + >>> dec('hello~3aworld~3f') + 'hello:world?' + + >>> enc('the\x07quick\xADshot') + 'the~07quick~adshot' + >>> dec('the~07quick~adshot') + 'the\\x07quick\\xadshot' + ''' e = '_' win_reserved = [ord(x) for x in '\\:*?"<>|'] cmap = dict([(chr(x), chr(x)) for x in xrange(127)]) @@ -58,6 +97,17 @@ encodefilename, decodefilename = _buildencodefun() def _build_lower_encodefun(): + ''' + >>> f = _build_lower_encodefun() + >>> f('nothing/special.txt') + 'nothing/special.txt' + >>> f('HELLO') + 'hello' + >>> f('hello:world?') + 'hello~3aworld~3f' + >>> f('the\x07quick\xADshot') + 'the~07quick~adshot' + ''' win_reserved = [ord(x) for x in '\\:*?"<>|'] cmap = dict([(chr(x), chr(x)) for x in xrange(127)]) for x in (range(32) + range(126, 256) + win_reserved): @@ -72,6 +122,23 @@ com1 com2 com3 com4 com5 com6 com7 com8 com9 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split() def _auxencode(path, dotencode): + ''' + Encodes filenames containing names reserved by Windows or which end in + period or space. Does not touch other single reserved characters c. + Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here. + Additionally encodes space or period at the beginning, if dotencode is + True. + path is assumed to be all lowercase. + + >>> _auxencode('.foo/aux.txt/txt.aux/con/prn/nul/foo.', True) + '~2efoo/au~78.txt/txt.aux/co~6e/pr~6e/nu~6c/foo~2e' + >>> _auxencode('.com1com2/lpt9.lpt4.lpt1/conprn/foo.', False) + '.com1com2/lp~749.lpt4.lpt1/conprn/foo~2e' + >>> _auxencode('foo. ', True) + 'foo.~20' + >>> _auxencode(' .foo', True) + '~20.foo' + ''' res = [] for n in path.split('/'): if n: diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/subrepo.py --- a/mercurial/subrepo.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/subrepo.py Wed Apr 20 12:44:32 2011 -0500 @@ -5,10 +5,10 @@ # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. -import errno, os, re, xml.dom.minidom, shutil, urlparse, posixpath +import errno, os, re, xml.dom.minidom, shutil, posixpath import stat, subprocess, tarfile from i18n import _ -import config, util, node, error, cmdutil +import config, util, node, error, cmdutil, url, bookmarks hg = None nullstate = ('', '', 'empty') @@ -144,7 +144,7 @@ debug(s, "prompt remove") wctx.sub(s).remove() - for s, r in s2.items(): + for s, r in sorted(s2.items()): if s in s1: continue elif s not in sa: @@ -193,25 +193,16 @@ """return pull/push path of repo - either based on parent repo .hgsub info or on the top repo config. 
Abort or return None if no source found.""" if hasattr(repo, '_subparent'): - source = repo._subsource - if source.startswith('/') or '://' in source: - return source + source = url.url(repo._subsource) + source.path = posixpath.normpath(source.path) + if posixpath.isabs(source.path) or source.scheme: + return str(source) parent = _abssource(repo._subparent, push, abort=False) if parent: - if '://' in parent: - if parent[-1] == '/': - parent = parent[:-1] - r = urlparse.urlparse(parent + '/' + source) - if parent.startswith('ssh://'): - host, path = r[2][2:].split('/', 1) - r2 = '//%s/%s' % (host, posixpath.normpath(path)) - else: - r2 = posixpath.normpath(r[2]) - r = urlparse.urlunparse((r[0], r[1], r2, - r[3], r[4], r[5])) - return r - else: # plain file system path - return posixpath.normpath(os.path.join(parent, repo._subsource)) + parent = url.url(parent) + parent.path = posixpath.join(parent.path, source.path) + parent.path = posixpath.normpath(parent.path) + return str(parent) else: # recursion reached top repo if hasattr(repo, '_subtoppath'): return repo._subtoppath @@ -436,15 +427,14 @@ def _get(self, state): source, revision, kind = state - try: - self._repo.lookup(revision) - except error.RepoError: + if revision not in self._repo: self._repo._subsource = source srcurl = _abssource(self._repo) self._repo.ui.status(_('pulling subrepo %s from %s\n') % (subrelpath(self), srcurl)) other = hg.repository(self._repo.ui, srcurl) self._repo.pull(other) + bookmarks.updatefromremote(self._repo.ui, self._repo, other) def get(self, state, overwrite=False): self._get(state) @@ -718,6 +708,12 @@ current = None return current + def _gitremote(self, remote): + out = self._gitcommand(['remote', 'show', '-n', remote]) + line = out.split('\n')[1] + i = line.index('URL: ') + len('URL: ') + return line[i:] + def _githavelocally(self, revision): out, code = self._gitdir(['cat-file', '-e', revision]) return code == 0 @@ -771,11 +767,14 @@ def _fetch(self, source, revision): if self._gitmissing(): - self._ui.status(_('cloning subrepo %s\n') % self._relpath) - self._gitnodir(['clone', self._abssource(source), self._abspath]) + source = self._abssource(source) + self._ui.status(_('cloning subrepo %s from %s\n') % + (self._relpath, source)) + self._gitnodir(['clone', source, self._abspath]) if self._githavelocally(revision): return - self._ui.status(_('pulling subrepo %s\n') % self._relpath) + self._ui.status(_('pulling subrepo %s from %s\n') % + (self._relpath, self._gitremote('origin'))) # try only origin: the originally cloned repo self._gitcommand(['fetch']) if not self._githavelocally(revision): @@ -803,7 +802,7 @@ return elif self._gitstate() == revision: if overwrite: - # first reset the index to unmark new files for commit, because + # first reset the index to unmark new files for commit, because # reset --hard will otherwise throw away files added for commit, # not just unmark them. self._gitcommand(['reset', 'HEAD']) diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/templatefilters.py --- a/mercurial/templatefilters.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/templatefilters.py Wed Apr 20 12:44:32 2011 -0500 @@ -6,13 +6,13 @@ # GNU General Public License version 2 or any later version. 
 import cgi, re, os, time, urllib
-import encoding, node, util
+import encoding, node, util, help
 
-def stringify(thing):
-    '''turn nested template iterator into string.'''
-    if hasattr(thing, '__iter__') and not isinstance(thing, str):
-        return "".join([stringify(t) for t in thing if t is not None])
-    return str(thing)
+def addbreaks(text):
+    """:addbreaks: Any text. Add an XHTML "<br />" tag before the end of
+    every line except the last.
+    """
+    return text.replace('\n', '<br/>
\n') agescales = [("year", 3600 * 24 * 365), ("month", 3600 * 24 * 30), @@ -23,7 +23,9 @@ ("second", 1)] def age(date): - '''turn a (timestamp, tzoff) tuple into an age string.''' + """:age: Date. Returns a human-readable date/time difference between the + given date/time and the current date/time. + """ def plural(t, c): if c == 1: @@ -34,18 +36,65 @@ now = time.time() then = date[0] + future = False if then > now: - return 'in the future' - - delta = max(1, int(now - then)) - if delta > agescales[0][1] * 2: - return util.shortdate(date) + future = True + delta = max(1, int(then - now)) + if delta > agescales[0][1] * 30: + return 'in the distant future' + else: + delta = max(1, int(now - then)) + if delta > agescales[0][1] * 2: + return util.shortdate(date) for t, s in agescales: n = delta // s if n >= 2 or s == 1: + if future: + return '%s from now' % fmt(t, n) return '%s ago' % fmt(t, n) +def basename(path): + """:basename: Any text. Treats the text as a path, and returns the last + component of the path after splitting by the path separator + (ignoring trailing separators). For example, "foo/bar/baz" becomes + "baz" and "foo/bar//" becomes "bar". + """ + return os.path.basename(path) + +def datefilter(text): + """:date: Date. Returns a date in a Unix date format, including the + timezone: "Mon Sep 04 15:13:13 2006 0700". + """ + return util.datestr(text) + +def domain(author): + """:domain: Any text. Finds the first string that looks like an email + address, and extracts just the domain component. Example: ``User + `` becomes ``example.com``. + """ + f = author.find('@') + if f == -1: + return '' + author = author[f + 1:] + f = author.find('>') + if f >= 0: + author = author[:f] + return author + +def email(text): + """:email: Any text. Extracts the first string that looks like an email + address. Example: ``User `` becomes + ``user@example.com``. + """ + return util.email(text) + +def escape(text): + """:escape: Any text. Replaces the special XML/XHTML characters "&", "<" + and ">" with XML entities. + """ + return cgi.escape(text, True) + para_re = None space_re = None @@ -74,40 +123,45 @@ return "".join([space_re.sub(' ', util.wrap(para, width=width)) + rest for para, rest in findparas()]) +def fill68(text): + """:fill68: Any text. Wraps the text to fit in 68 columns.""" + return fill(text, 68) + +def fill76(text): + """:fill76: Any text. Wraps the text to fit in 76 columns.""" + return fill(text, 76) + def firstline(text): - '''return the first line of text''' + """:firstline: Any text. Returns the first line of text.""" try: return text.splitlines(True)[0].rstrip('\r\n') except IndexError: return '' -def nl2br(text): - '''replace raw newlines with xhtml line breaks.''' - return text.replace('\n', '
\n') +def hexfilter(text): + """:hex: Any text. Convert a binary Mercurial node identifier into + its long hexadecimal representation. + """ + return node.hex(text) -def obfuscate(text): - text = unicode(text, encoding.encoding, 'replace') - return ''.join(['&#%d;' % ord(c) for c in text]) +def hgdate(text): + """:hgdate: Date. Returns the date as a pair of numbers: "1157407993 + 25200" (Unix timestamp, timezone offset). + """ + return "%d %d" % text -def domain(author): - '''get domain of author, or empty string if none.''' - f = author.find('@') - if f == -1: - return '' - author = author[f + 1:] - f = author.find('>') - if f >= 0: - author = author[:f] - return author +def isodate(text): + """:isodate: Date. Returns the date in ISO 8601 format: "2009-08-18 13:00 + +0200". + """ + return util.datestr(text, '%Y-%m-%d %H:%M %1%2') -def person(author): - '''get name of author, or else username.''' - if not '@' in author: - return author - f = author.find('<') - if f == -1: - return util.shortuser(author) - return author[:f].rstrip() +def isodatesec(text): + """:isodatesec: Date. Returns the date in ISO 8601 format, including + seconds: "2009-08-18 13:00:13 +0200". See also the rfc3339date + filter. + """ + return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2') def indent(text, prefix): '''indent each non-empty line of text after first with prefix.''' @@ -124,38 +178,6 @@ yield '\n' return "".join(indenter()) -def permissions(flags): - if "l" in flags: - return "lrwxrwxrwx" - if "x" in flags: - return "-rwxr-xr-x" - return "-rw-r--r--" - -def xmlescape(text): - text = (text - .replace('&', '&') - .replace('<', '<') - .replace('>', '>') - .replace('"', '"') - .replace("'", ''')) # ' invalid in HTML - return re.sub('[\x00-\x08\x0B\x0C\x0E-\x1F]', ' ', text) - -def uescape(c): - if ord(c) < 0x80: - return c - else: - return '\\u%04x' % ord(c) - -_escapes = [ - ('\\', '\\\\'), ('"', '\\"'), ('\t', '\\t'), ('\n', '\\n'), - ('\r', '\\r'), ('\f', '\\f'), ('\b', '\\b'), -] - -def jsonescape(s): - for k, v in _escapes: - s = s.replace(k, v) - return ''.join(uescape(c) for c in s) - def json(obj): if obj is None or obj is False or obj is True: return {None: 'null', False: 'false', True: 'true'}[obj] @@ -180,49 +202,164 @@ else: raise TypeError('cannot encode type %s' % obj.__class__.__name__) +def _uescape(c): + if ord(c) < 0x80: + return c + else: + return '\\u%04x' % ord(c) + +_escapes = [ + ('\\', '\\\\'), ('"', '\\"'), ('\t', '\\t'), ('\n', '\\n'), + ('\r', '\\r'), ('\f', '\\f'), ('\b', '\\b'), +] + +def jsonescape(s): + for k, v in _escapes: + s = s.replace(k, v) + return ''.join(_uescape(c) for c in s) + +def localdate(text): + """:localdate: Date. Converts a date to local date.""" + return (text[0], util.makedate()[1]) + +def nonempty(str): + """:nonempty: Any text. Returns '(none)' if the string is empty.""" + return str or "(none)" + +def obfuscate(text): + """:obfuscate: Any text. Returns the input text rendered as a sequence of + XML entities. + """ + text = unicode(text, encoding.encoding, 'replace') + return ''.join(['&#%d;' % ord(c) for c in text]) + +def permissions(flags): + if "l" in flags: + return "lrwxrwxrwx" + if "x" in flags: + return "-rwxr-xr-x" + return "-rw-r--r--" + +def person(author): + """:person: Any text. Returns the text before an email address.""" + if not '@' in author: + return author + f = author.find('<') + if f != -1: + return author[:f].rstrip() + f = author.find('@') + return author[:f].replace('.', ' ') + +def rfc3339date(text): + """:rfc3339date: Date. 
Returns a date using the Internet date format + specified in RFC 3339: "2009-08-18T13:00:13+02:00". + """ + return util.datestr(text, "%Y-%m-%dT%H:%M:%S%1:%2") + +def rfc822date(text): + """:rfc822date: Date. Returns a date using the same format used in email + headers: "Tue, 18 Aug 2009 13:00:13 +0200". + """ + return util.datestr(text, "%a, %d %b %Y %H:%M:%S %1%2") + +def short(text): + """:short: Changeset hash. Returns the short form of a changeset hash, + i.e. a 12 hexadecimal digit string. + """ + return text[:12] + +def shortdate(text): + """:shortdate: Date. Returns a date like "2006-09-18".""" + return util.shortdate(text) + +def stringescape(text): + return text.encode('string_escape') + +def stringify(thing): + """:stringify: Any type. Turns the value into text by converting values into + text and concatenating them. + """ + if hasattr(thing, '__iter__') and not isinstance(thing, str): + return "".join([stringify(t) for t in thing if t is not None]) + return str(thing) + +def strip(text): + """:strip: Any text. Strips all leading and trailing whitespace.""" + return text.strip() + def stripdir(text): - '''Treat the text as path and strip a directory level, if possible.''' + """:stripdir: Treat the text as path and strip a directory level, if + possible. For example, "foo" and "foo/bar" becomes "foo". + """ dir = os.path.dirname(text) if dir == "": return os.path.basename(text) else: return dir -def nonempty(str): - return str or "(none)" +def tabindent(text): + """:tabindent: Any text. Returns the text, with every line except the + first starting with a tab character. + """ + return indent(text, '\t') + +def urlescape(text): + """:urlescape: Any text. Escapes all "special" characters. For example, + "foo bar" becomes "foo%20bar". + """ + return urllib.quote(text) + +def userfilter(text): + """:user: Any text. 
Returns the user portion of an email address.""" + return util.shortuser(text) + +def xmlescape(text): + text = (text + .replace('&', '&') + .replace('<', '<') + .replace('>', '>') + .replace('"', '"') + .replace("'", ''')) # ' invalid in HTML + return re.sub('[\x00-\x08\x0B\x0C\x0E-\x1F]', ' ', text) filters = { - "addbreaks": nl2br, - "basename": os.path.basename, - "stripdir": stripdir, + "addbreaks": addbreaks, "age": age, - "date": lambda x: util.datestr(x), + "basename": basename, + "date": datefilter, "domain": domain, - "email": util.email, - "escape": lambda x: cgi.escape(x, True), - "fill68": lambda x: fill(x, width=68), - "fill76": lambda x: fill(x, width=76), + "email": email, + "escape": escape, + "fill68": fill68, + "fill76": fill76, "firstline": firstline, - "tabindent": lambda x: indent(x, '\t'), - "hgdate": lambda x: "%d %d" % x, - "isodate": lambda x: util.datestr(x, '%Y-%m-%d %H:%M %1%2'), - "isodatesec": lambda x: util.datestr(x, '%Y-%m-%d %H:%M:%S %1%2'), + "hex": hexfilter, + "hgdate": hgdate, + "isodate": isodate, + "isodatesec": isodatesec, "json": json, "jsonescape": jsonescape, - "localdate": lambda x: (x[0], util.makedate()[1]), + "localdate": localdate, "nonempty": nonempty, "obfuscate": obfuscate, "permissions": permissions, "person": person, - "rfc822date": lambda x: util.datestr(x, "%a, %d %b %Y %H:%M:%S %1%2"), - "rfc3339date": lambda x: util.datestr(x, "%Y-%m-%dT%H:%M:%S%1:%2"), - "hex": node.hex, - "short": lambda x: x[:12], - "shortdate": util.shortdate, + "rfc3339date": rfc3339date, + "rfc822date": rfc822date, + "short": short, + "shortdate": shortdate, + "stringescape": stringescape, "stringify": stringify, - "strip": lambda x: x.strip(), - "urlescape": lambda x: urllib.quote(x), - "user": lambda x: util.shortuser(x), - "stringescape": lambda x: x.encode('string_escape'), + "strip": strip, + "stripdir": stripdir, + "tabindent": tabindent, + "urlescape": urlescape, + "user": userfilter, "xmlescape": xmlescape, } + +def makedoc(topic, doc): + return help.makeitemsdoc(topic, doc, '.. filtersmarker', filters) + +# tell hggettext to extract docstrings from these functions: +i18nfunctions = filters.values() diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/templatekw.py --- a/mercurial/templatekw.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/templatekw.py Wed Apr 20 12:44:32 2011 -0500 @@ -6,7 +6,7 @@ # GNU General Public License version 2 or any later version. from node import hex -import encoding, patch, util, error +import encoding, patch, util, error, help def showlist(name, values, plural=None, **args): '''expand set of values. @@ -73,8 +73,7 @@ def getfiles(repo, ctx, revcache): if 'files' not in revcache: - revcache['files'] = repo.status(ctx.parents()[0].node(), - ctx.node())[:3] + revcache['files'] = repo.status(ctx.p1().node(), ctx.node())[:3] return revcache['files'] def getlatesttags(repo, ctx, cache): @@ -143,32 +142,49 @@ def showauthor(repo, ctx, templ, **args): + """:author: String. The unmodified author of the changeset.""" return ctx.user() def showbranch(**args): + """:branch: String. The name of the branch on which the changeset was + committed. + """ return args['ctx'].branch() def showbranches(**args): + """:branches: List of strings. The name of the branch on which the + changeset was committed. Will be empty if the branch name was + default. + """ branch = args['ctx'].branch() if branch != 'default': return showlist('branch', [branch], plural='branches', **args) def showbookmarks(**args): + """:bookmarks: List of strings. 
Any bookmarks associated with the + changeset. + """ bookmarks = args['ctx'].bookmarks() return showlist('bookmark', bookmarks, **args) def showchildren(**args): + """:children: List of strings. The children of the changeset.""" ctx = args['ctx'] childrevs = ['%d:%s' % (cctx, cctx) for cctx in ctx.children()] return showlist('children', childrevs, **args) def showdate(repo, ctx, templ, **args): + """:date: Date information. The date when the changeset was committed.""" return ctx.date() def showdescription(repo, ctx, templ, **args): + """:desc: String. The text of the changeset description.""" return ctx.description().strip() def showdiffstat(repo, ctx, templ, **args): + """:diffstat: String. Statistics of changes with the following format: + "modified files: +added/-removed lines" + """ files, adds, removes = 0, 0, 0 for i in patch.diffstatdata(util.iterlines(ctx.diff())): files += 1 @@ -184,10 +200,14 @@ yield templ('extra', **args) def showfileadds(**args): + """:file_adds: List of strings. Files added by this changeset.""" repo, ctx, revcache = args['repo'], args['ctx'], args['revcache'] return showlist('file_add', getfiles(repo, ctx, revcache)[1], **args) def showfilecopies(**args): + """:file_copies: List of strings. Files copied in this changeset with + their sources. + """ cache, ctx = args['cache'], args['ctx'] copies = args['revcache'].get('copies') if copies is None: @@ -207,25 +227,37 @@ # provided before calling the templater, usually with a --copies # command line switch. def showfilecopiesswitch(**args): + """:file_copies_switch: List of strings. Like "file_copies" but displayed + only if the --copied switch is set. + """ copies = args['revcache'].get('copies') or [] c = [{'name': x[0], 'source': x[1]} for x in copies] return showlist('file_copy', c, plural='file_copies', **args) def showfiledels(**args): + """:file_dels: List of strings. Files removed by this changeset.""" repo, ctx, revcache = args['repo'], args['ctx'], args['revcache'] return showlist('file_del', getfiles(repo, ctx, revcache)[2], **args) def showfilemods(**args): + """:file_mods: List of strings. Files modified by this changeset.""" repo, ctx, revcache = args['repo'], args['ctx'], args['revcache'] return showlist('file_mod', getfiles(repo, ctx, revcache)[0], **args) def showfiles(**args): + """:files: List of strings. All files modified, added, or removed by this + changeset. + """ return showlist('file', args['ctx'].files(), **args) def showlatesttag(repo, ctx, templ, cache, **args): + """:latesttag: String. Most recent global tag in the ancestors of this + changeset. + """ return getlatesttags(repo, ctx, cache)[2] def showlatesttagdistance(repo, ctx, templ, cache, **args): + """:latesttagdistance: Integer. Longest path to the latest tag.""" return getlatesttags(repo, ctx, cache)[1] def showmanifest(**args): @@ -236,12 +268,17 @@ return templ('manifest', **args) def shownode(repo, ctx, templ, **args): + """:node: String. The changeset identification hash, as a 40 hexadecimal + digit string. + """ return ctx.hex() def showrev(repo, ctx, templ, **args): + """:rev: Integer. The repository-local changeset revision number.""" return ctx.rev() def showtags(**args): + """:tags: List of strings. Any tags associated with the changeset.""" return showlist('tag', args['ctx'].tags(), **args) # keywords are callables like: @@ -276,3 +313,8 @@ 'tags': showtags, } +def makedoc(topic, doc): + return help.makeitemsdoc(topic, doc, '.. 
keywordsmarker', keywords) + +# tell hggettext to extract docstrings from these functions: +i18nfunctions = keywords.values() diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/templater.py --- a/mercurial/templater.py Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/templater.py Wed Apr 20 12:44:32 2011 -0500 @@ -69,7 +69,6 @@ else: raise error.ParseError(_("syntax error"), pos) pos += 1 - data[2] = pos yield ('end', None, pos) def compiletemplate(tmpl, context): @@ -91,8 +90,8 @@ parsed.append(("string", tmpl[pos:n])) pd = [tmpl, n + 1, stop] - parsed.append(p.parse(pd)) - pos = pd[2] + parseres, pos = p.parse(pd) + parsed.append(parseres) return [compileexp(e, context) for e in parsed] diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/templates/coal/map --- a/mercurial/templates/coal/map Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/templates/coal/map Wed Apr 20 12:44:32 2011 -0500 @@ -94,14 +94,12 @@ filerename = '{file|escape}@' filelogrename = ' - - base: - - - {file|escape}@{node|short} - - - ' + + base + + {file|escape}@{node|short} + + ' fileannotateparent = ' parent: diff -r 3c753f9a2fbc -r ac1c75a7c6b5 mercurial/templates/gitweb/bookmarks.tmpl --- a/mercurial/templates/gitweb/bookmarks.tmpl Tue Apr 19 13:33:43 2011 -0500 +++ b/mercurial/templates/gitweb/bookmarks.tmpl Wed Apr 20 12:44:32 2011 -0500 @@ -8,7 +8,7 @@