--- a/contrib/check-code.py Mon Aug 02 10:48:31 2010 -0400
+++ b/contrib/check-code.py Mon Aug 02 10:55:51 2010 -0500
@@ -7,7 +7,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
-import re, glob
+import re, glob, os
import optparse
def repquote(m):
@@ -70,12 +70,19 @@
]
pypats = [
+ (r'^\s*def\s*\w+\s*\(.*,\s*\(',
+ "tuple parameter unpacking not available in Python 3+"),
+ (r'lambda\s*\(.*,.*\)',
+ "tuple parameter unpacking not available in Python 3+"),
+ (r'\breduce\s*\(.*', "reduce is not available in Python 3+"),
+ (r'\.has_key\b', "dict.has_key is not available in Python 3+"),
(r'^\s*\t', "don't use tabs"),
(r'\S;\s*\n', "semicolon"),
(r'\w,\w', "missing whitespace after ,"),
(r'\w[+/*\-<>]\w', "missing whitespace in expression"),
(r'^\s+\w+=\w+[^,)]$', "missing whitespace in assignment"),
(r'.{85}', "line too long"),
+ (r'.{81}', "warning: line over 80 characters"),
(r'[^\n]\Z', "no trailing newline"),
# (r'^\s+[^_ ][^_. ]+_[^_]+\s*=', "don't use underbars in identifiers"),
# (r'\w*[a-z][A-Z]\w*\s*=', "don't use camelcase in identifiers"),
@@ -154,7 +161,7 @@
def __init__(self):
self._lastseen = None
- def log(self, fname, lineno, line, msg):
+ def log(self, fname, lineno, line, msg, blame):
"""print error related a to given line of a given file.
The faulty line will also be printed but only once in the case
@@ -167,14 +174,26 @@
"""
msgid = fname, lineno, line
if msgid != self._lastseen:
- print "%s:%d:" % (fname, lineno)
+ if blame:
+ print "%s:%d (%s):" % (fname, lineno, blame)
+ else:
+ print "%s:%d:" % (fname, lineno)
print " > %s" % line
self._lastseen = msgid
print " " + msg
_defaultlogger = norepeatlogger()
-def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False):
+def getblame(f):
+ lines = []
+ for l in os.popen('hg annotate -un %s' % f):
+ start, line = l.split(':', 1)
+ user, rev = start.split()
+ lines.append((line[1:-1], user, rev))
+ return lines
+
+def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False,
+ blame=False):
"""checks style and portability of a given file
:f: filepath
@@ -185,6 +204,7 @@
return True if no error is found, False otherwise.
"""
+ blamecache = None
result = True
for name, match, filters, pats in checks:
fc = 0
@@ -204,7 +224,16 @@
if not warnings and msg.startswith("warning"):
continue
if re.search(p, l[1]):
- logfunc(f, n + 1, l[0], msg)
+ bd = ""
+ if blame:
+ bd = 'working directory'
+ if not blamecache:
+ blamecache = getblame(f)
+ if n < len(blamecache):
+ bl, bu, br = blamecache[n]
+ if bl == l[0]:
+ bd = '%s@%s' % (bu, br)
+ logfunc(f, n + 1, l[0], msg, bd)
fc += 1
result = False
if maxerr is not None and fc >= maxerr:
@@ -213,15 +242,16 @@
break
return result
-
if __name__ == "__main__":
parser = optparse.OptionParser("%prog [options] [files]")
parser.add_option("-w", "--warnings", action="store_true",
help="include warning-level checks")
parser.add_option("-p", "--per-file", type="int",
help="max warnings per file")
+ parser.add_option("-b", "--blame", action="store_true",
+ help="use annotate to generate blame info")
- parser.set_defaults(per_file=15, warnings=False)
+ parser.set_defaults(per_file=15, warnings=False, blame=False)
(options, args) = parser.parse_args()
if len(args) == 0:
@@ -230,4 +260,5 @@
check = args
for f in check:
- checkfile(f, maxerr=options.per_file, warnings=options.warnings)
+ checkfile(f, maxerr=options.per_file, warnings=options.warnings,
+ blame=options.blame)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/debugshell.py Mon Aug 02 10:55:51 2010 -0500
@@ -0,0 +1,21 @@
+# debugshell extension
+"""a python shell with repo, changelog & manifest objects"""
+
+import mercurial
+import code
+
+def debugshell(ui, repo, **opts):
+ objects = {
+ 'mercurial': mercurial,
+ 'repo': repo,
+ 'cl': repo.changelog,
+ 'mf': repo.manifest,
+ }
+ bannermsg = "loaded repo : %s\n" \
+ "using source: %s" % (repo.root,
+ mercurial.__path__[0])
+ code.interact(bannermsg, local=objects)
+
+cmdtable = {
+ "debugshell|dbsh": (debugshell, [])
+}
--- a/contrib/mergetools.hgrc Mon Aug 02 10:48:31 2010 -0400
+++ b/contrib/mergetools.hgrc Mon Aug 02 10:55:51 2010 -0500
@@ -13,6 +13,9 @@
gvimdiff.regname=path
gvimdiff.priority=-9
+vimdiff.args=$local $other $base
+vimdiff.priority=-10
+
merge.checkconflicts=True
merge.priority=-100
--- a/contrib/perf.py Mon Aug 02 10:48:31 2010 -0400
+++ b/contrib/perf.py Mon Aug 02 10:55:51 2010 -0500
@@ -133,6 +133,16 @@
title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')
timer(d, title)
+def perfrevlog(ui, repo, file_, **opts):
+ from mercurial import revlog
+ dist = opts['dist']
+ def d():
+ r = revlog.revlog(lambda fn: open(fn, 'rb'), file_)
+ for x in xrange(0, len(r), dist):
+ r.revision(r.node(x))
+
+ timer(d)
+
cmdtable = {
'perflookup': (perflookup, []),
'perfparents': (perfparents, []),
@@ -149,4 +159,7 @@
[('', 'rename', False, 'ask log to follow renames')]),
'perftemplating': (perftemplating, []),
'perfdiffwd': (perfdiffwd, []),
+ 'perfrevlog': (perfrevlog,
+ [('d', 'dist', 100, 'distance between the revisions')],
+ "[INDEXFILE]"),
}
--- a/hgext/bookmarks.py Mon Aug 02 10:48:31 2010 -0400
+++ b/hgext/bookmarks.py Mon Aug 02 10:55:51 2010 -0500
@@ -460,7 +460,7 @@
lmarks = repo.listkeys('bookmarks')
rmarks = remote.listkeys('bookmarks')
- diff = set(rmarks) - set(lmarks)
+ diff = sorted(set(rmarks) - set(lmarks))
for k in diff:
ui.write(" %-25s %s\n" % (k, rmarks[k][:12]))
--- a/hgext/bugzilla.py Mon Aug 02 10:48:31 2010 -0400
+++ b/hgext/bugzilla.py Mon Aug 02 10:55:51 2010 -0500
@@ -437,5 +437,5 @@
bz.update(id, ctx)
bz.notify(ids, util.email(ctx.user()))
except MySQLdb.MySQLError, err:
- raise util.Abort(_('database error: %s') % err[1])
+ raise util.Abort(_('database error: %s') % err.args[1])
--- a/hgext/churn.py Mon Aug 02 10:48:31 2010 -0400
+++ b/hgext/churn.py Mon Aug 02 10:55:51 2010 -0500
@@ -149,7 +149,8 @@
if opts.get('diffstat'):
width -= 15
- def format(name, (added, removed)):
+ def format(name, diffstat):
+ added, removed = diffstat
return "%s %15s %s%s\n" % (pad(name, maxname),
'+%d/-%d' % (added, removed),
ui.label('+' * charnum(added),
--- a/hgext/convert/__init__.py Mon Aug 02 10:48:31 2010 -0400
+++ b/hgext/convert/__init__.py Mon Aug 02 10:55:51 2010 -0500
@@ -86,7 +86,7 @@
rename path/to/source path/to/destination
- Comment lines start with '#'. A specificed path matches if it
+ Comment lines start with '#'. A specified path matches if it
equals the full relative name of a file or one of its parent
directories. The 'include' or 'exclude' directive with the longest
matching path applies, so line order does not matter.
@@ -96,8 +96,8 @@
exclusion of all other files and directories not explicitly
included. The 'exclude' directive causes files or directories to
be omitted. The 'rename' directive renames a file or directory if
- is converted. To rename from a subdirectory into the root of the
- repository, use '.' as the path to rename to.
+ it is converted. To rename from a subdirectory into the root of
+ the repository, use '.' as the path to rename to.
The splicemap is a file that allows insertion of synthetic
history, letting you specify the parents of a revision. This is
--- a/hgext/convert/filemap.py Mon Aug 02 10:48:31 2010 -0400
+++ b/hgext/convert/filemap.py Mon Aug 02 10:55:51 2010 -0500
@@ -33,10 +33,20 @@
def parse(self, path):
errs = 0
def check(name, mapping, listname):
+ if not name:
+ self.ui.warn(_('%s:%d: path to %s is missing\n') %
+ (lex.infile, lex.lineno, listname))
+ return 1
if name in mapping:
self.ui.warn(_('%s:%d: %r already in %s list\n') %
(lex.infile, lex.lineno, name, listname))
return 1
+ if (name.startswith('/') or
+ name.endswith('/') or
+ '//' in name):
+ self.ui.warn(_('%s:%d: superfluous / in %s %r\n') %
+ (lex.infile, lex.lineno, listname, name))
+ return 1
return 0
lex = shlex.shlex(open(path), path, True)
lex.wordchars += '!@#$%^&*()-=+[]{}|;:,./<>?'
@@ -298,7 +308,9 @@
self.origparents[rev] = parents
- if len(mparents) < 2 and not self.wanted(rev, wp):
+ closed = 'close' in self.commits[rev].extra
+
+ if len(mparents) < 2 and not closed and not self.wanted(rev, wp):
# We don't want this revision.
# Update our state and tell the convert process to map this
# revision to the same revision its parent as mapped to.
--- a/hgext/convert/hg.py Mon Aug 02 10:48:31 2010 -0400
+++ b/hgext/convert/hg.py Mon Aug 02 10:55:51 2010 -0500
@@ -175,7 +175,8 @@
if self.filemapmode and nparents == 1:
man = self.repo.manifest
mnode = self.repo.changelog.read(bin(p2))[0]
- if not man.cmp(m1node, man.revision(mnode)):
+ closed = 'close' in commit.extra
+ if not closed and not man.cmp(m1node, man.revision(mnode)):
self.ui.status(_("filtering out empty revision\n"))
self.repo.rollback()
return parent
--- a/hgext/convert/transport.py Mon Aug 02 10:48:31 2010 -0400
+++ b/hgext/convert/transport.py Mon Aug 02 10:55:51 2010 -0500
@@ -98,9 +98,8 @@
svn.ra.reparent(self.ra, self.svn_url.encode('utf8'))
class Reporter(object):
- def __init__(self, (reporter, report_baton)):
- self._reporter = reporter
- self._baton = report_baton
+ def __init__(self, reporter_data):
+ self._reporter, self._baton = reporter_data
def set_path(self, path, revnum, start_empty, lock_token, pool=None):
svn.ra.reporter2_invoke_set_path(self._reporter, self._baton,
--- a/hgext/inotify/client.py Mon Aug 02 10:48:31 2010 -0400
+++ b/hgext/inotify/client.py Mon Aug 02 10:55:51 2010 -0500
@@ -27,11 +27,11 @@
except (OSError, socket.error), err:
autostart = self.ui.configbool('inotify', 'autostart', True)
- if err[0] == errno.ECONNREFUSED:
+ if err.args[0] == errno.ECONNREFUSED:
self.ui.warn(_('inotify-client: found dead inotify server '
'socket; removing it\n'))
os.unlink(os.path.join(self.root, '.hg', 'inotify.sock'))
- if err[0] in (errno.ECONNREFUSED, errno.ENOENT) and autostart:
+ if err.args[0] in (errno.ECONNREFUSED, errno.ENOENT) and autostart:
self.ui.debug('(starting inotify server)\n')
try:
try:
@@ -49,13 +49,13 @@
return function(self, *args)
except socket.error, err:
self.ui.warn(_('inotify-client: could not talk to new '
- 'inotify server: %s\n') % err[-1])
- elif err[0] in (errno.ECONNREFUSED, errno.ENOENT):
+ 'inotify server: %s\n') % err.args[-1])
+ elif err.args[0] in (errno.ECONNREFUSED, errno.ENOENT):
# silently ignore normal errors if autostart is False
self.ui.debug('(inotify server not running)\n')
else:
self.ui.warn(_('inotify-client: failed to contact inotify '
- 'server: %s\n') % err[-1])
+ 'server: %s\n') % err.args[-1])
self.ui.traceback()
raise QueryFailed('inotify query failed')
@@ -75,7 +75,7 @@
try:
self.sock.connect(sockpath)
except socket.error, err:
- if err[0] == "AF_UNIX path too long":
+ if err.args[0] == "AF_UNIX path too long":
sockpath = os.readlink(sockpath)
self.sock.connect(sockpath)
else:
--- a/hgext/inotify/linux/_inotify.c Mon Aug 02 10:48:31 2010 -0400
+++ b/hgext/inotify/linux/_inotify.c Mon Aug 02 10:55:51 2010 -0500
@@ -15,6 +15,15 @@
#include <sys/ioctl.h>
#include <unistd.h>
+#include <util.h>
+
+/* Variables used in the event string representation */
+static PyObject *join;
+static PyObject *er_wm;
+static PyObject *er_wmc;
+static PyObject *er_wmn;
+static PyObject *er_wmcn;
+
static PyObject *init(PyObject *self, PyObject *args)
{
PyObject *ret = NULL;
@@ -312,8 +321,8 @@
};
PyDoc_STRVAR(
- event_doc,
- "event: Structure describing an inotify event.");
+ event_doc,
+ "event: Structure describing an inotify event.");
static PyObject *event_new(PyTypeObject *t, PyObject *a, PyObject *k)
{
@@ -327,20 +336,14 @@
Py_XDECREF(evt->cookie);
Py_XDECREF(evt->name);
- (*evt->ob_type->tp_free)(evt);
+ Py_TYPE(evt)->tp_free(evt);
}
static PyObject *event_repr(struct event *evt)
{
- int wd = PyInt_AsLong(evt->wd);
int cookie = evt->cookie == Py_None ? -1 : PyInt_AsLong(evt->cookie);
PyObject *ret = NULL, *pymasks = NULL, *pymask = NULL;
- PyObject *join = NULL;
- char *maskstr;
-
- join = PyString_FromString("|");
- if (join == NULL)
- goto bail;
+ PyObject *tuple = NULL, *formatstr = NULL;
pymasks = decode_mask(PyInt_AsLong(evt->mask));
if (pymasks == NULL)
@@ -350,33 +353,35 @@
if (pymask == NULL)
goto bail;
- maskstr = PyString_AsString(pymask);
-
if (evt->name != Py_None) {
- PyObject *pyname = PyString_Repr(evt->name, 1);
- char *name = pyname ? PyString_AsString(pyname) : "???";
-
- if (cookie == -1)
- ret = PyString_FromFormat(
- "event(wd=%d, mask=%s, name=%s)",
- wd, maskstr, name);
- else
- ret = PyString_FromFormat("event(wd=%d, mask=%s, "
- "cookie=0x%x, name=%s)",
- wd, maskstr, cookie, name);
-
- Py_XDECREF(pyname);
+ if (cookie == -1) {
+ formatstr = er_wmn;
+ tuple = PyTuple_Pack(3, evt->wd, pymask, evt->name);
+ }
+ else {
+ formatstr = er_wmcn;
+ tuple = PyTuple_Pack(4, evt->wd, pymask,
+ evt->cookie, evt->name);
+ }
} else {
- if (cookie == -1)
- ret = PyString_FromFormat("event(wd=%d, mask=%s)",
- wd, maskstr);
+ if (cookie == -1) {
+ formatstr = er_wm;
+ tuple = PyTuple_Pack(2, evt->wd, pymask);
+ }
else {
- ret = PyString_FromFormat(
- "event(wd=%d, mask=%s, cookie=0x%x)",
- wd, maskstr, cookie);
+ formatstr = er_wmc;
+ tuple = PyTuple_Pack(3, evt->wd, pymask, evt->cookie);
}
}
+ if (tuple == NULL)
+ goto bail;
+
+ ret = PyNumber_Remainder(formatstr, tuple);
+
+ if (ret == NULL)
+ goto bail;
+
goto done;
bail:
Py_CLEAR(ret);
@@ -384,14 +389,13 @@
done:
Py_XDECREF(pymask);
Py_XDECREF(pymasks);
- Py_XDECREF(join);
+ Py_XDECREF(tuple);
return ret;
}
static PyTypeObject event_type = {
- PyObject_HEAD_INIT(NULL)
- 0, /*ob_size*/
+ PyVarObject_HEAD_INIT(NULL, 0)
"_inotify.event", /*tp_name*/
sizeof(struct event), /*tp_basicsize*/
0, /*tp_itemsize*/
@@ -561,6 +565,17 @@
return ret;
}
+static int init_globals(void)
+{
+ join = PyString_FromString("|");
+ er_wm = PyString_FromString("event(wd=%d, mask=%s)");
+ er_wmn = PyString_FromString("event(wd=%d, mask=%s, name=%s)");
+ er_wmc = PyString_FromString("event(wd=%d, mask=%s, cookie=0x%x)");
+ er_wmcn = PyString_FromString("event(wd=%d, mask=%s, cookie=0x%x, name=%s)");
+
+ return join && er_wm && er_wmn && er_wmc && er_wmcn;
+}
+
PyDoc_STRVAR(
read_doc,
"read(fd, bufsize[=65536]) -> list_of_events\n"
@@ -585,6 +600,35 @@
{NULL},
};
+#ifdef IS_PY3K
+static struct PyModuleDef _inotify_module = {
+ PyModuleDef_HEAD_INIT,
+ "_inotify",
+ doc,
+ -1,
+ methods
+};
+
+PyMODINIT_FUNC PyInit__inotify(void)
+{
+ PyObject *mod, *dict;
+
+ mod = PyModule_Create(&_inotify_module);
+
+ if (mod == NULL)
+ return NULL;
+
+ if (!init_globals())
+ return;
+
+ dict = PyModule_GetDict(mod);
+
+ if (dict)
+ define_consts(dict);
+
+ return mod;
+}
+#else
void init_inotify(void)
{
PyObject *mod, *dict;
@@ -592,6 +636,9 @@
if (PyType_Ready(&event_type) == -1)
return;
+ if (!init_globals())
+ return;
+
mod = Py_InitModule3("_inotify", methods, doc);
dict = PyModule_GetDict(mod);
@@ -599,3 +646,4 @@
if (dict)
define_consts(dict);
}
+#endif
--- a/hgext/inotify/linuxserver.py Mon Aug 02 10:48:31 2010 -0400
+++ b/hgext/inotify/linuxserver.py Mon Aug 02 10:55:51 2010 -0500
@@ -117,7 +117,7 @@
try:
events = cls.poll.poll(timeout)
except select.error, err:
- if err[0] == errno.EINTR:
+ if err.args[0] == errno.EINTR:
continue
raise
if events:
--- a/hgext/inotify/server.py Mon Aug 02 10:48:31 2010 -0400
+++ b/hgext/inotify/server.py Mon Aug 02 10:55:51 2010 -0500
@@ -336,10 +336,10 @@
try:
self.sock.bind(self.sockpath)
except socket.error, err:
- if err[0] == errno.EADDRINUSE:
+ if err.args[0] == errno.EADDRINUSE:
raise AlreadyStartedException(_('cannot start: socket is '
'already bound'))
- if err[0] == "AF_UNIX path too long":
+ if err.args[0] == "AF_UNIX path too long":
if os.path.islink(self.sockpath) and \
not os.path.exists(self.sockpath):
raise util.Abort('inotify-server: cannot start: '
@@ -437,7 +437,7 @@
finally:
sock.shutdown(socket.SHUT_WR)
except socket.error, err:
- if err[0] != errno.EPIPE:
+ if err.args[0] != errno.EPIPE:
raise
if sys.platform == 'linux2':
--- a/hgext/keyword.py Mon Aug 02 10:48:31 2010 -0400
+++ b/hgext/keyword.py Mon Aug 02 10:55:51 2010 -0500
@@ -158,9 +158,9 @@
kwpat = r'\$(%s)(: [^$\n\r]*? )??\$' % '|'.join(escaped)
self.re_kw = re.compile(kwpat)
- templatefilters.filters['utcdate'] = utcdate
- templatefilters.filters['svnisodate'] = svnisodate
- templatefilters.filters['svnutcdate'] = svnutcdate
+ templatefilters.filters.update({'utcdate': utcdate,
+ 'svnisodate': svnisodate,
+ 'svnutcdate': svnutcdate})
def substitute(self, data, path, ctx, subfunc):
'''Replaces keywords in data with expanded template.'''
--- a/hgext/mq.py Mon Aug 02 10:48:31 2010 -0400
+++ b/hgext/mq.py Mon Aug 02 10:55:51 2010 -0500
@@ -1691,11 +1691,22 @@
if existing:
if filename == '-':
raise util.Abort(_('-e is incompatible with import from -'))
- if not patchname:
- patchname = normname(filename)
- self.check_reserved_name(patchname)
- if not os.path.isfile(self.join(patchname)):
- raise util.Abort(_("patch %s does not exist") % patchname)
+ filename = normname(filename)
+ self.check_reserved_name(filename)
+ originpath = self.join(filename)
+ if not os.path.isfile(originpath):
+ raise util.Abort(_("patch %s does not exist") % filename)
+
+ if patchname:
+ self.check_reserved_name(patchname)
+ checkfile(patchname)
+
+ self.ui.write(_('renaming %s to %s\n')
+ % (filename, patchname))
+ util.rename(originpath, self.join(patchname))
+ else:
+ patchname = filename
+
else:
try:
if filename == '-':
@@ -1810,6 +1821,10 @@
To import a patch from standard input, pass - as the patch file.
When importing from standard input, a patch name must be specified
using the --name flag.
+
+ To import an existing patch while renaming it::
+
+ hg qimport -e existing-patch -n new-name
"""
q = repo.mq
try:
@@ -2003,7 +2018,7 @@
"""
msg = cmdutil.logmessage(opts)
def getmsg():
- return ui.edit(msg, ui.username())
+ return ui.edit(msg, opts['user'] or ui.username())
q = repo.mq
opts['msg'] = msg
if opts.get('edit'):
@@ -2559,7 +2574,7 @@
if not opts['applied'] and not revrange:
raise util.Abort(_('no revisions specified'))
elif opts['applied']:
- revrange = ('qbase:qtip',) + revrange
+ revrange = ('qbase::qtip',) + revrange
q = repo.mq
if not q.applied:
--- a/hgext/rebase.py Mon Aug 02 10:48:31 2010 -0400
+++ b/hgext/rebase.py Mon Aug 02 10:55:51 2010 -0500
@@ -148,9 +148,13 @@
targetancestors = set(repo.changelog.ancestors(target))
targetancestors.add(target)
- for rev in sorted(state):
+ sortedstate = sorted(state)
+ total = len(sortedstate)
+ pos = 0
+ for rev in sortedstate:
+ pos += 1
if state[rev] == -1:
- ui.debug("rebasing %d:%s\n" % (rev, repo[rev]))
+ ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, repo[rev])), _(' changesets'), total)
storestatus(repo, originalwd, target, state, collapsef, keepf,
keepbranchesf, external)
p1, p2 = defineparents(repo, rev, target, state,
@@ -179,6 +183,7 @@
skipped.add(rev)
state[rev] = p1
+ ui.progress(_('rebasing'), None)
ui.note(_('rebase merging completed\n'))
if collapsef and not keepopen:
--- a/hgext/record.py Mon Aug 02 10:48:31 2010 -0400
+++ b/hgext/record.py Mon Aug 02 10:55:51 2010 -0500
@@ -10,7 +10,7 @@
from mercurial.i18n import gettext, _
from mercurial import cmdutil, commands, extensions, hg, mdiff, patch
from mercurial import util
-import copy, cStringIO, errno, operator, os, re, tempfile
+import copy, cStringIO, errno, os, re, tempfile
lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
@@ -97,7 +97,7 @@
if h.startswith('---'):
fp.write(_('%d hunks, %d lines changed\n') %
(len(self.hunks),
- sum([h.added + h.removed for h in self.hunks])))
+ sum([max(h.added, h.removed) for h in self.hunks])))
break
fp.write(h)
@@ -186,7 +186,8 @@
self.hunk = []
self.stream = []
- def addrange(self, (fromstart, fromend, tostart, toend, proc)):
+ def addrange(self, limits):
+ fromstart, fromend, tostart, toend, proc = limits
self.fromline = int(fromstart)
self.toline = int(tostart)
self.proc = proc
@@ -354,8 +355,8 @@
applied[chunk.filename()].append(chunk)
else:
fixoffset += chunk.removed - chunk.added
- return reduce(operator.add, [h for h in applied.itervalues()
- if h[0].special() or len(h) > 1], [])
+ return sum([h for h in applied.itervalues()
+ if h[0].special() or len(h) > 1], [])
def record(ui, repo, *pats, **opts):
'''interactively select changes to commit
@@ -485,7 +486,8 @@
# 3a. apply filtered patch to clean repo (clean)
if backups:
- hg.revert(repo, repo.dirstate.parents()[0], backups.has_key)
+ hg.revert(repo, repo.dirstate.parents()[0],
+ lambda key: key in backups)
# 3b. (apply)
if dopatch:
--- a/mercurial/archival.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/archival.py Mon Aug 02 10:55:51 2010 -0500
@@ -12,7 +12,7 @@
import cStringIO, os, stat, tarfile, time, zipfile
import zlib, gzip
-def tidyprefix(dest, prefix, suffixes):
+def tidyprefix(dest, kind, prefix):
'''choose prefix to use for names in archive. make sure prefix is
safe for consumers.'''
@@ -23,7 +23,7 @@
raise ValueError('dest must be string if no prefix')
prefix = os.path.basename(dest)
lower = prefix.lower()
- for sfx in suffixes:
+ for sfx in exts.get(kind, []):
if lower.endswith(sfx):
prefix = prefix[:-len(sfx)]
break
@@ -35,6 +35,20 @@
raise util.Abort(_('archive prefix contains illegal components'))
return prefix
+exts = {
+ 'tar': ['.tar'],
+ 'tbz2': ['.tbz2', '.tar.bz2'],
+ 'tgz': ['.tgz', '.tar.gz'],
+ 'zip': ['.zip'],
+ }
+
+def guesskind(dest):
+ for kind, extensions in exts.iteritems():
+ if util.any(dest.endswith(ext) for ext in extensions):
+ return kind
+ return None
+
+
class tarit(object):
'''write archive to tar file or stream. can write uncompressed,
or compress with gzip or bzip2.'''
@@ -66,9 +80,7 @@
if fname:
self.fileobj.write(fname + '\000')
- def __init__(self, dest, prefix, mtime, kind=''):
- self.prefix = tidyprefix(dest, prefix, ['.tar', '.tar.bz2', '.tar.gz',
- '.tgz', '.tbz2'])
+ def __init__(self, dest, mtime, kind=''):
self.mtime = mtime
def taropen(name, mode, fileobj=None):
@@ -90,7 +102,7 @@
self.z = taropen(name='', mode='w|', fileobj=dest)
def addfile(self, name, mode, islink, data):
- i = tarfile.TarInfo(self.prefix + name)
+ i = tarfile.TarInfo(name)
i.mtime = self.mtime
i.size = len(data)
if islink:
@@ -129,8 +141,7 @@
'''write archive to zip file or stream. can write uncompressed,
or compressed with deflate.'''
- def __init__(self, dest, prefix, mtime, compress=True):
- self.prefix = tidyprefix(dest, prefix, ('.zip',))
+ def __init__(self, dest, mtime, compress=True):
if not isinstance(dest, str):
try:
dest.tell()
@@ -142,7 +153,7 @@
self.date_time = time.gmtime(mtime)[:6]
def addfile(self, name, mode, islink, data):
- i = zipfile.ZipInfo(self.prefix + name, self.date_time)
+ i = zipfile.ZipInfo(name, self.date_time)
i.compress_type = self.z.compression
# unzip will not honor unix file modes unless file creator is
# set to unix (id 3).
@@ -160,9 +171,7 @@
class fileit(object):
'''write archive as files in directory.'''
- def __init__(self, name, prefix, mtime):
- if prefix:
- raise util.Abort(_('cannot give prefix when archiving to files'))
+ def __init__(self, name, mtime):
self.basedir = name
self.opener = util.opener(self.basedir)
@@ -182,9 +191,9 @@
archivers = {
'files': fileit,
'tar': tarit,
- 'tbz2': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'bz2'),
- 'tgz': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'gz'),
- 'uzip': lambda name, prefix, mtime: zipit(name, prefix, mtime, False),
+ 'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'),
+ 'tgz': lambda name, mtime: tarit(name, mtime, 'gz'),
+ 'uzip': lambda name, mtime: zipit(name, mtime, False),
'zip': zipit,
}
@@ -204,19 +213,25 @@
prefix is name of path to put before every archive member.'''
+ if kind == 'files':
+ if prefix:
+ raise util.Abort(_('cannot give prefix when archiving to files'))
+ else:
+ prefix = tidyprefix(dest, kind, prefix)
+
def write(name, mode, islink, getdata):
if matchfn and not matchfn(name):
return
data = getdata()
if decode:
data = repo.wwritedata(name, data)
- archiver.addfile(name, mode, islink, data)
+ archiver.addfile(prefix + name, mode, islink, data)
if kind not in archivers:
raise util.Abort(_("unknown archive type '%s'") % kind)
ctx = repo[node]
- archiver = archivers[kind](dest, prefix, mtime or ctx.date()[0])
+ archiver = archivers[kind](dest, mtime or ctx.date()[0])
if repo.ui.configbool("ui", "archivemeta", True):
def metadata():
--- a/mercurial/changegroup.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/changegroup.py Mon Aug 02 10:55:51 2010 -0500
@@ -61,8 +61,7 @@
# We want to gather manifests needed and filelogs affected.
def collect(node):
c = cl.read(node)
- for fn in c[3]:
- files.setdefault(fn, fn)
+ files.update(c[3])
mmfs.setdefault(c[0], node)
return collect
--- a/mercurial/cmdutil.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/cmdutil.py Mon Aug 02 10:55:51 2010 -0500
@@ -1045,36 +1045,43 @@
fncache = {}
change = util.cachefunc(repo.changectx)
+ # First step is to fill wanted, the set of revisions that we want to yield.
+ # When it does not induce extra cost, we also fill fncache for revisions in
+ # wanted: a cache of filenames that were changed (ctx.files()) and that
+ # match the file filtering conditions.
+
if not slowpath and not match.files():
# No files, no patterns. Display all revs.
wanted = set(revs)
copies = []
if not slowpath:
+ # We only have to read through the filelog to find wanted revisions
+
+ minrev, maxrev = min(revs), max(revs)
# Only files, no patterns. Check the history of each file.
- def filerevgen(filelog, node):
+ def filerevgen(filelog, last):
cl_count = len(repo)
- if node is None:
- last = len(filelog) - 1
- else:
- last = filelog.rev(node)
- for i, window in increasing_windows(last, nullrev):
- revs = []
- for j in xrange(i - window, i + 1):
- n = filelog.node(j)
- revs.append((filelog.linkrev(j),
- follow and filelog.renamed(n)))
- for rev in reversed(revs):
- # only yield rev for which we have the changelog, it can
- # happen while doing "hg log" during a pull or commit
- if rev[0] < cl_count:
- yield rev
+ revs = []
+ for j in xrange(0, last + 1):
+ linkrev = filelog.linkrev(j)
+ if linkrev < minrev:
+ continue
+ # only yield rev for which we have the changelog, it can
+ # happen while doing "hg log" during a pull or commit
+ if linkrev > maxrev or linkrev >= cl_count:
+ break
+ n = filelog.node(j)
+ revs.append((filelog.linkrev(j),
+ follow and filelog.renamed(n)))
+
+ for rev in reversed(revs):
+ yield rev
def iterfiles():
for filename in match.files():
yield filename, None
for filename_node in copies:
yield filename_node
- minrev, maxrev = min(revs), max(revs)
for file_, node in iterfiles():
filelog = repo.file(file_)
if not len(filelog):
@@ -1088,31 +1095,33 @@
break
else:
continue
- for rev, copied in filerevgen(filelog, node):
- if rev <= maxrev:
- if rev < minrev:
- break
- fncache.setdefault(rev, [])
- fncache[rev].append(file_)
- wanted.add(rev)
- if copied:
- copies.append(copied)
+
+ if node is None:
+ last = len(filelog) - 1
+ else:
+ last = filelog.rev(node)
+
+ for rev, copied in filerevgen(filelog, last):
+ fncache.setdefault(rev, [])
+ fncache[rev].append(file_)
+ wanted.add(rev)
+ if copied:
+ copies.append(copied)
if slowpath:
+ # We have to read the changelog to match filenames against
+ # changed files
+
if follow:
raise util.Abort(_('can only follow copies/renames for explicit '
'filenames'))
# The slow path checks files modified in every changeset.
- def changerevgen():
- for i, window in increasing_windows(len(repo) - 1, nullrev):
- for j in xrange(i - window, i + 1):
- yield change(j)
-
- for ctx in changerevgen():
+ for i in sorted(revs):
+ ctx = change(i)
matches = filter(match, ctx.files())
if matches:
- fncache[ctx.rev()] = matches
- wanted.add(ctx.rev())
+ fncache[i] = matches
+ wanted.add(i)
class followfilter(object):
def __init__(self, onlyfirst=False):
@@ -1161,6 +1170,8 @@
if ff.match(x):
wanted.discard(x)
+ # Now that wanted is correctly initialized, we can iterate over the
+ # revision range, yielding only revisions in wanted.
def iterate():
if follow and not match.files():
ff = followfilter(onlyfirst=opts.get('follow_first'))
@@ -1171,7 +1182,6 @@
return rev in wanted
for i, window in increasing_windows(0, len(revs)):
- change = util.cachefunc(repo.changectx)
nrevs = [rev for rev in revs[i:i + window] if want(rev)]
for rev in sorted(nrevs):
fns = fncache.get(rev)
--- a/mercurial/commands.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/commands.py Mon Aug 02 10:55:51 2010 -0500
@@ -83,7 +83,7 @@
Returns 0 if all files are successfully added.
"""
try:
- sim = float(opts.get('similarity') or 0)
+ sim = float(opts.get('similarity') or 100)
except ValueError:
raise util.Abort(_('similarity must be a number'))
if sim < 0 or sim > 100:
@@ -197,20 +197,7 @@
if os.path.realpath(dest) == repo.root:
raise util.Abort(_('repository root cannot be destination'))
- def guess_type():
- exttypes = {
- 'tar': ['.tar'],
- 'tbz2': ['.tbz2', '.tar.bz2'],
- 'tgz': ['.tgz', '.tar.gz'],
- 'zip': ['.zip'],
- }
-
- for type, extensions in exttypes.items():
- if util.any(dest.endswith(ext) for ext in extensions):
- return type
- return None
-
- kind = opts.get('type') or guess_type() or 'files'
+ kind = opts.get('type') or archival.guesskind(dest) or 'files'
prefix = opts.get('prefix')
if dest == '-':
@@ -903,7 +890,7 @@
# we don't want to fail in merges during buildup
os.environ['HGMERGE'] = 'internal:local'
- def writefile(fname, text, fmode="w"):
+ def writefile(fname, text, fmode="wb"):
f = open(fname, fmode)
try:
f.write(text)
@@ -938,7 +925,7 @@
merge(ui, repo, node=p2)
if mergeable_file:
- f = open("mf", "r+")
+ f = open("mf", "rb+")
try:
lines = f.read().split("\n")
lines[id * linesperrev] += " r%i" % id
@@ -948,7 +935,7 @@
f.close()
if appended_file:
- writefile("af", "r%i\n" % id, "a")
+ writefile("af", "r%i\n" % id, "ab")
if overwritten_file:
writefile("of", "r%i\n" % id)
@@ -1473,11 +1460,11 @@
given using a format string. The formatting rules are as follows:
:``%%``: literal "%" character
- :``%H``: changeset hash (40 bytes of hexadecimal)
+ :``%H``: changeset hash (40 hexadecimal digits)
:``%N``: number of patches being generated
:``%R``: changeset revision number
:``%b``: basename of the exporting repository
- :``%h``: short-form changeset hash (12 bytes of hexadecimal)
+ :``%h``: short-form changeset hash (12 hexadecimal digits)
:``%n``: zero-padded sequence number, starting at 1
:``%r``: zero-padded changeset revision number
@@ -1869,7 +1856,10 @@
if not doc:
doc = _("(no help text available)")
if hasattr(entry[0], 'definition'): # aliased command
- doc = _('alias for: hg %s\n\n%s') % (entry[0].definition, doc)
+ if entry[0].definition.startswith('!'): # shell alias
+ doc = _('shell alias for::\n\n %s') % entry[0].definition[1:]
+ else:
+ doc = _('alias for: hg %s\n\n%s') % (entry[0].definition, doc)
if ui.quiet:
doc = doc.splitlines()[0]
keep = ui.verbose and ['verbose'] or []
--- a/mercurial/context.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/context.py Mon Aug 02 10:55:51 2010 -0500
@@ -352,12 +352,12 @@
def size(self):
return self._filelog.size(self._filerev)
- def cmp(self, text):
- """compare text with stored file revision
+ def cmp(self, fctx):
+ """compare with other file context
- returns True if text is different than what is stored.
+ returns True if different than fctx.
"""
- return self._filelog.cmp(self._filenode, text)
+ return self._filelog.cmp(self._filenode, fctx.data())
def renamed(self):
"""check if file was actually renamed in this changeset revision
@@ -935,12 +935,14 @@
raise
return (t, tz)
- def cmp(self, text):
- """compare text with disk content
+ def cmp(self, fctx):
+ """compare with other file context
- returns True if text is different than what is on disk.
+ returns True if different than fctx.
"""
- return self._repo.wread(self._path) != text
+ # fctx should be a filectx (not a wfctx)
+ # invert comparison to reuse the same code path
+ return fctx.cmp(self)
class memctx(object):
"""Use memctx to perform in-memory commits via localrepo.commitctx().
--- a/mercurial/discovery.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/discovery.py Mon Aug 02 10:55:51 2010 -0500
@@ -35,7 +35,9 @@
exist on the remote side and that no child of a node of base exists
in both remote and repo.
Furthermore base will be updated to include the nodes that exists
- in repo and remote but no children exists in repo and remote.
+ in repo and remote but no children exist in both repo and remote.
+ In other words, base is the set of heads of the DAG resulting from
+ the intersection of the nodes from repo and remote.
If a list of heads is specified, return only nodes which are heads
or ancestors of these heads.
@@ -172,18 +174,17 @@
return base.keys(), list(fetch), heads
-def findoutgoing(repo, remote, base=None, heads=None, force=False):
+def findoutgoing(repo, remote, base=None, remoteheads=None, force=False):
"""Return list of nodes that are roots of subsets not in remote
If base dict is specified, assume that these nodes and their parents
exist on the remote side.
- If a list of heads is specified, return only nodes which are heads
- or ancestors of these heads, and return a second element which
- contains all remote heads which get new children.
+ If remoteheads is specified, assume it is the list of the heads from
+ the remote repository.
"""
if base is None:
base = {}
- findincoming(repo, remote, base, heads, force=force)
+ findincoming(repo, remote, base, remoteheads, force=force)
repo.ui.debug("common changesets up to "
+ " ".join(map(short, base.keys())) + "\n")
@@ -203,22 +204,12 @@
# find every node whose parents have been pruned
subset = []
# find every remote head that will get new children
- updated_heads = set()
for n in remain:
p1, p2 = repo.changelog.parents(n)
if p1 not in remain and p2 not in remain:
subset.append(n)
- if heads:
- if p1 in heads:
- updated_heads.add(p1)
- if p2 in heads:
- updated_heads.add(p2)
- # this is the set of all roots we have to push
- if heads:
- return subset, list(updated_heads)
- else:
- return subset
+ return subset
def prepush(repo, remote, force, revs, newbranch):
'''Analyze the local and remote repositories and determine which
@@ -235,34 +226,18 @@
successive changegroup chunks ready to be sent over the wire and
remoteheads is the list of remote heads.'''
common = {}
- remote_heads = remote.heads()
- inc = findincoming(repo, remote, common, remote_heads, force=force)
+ remoteheads = remote.heads()
+ inc = findincoming(repo, remote, common, remoteheads, force=force)
cl = repo.changelog
- update, updated_heads = findoutgoing(repo, remote, common, remote_heads)
+ update = findoutgoing(repo, remote, common, remoteheads)
outg, bases, heads = cl.nodesbetween(update, revs)
if not bases:
repo.ui.status(_("no changes found\n"))
return None, 1
- if not force and remote_heads != [nullid]:
-
- def fail_multiple_heads(unsynced, branch=None):
- if branch:
- msg = _("abort: push creates new remote heads"
- " on branch '%s'!\n") % branch
- else:
- msg = _("abort: push creates new remote heads!\n")
- repo.ui.warn(msg)
- if unsynced:
- repo.ui.status(_("(you should pull and merge or"
- " use push -f to force)\n"))
- else:
- repo.ui.status(_("(did you forget to merge?"
- " use push -f to force)\n"))
- return None, 0
-
+ if not force and remoteheads != [nullid]:
if remote.capable('branchmap'):
# Check for each named branch if we're creating new remote heads.
# To be a remote head after push, node must be either:
@@ -281,12 +256,10 @@
newbranches = branches - set(remotemap)
if newbranches and not newbranch: # new branch requires --new-branch
branchnames = ', '.join(sorted(newbranches))
- repo.ui.warn(_("abort: push creates "
- "new remote branches: %s!\n")
- % branchnames)
- repo.ui.status(_("(use 'hg push --new-branch' to create new "
- "remote branches)\n"))
- return None, 0
+ raise util.Abort(_("push creates new remote branches: %s!")
+ % branchnames,
+ hint=_("use 'hg push --new-branch' to create"
+ " new remote branches"))
branches.difference_update(newbranches)
# 3. Construct the initial oldmap and newmap dicts.
@@ -299,11 +272,11 @@
newmap = {}
unsynced = set()
for branch in branches:
- remoteheads = remotemap[branch]
- prunedheads = [h for h in remoteheads if h in cl.nodemap]
- oldmap[branch] = prunedheads
- newmap[branch] = list(prunedheads)
- if len(remoteheads) > len(prunedheads):
+ remotebrheads = remotemap[branch]
+ prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
+ oldmap[branch] = prunedbrheads
+ newmap[branch] = list(prunedbrheads)
+ if len(remotebrheads) > len(prunedbrheads):
unsynced.add(branch)
# 4. Update newmap with outgoing changes.
@@ -311,23 +284,12 @@
ctxgen = (repo[n] for n in outg)
repo._updatebranchcache(newmap, ctxgen)
- # 5. Check for new heads.
- # If there are more heads after the push than before, a suitable
- # warning, depending on unsynced status, is displayed.
- for branch in branches:
- if len(newmap[branch]) > len(oldmap[branch]):
- return fail_multiple_heads(branch in unsynced, branch)
-
- # 6. Check for unsynced changes on involved branches.
- if unsynced:
- repo.ui.warn(_("note: unsynced remote changes!\n"))
-
else:
- # Old servers: Check for new topological heads.
- # Code based on _updatebranchcache.
- newheads = set(h for h in remote_heads if h in cl.nodemap)
- oldheadcnt = len(newheads)
- newheads.update(outg)
+ # 1-4b. old servers: Check for new topological heads.
+ # Construct {old,new}map with branch = None (topological branch).
+ # (code based on _updatebranchcache)
+ oldheads = set(h for h in remoteheads if h in cl.nodemap)
+ newheads = oldheads.union(outg)
if len(newheads) > 1:
for latest in reversed(outg):
if latest not in newheads:
@@ -336,10 +298,31 @@
reachable = cl.reachable(latest, cl.node(minhrev))
reachable.remove(latest)
newheads.difference_update(reachable)
- if len(newheads) > oldheadcnt:
- return fail_multiple_heads(inc)
- if inc:
- repo.ui.warn(_("note: unsynced remote changes!\n"))
+ branches = set([None])
+ newmap = {None: newheads}
+ oldmap = {None: oldheads}
+ unsynced = inc and branches or set()
+
+ # 5. Check for new heads.
+ # If there are more heads after the push than before, a suitable
+ # warning, depending on unsynced status, is displayed.
+ for branch in branches:
+ if len(newmap[branch]) > len(oldmap[branch]):
+ if branch:
+ msg = _("push creates new remote heads "
+ "on branch '%s'!") % branch
+ else:
+ msg = _("push creates new remote heads!")
+
+ if branch in unsynced:
+ hint = _("you should pull and merge or use push -f to force")
+ else:
+ hint = _("did you forget to merge? use push -f to force")
+ raise util.Abort(msg, hint=hint)
+
+ # 6. Check for unsynced changes on involved branches.
+ if unsynced:
+ repo.ui.warn(_("note: unsynced remote changes!\n"))
if revs is None:
# use the fast path, no race possible on push
@@ -347,4 +330,4 @@
cg = repo._changegroup(nodes, 'push')
else:
cg = repo.changegroupsubset(update, revs, 'push')
- return cg, remote_heads
+ return cg, remoteheads
--- a/mercurial/dispatch.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/dispatch.py Mon Aug 02 10:55:51 2010 -0500
@@ -6,7 +6,7 @@
# GNU General Public License version 2 or any later version.
from i18n import _
-import os, sys, atexit, signal, pdb, socket, errno, shlex, time
+import os, sys, atexit, signal, pdb, socket, errno, shlex, time, traceback
import util, commands, hg, fancyopts, extensions, hook, error
import cmdutil, encoding
import ui as uimod
@@ -23,6 +23,8 @@
u.setconfig('ui', 'traceback', 'on')
except util.Abort, inst:
sys.stderr.write(_("abort: %s\n") % inst)
+ if inst.hint:
+ sys.stderr.write(_("(%s)\n") % inst.hint)
return -1
except error.ParseError, inst:
if len(inst.args) > 1:
@@ -49,6 +51,8 @@
try:
# enter the debugger before command execution
if '--debugger' in args:
+ ui.warn(_("entering debugger - "
+ "type c to continue starting hg or h for help\n"))
pdb.set_trace()
try:
return _dispatch(ui, args)
@@ -57,6 +61,7 @@
except:
# enter the debugger when we hit an exception
if '--debugger' in args:
+ traceback.print_exc()
pdb.post_mortem(sys.exc_info()[2])
ui.traceback()
raise
@@ -113,6 +118,8 @@
commands.help_(ui, 'shortlist')
except util.Abort, inst:
ui.warn(_("abort: %s\n") % inst)
+ if inst.hint:
+ ui.warn(_("(%s)\n") % inst.hint)
except ImportError, inst:
ui.warn(_("abort: %s!\n") % inst)
m = str(inst).split()[-1]
@@ -205,10 +212,29 @@
return
+ if self.definition.startswith('!'):
+ def fn(ui, *args):
+ cmd = '%s %s' % (self.definition[1:], ' '.join(args))
+ return util.system(cmd)
+ self.fn = fn
+ return
+
args = shlex.split(self.definition)
cmd = args.pop(0)
args = map(util.expandpath, args)
+ for invalidarg in ("--cwd", "-R", "--repository", "--repo"):
+ if _earlygetopt([invalidarg], args):
+ def fn(ui, *args):
+ ui.warn(_("error in definition for alias '%s': %s may only "
+ "be given on the command line\n")
+ % (self.name, invalidarg))
+ return 1
+
+ self.fn = fn
+ self.badalias = True
+ return
+
try:
tableentry = cmdutil.findcmd(cmd, cmdtable, False)[1]
if len(tableentry) > 2:
--- a/mercurial/error.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/error.py Mon Aug 02 10:55:51 2010 -0500
@@ -32,6 +32,9 @@
class Abort(Exception):
"""Raised if a command needs to print an error and exit."""
+ def __init__(self, *args, **kw):
+ Exception.__init__(self, *args)
+ self.hint = kw.get('hint')
class ConfigError(Abort):
'Exception raised when parsing config files'
--- a/mercurial/filemerge.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/filemerge.py Mon Aug 02 10:55:51 2010 -0500
@@ -135,7 +135,7 @@
except IOError:
return False
- if not fco.cmp(fcd.data()): # files identical?
+ if not fco.cmp(fcd): # files identical?
return None
if fca == fco: # backwards, use working dir parent as ancestor
--- a/mercurial/help/glossary.txt Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/help/glossary.txt Mon Aug 02 10:55:51 2010 -0500
@@ -16,7 +16,7 @@
a remote repository, since new heads may be created by these
operations. Note that the term branch can also be used informally
to describe a development process in which certain development is
- done independently of other development.This is sometimes done
+ done independently of other development. This is sometimes done
explicitly with a named branch, but it can also be done locally,
using bookmarks or clones and anonymous branches.
@@ -83,8 +83,8 @@
Changeset id
A SHA-1 hash that uniquely identifies a changeset. It may be
- represented as either a "long" 40-byte hexadecimal string, or a
- "short" 12-byte hexadecimal string.
+ represented as either a "long" 40 hexadecimal digit string, or a
+ "short" 12 hexadecimal digit string.
Changeset, merge
A changeset with two parents. This occurs when a merge is
--- a/mercurial/help/revsets.txt Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/help/revsets.txt Mon Aug 02 10:55:51 2010 -0500
@@ -58,8 +58,7 @@
Alias for ``user(string)``.
``branch(set)``
- The branch names are found for changesets in set, and the result is
- all changesets belonging to one those branches.
+ All changesets belonging to the branches of changesets in set.
``children(set)``
Child changesets of changesets in set.
@@ -74,10 +73,10 @@
Changesets within the interval, see :hg:`help dates`.
``descendants(set)``
- Changesets which are decendants of changesets in set.
+ Changesets which are descendants of changesets in set.
``file(pattern)``
- Changesets which manually affected files matching pattern.
+ Changesets affecting files matched by pattern.
``follow()``
An alias for ``::.`` (ancestors of the working copy's first parent).
@@ -101,14 +100,18 @@
``max(set)``
Changeset with highest revision number in set.
+``min(set)``
+ Changeset with lowest revision number in set.
+
``merge()``
Changeset is a merge changeset.
``modifies(pattern)``
- Changesets which modify files matching pattern.
+ Changesets modifying files matched by pattern.
``outgoing([path])``
- Changesets missing in path.
+ Changesets not found in the specified destination repository, or the
+ default push location.
``p1(set)``
First parent of changesets in set.
--- a/mercurial/help/templates.txt Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/help/templates.txt Mon Aug 02 10:55:51 2010 -0500
@@ -28,6 +28,8 @@
:branches: String. The name of the branch on which the changeset was
committed. Will be empty if the branch name was default.
+:children: List of strings. The children of the changeset.
+
:date: Date information. The date when the changeset was committed.
:desc: String. The text of the changeset description.
@@ -50,8 +52,8 @@
:file_dels: List of strings. Files removed by this changeset.
-:node: String. The changeset identification hash, as a 40-character
- hexadecimal string.
+:node: String. The changeset identification hash, as a 40 hexadecimal
+ digit string.
:parents: List of strings. The parents of the changeset.
@@ -136,7 +138,7 @@
specified in RFC 3339: "2009-08-18T13:00:13+02:00".
:short: Changeset hash. Returns the short form of a changeset hash,
- i.e. a 12-byte hexadecimal string.
+ i.e. a 12 hexadecimal digit string.
:shortdate: Date. Returns a date like "2006-09-18".
--- a/mercurial/hgweb/hgweb_mod.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/hgweb/hgweb_mod.py Mon Aug 02 10:55:51 2010 -0500
@@ -6,8 +6,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
-import os
-from mercurial import ui, hg, hook, error, encoding, templater
+import os, sys, urllib
+from mercurial import ui, hg, hook, error, encoding, templater, util
from common import get_mtime, ErrorResponse, permhooks
from common import HTTP_OK, HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
from request import wsgirequest
@@ -112,24 +112,18 @@
# and the clients always use the old URL structure
cmd = req.form.get('cmd', [''])[0]
- if cmd and cmd in protocol.__all__:
+ if protocol.iscmd(cmd):
if query:
raise ErrorResponse(HTTP_NOT_FOUND)
- try:
- if cmd in perms:
- try:
- self.check_perm(req, perms[cmd])
- except ErrorResponse, inst:
- if cmd == 'unbundle':
- req.drain()
- raise
- method = getattr(protocol, cmd)
- return method(self.repo, req)
- except ErrorResponse, inst:
- req.respond(inst, protocol.HGTYPE)
- if not inst.message:
- return []
- return '0\n%s\n' % inst.message,
+ if cmd in perms:
+ try:
+ self.check_perm(req, perms[cmd])
+ except ErrorResponse, inst:
+ if cmd == 'unbundle':
+ req.drain()
+ req.respond(inst, protocol.HGTYPE)
+ return '0\n%s\n' % inst.message
+ return protocol.call(self.repo, req, cmd)
# translate user-visible url structure to internal structure
--- a/mercurial/hgweb/protocol.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/hgweb/protocol.py Mon Aug 02 10:55:51 2010 -0500
@@ -5,221 +5,64 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
-import cStringIO, zlib, tempfile, errno, os, sys, urllib, copy
-from mercurial import util, streamclone, pushkey
-from mercurial.node import bin, hex
-from mercurial import changegroup as changegroupmod
-from common import ErrorResponse, HTTP_OK, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
-
-# __all__ is populated with the allowed commands. Be sure to add to it if
-# you're adding a new command, or the new command won't work.
-
-__all__ = [
- 'lookup', 'heads', 'branches', 'between', 'changegroup',
- 'changegroupsubset', 'capabilities', 'unbundle', 'stream_out',
- 'branchmap', 'pushkey', 'listkeys'
-]
+import cStringIO, zlib, sys, urllib
+from mercurial import util, wireproto
+from common import HTTP_OK
HGTYPE = 'application/mercurial-0.1'
-basecaps = 'lookup changegroupsubset branchmap pushkey'.split()
-
-def lookup(repo, req):
- try:
- r = hex(repo.lookup(req.form['key'][0]))
- success = 1
- except Exception, inst:
- r = str(inst)
- success = 0
- resp = "%s %s\n" % (success, r)
- req.respond(HTTP_OK, HGTYPE, length=len(resp))
- yield resp
-
-def heads(repo, req):
- resp = " ".join(map(hex, repo.heads())) + "\n"
- req.respond(HTTP_OK, HGTYPE, length=len(resp))
- yield resp
-
-def branchmap(repo, req):
- branches = repo.branchmap()
- heads = []
- for branch, nodes in branches.iteritems():
- branchname = urllib.quote(branch)
- branchnodes = [hex(node) for node in nodes]
- heads.append('%s %s' % (branchname, ' '.join(branchnodes)))
- resp = '\n'.join(heads)
- req.respond(HTTP_OK, HGTYPE, length=len(resp))
- yield resp
-
-def branches(repo, req):
- nodes = []
- if 'nodes' in req.form:
- nodes = map(bin, req.form['nodes'][0].split(" "))
- resp = cStringIO.StringIO()
- for b in repo.branches(nodes):
- resp.write(" ".join(map(hex, b)) + "\n")
- resp = resp.getvalue()
- req.respond(HTTP_OK, HGTYPE, length=len(resp))
- yield resp
-
-def between(repo, req):
- pairs = [map(bin, p.split("-"))
- for p in req.form['pairs'][0].split(" ")]
- resp = ''.join(" ".join(map(hex, b)) + "\n" for b in repo.between(pairs))
- req.respond(HTTP_OK, HGTYPE, length=len(resp))
- yield resp
-
-def changegroup(repo, req):
- req.respond(HTTP_OK, HGTYPE)
- nodes = []
-
- if 'roots' in req.form:
- nodes = map(bin, req.form['roots'][0].split(" "))
-
- z = zlib.compressobj()
- f = repo.changegroup(nodes, 'serve')
- while 1:
- chunk = f.read(4096)
- if not chunk:
- break
- yield z.compress(chunk)
-
- yield z.flush()
-
-def changegroupsubset(repo, req):
- req.respond(HTTP_OK, HGTYPE)
- bases = []
- heads = []
-
- if 'bases' in req.form:
- bases = [bin(x) for x in req.form['bases'][0].split(' ')]
- if 'heads' in req.form:
- heads = [bin(x) for x in req.form['heads'][0].split(' ')]
-
- z = zlib.compressobj()
- f = repo.changegroupsubset(bases, heads, 'serve')
- while 1:
- chunk = f.read(4096)
- if not chunk:
- break
- yield z.compress(chunk)
-
- yield z.flush()
-
-def capabilities(repo, req):
- caps = copy.copy(basecaps)
- if streamclone.allowed(repo.ui):
- caps.append('stream=%d' % repo.changelog.version)
- if changegroupmod.bundlepriority:
- caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
- rsp = ' '.join(caps)
- req.respond(HTTP_OK, HGTYPE, length=len(rsp))
- yield rsp
-
-def unbundle(repo, req):
-
- proto = req.env.get('wsgi.url_scheme') or 'http'
- their_heads = req.form['heads'][0].split(' ')
- def check_heads():
- heads = map(hex, repo.heads())
- return their_heads == [hex('force')] or their_heads == heads
-
- # fail early if possible
- if not check_heads():
- req.drain()
- raise ErrorResponse(HTTP_OK, 'unsynced changes')
-
- # do not lock repo until all changegroup data is
- # streamed. save to temporary file.
-
- fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
- fp = os.fdopen(fd, 'wb+')
- try:
- length = int(req.env['CONTENT_LENGTH'])
- for s in util.filechunkiter(req, limit=length):
+class webproto(object):
+ def __init__(self, req):
+ self.req = req
+ self.response = ''
+ def getargs(self, args):
+ data = {}
+ keys = args.split()
+ for k in keys:
+ if k == '*':
+ star = {}
+ for key in self.req.form.keys():
+ if key not in keys:
+ star[key] = self.req.form[key][0]
+ data['*'] = star
+ else:
+ data[k] = self.req.form[k][0]
+ return [data[k] for k in keys]
+ def getfile(self, fp):
+ length = int(self.req.env['CONTENT_LENGTH'])
+ for s in util.filechunkiter(self.req, limit=length):
fp.write(s)
-
- try:
- lock = repo.lock()
- try:
- if not check_heads():
- raise ErrorResponse(HTTP_OK, 'unsynced changes')
-
- fp.seek(0)
- header = fp.read(6)
- if header.startswith('HG') and not header.startswith('HG10'):
- raise ValueError('unknown bundle version')
- elif header not in changegroupmod.bundletypes:
- raise ValueError('unknown bundle compression type')
- gen = changegroupmod.unbundle(header, fp)
-
- # send addchangegroup output to client
-
- oldio = sys.stdout, sys.stderr
- sys.stderr = sys.stdout = cStringIO.StringIO()
+ def redirect(self):
+ self.oldio = sys.stdout, sys.stderr
+ sys.stderr = sys.stdout = cStringIO.StringIO()
+ def groupchunks(self, cg):
+ z = zlib.compressobj()
+ while 1:
+ chunk = cg.read(4096)
+ if not chunk:
+ break
+ yield z.compress(chunk)
+ yield z.flush()
+ def _client(self):
+ return 'remote:%s:%s:%s' % (
+ self.req.env.get('wsgi.url_scheme') or 'http',
+ urllib.quote(self.req.env.get('REMOTE_HOST', '')),
+ urllib.quote(self.req.env.get('REMOTE_USER', '')))
- try:
- url = 'remote:%s:%s:%s' % (
- proto,
- urllib.quote(req.env.get('REMOTE_HOST', '')),
- urllib.quote(req.env.get('REMOTE_USER', '')))
- try:
- ret = repo.addchangegroup(gen, 'serve', url, lock=lock)
- except util.Abort, inst:
- sys.stdout.write("abort: %s\n" % inst)
- ret = 0
- finally:
- val = sys.stdout.getvalue()
- sys.stdout, sys.stderr = oldio
- req.respond(HTTP_OK, HGTYPE)
- return '%d\n%s' % (ret, val),
- finally:
- lock.release()
- except ValueError, inst:
- raise ErrorResponse(HTTP_OK, inst)
- except (OSError, IOError), inst:
- error = getattr(inst, 'strerror', 'Unknown error')
- if not isinstance(error, str):
- error = 'Error: %s' % str(error)
- if inst.errno == errno.ENOENT:
- code = HTTP_NOT_FOUND
- else:
- code = HTTP_SERVER_ERROR
- filename = getattr(inst, 'filename', '')
- # Don't send our filesystem layout to the client
- if filename and filename.startswith(repo.root):
- filename = filename[len(repo.root)+1:]
- text = '%s: %s' % (error, filename)
- else:
- text = error.replace(repo.root + os.path.sep, '')
- raise ErrorResponse(code, text)
- finally:
- fp.close()
- os.unlink(tempname)
+def iscmd(cmd):
+ return cmd in wireproto.commands
-def stream_out(repo, req):
- req.respond(HTTP_OK, HGTYPE)
- try:
- for chunk in streamclone.stream_out(repo):
- yield chunk
- except streamclone.StreamException, inst:
- yield str(inst)
-
-def pushkey(repo, req):
- namespace = req.form['namespace'][0]
- key = req.form['key'][0]
- old = req.form['old'][0]
- new = req.form['new'][0]
-
- r = repo.pushkey(namespace, key, old, new)
- r = '%d\n' % int(r)
- req.respond(HTTP_OK, HGTYPE, length=len(r))
- yield r
-
-def listkeys(repo, req):
- namespace = req.form['namespace'][0]
- d = repo.listkeys(namespace).items()
- t = '\n'.join(['%s\t%s' % (k.encode('string-escape'),
- v.encode('string-escape')) for k, v in d])
- req.respond(HTTP_OK, HGTYPE, length=len(t))
- yield t
+def call(repo, req, cmd):
+ p = webproto(req)
+ rsp = wireproto.dispatch(repo, p, cmd)
+ if isinstance(rsp, str):
+ req.respond(HTTP_OK, HGTYPE, length=len(rsp))
+ return [rsp]
+ elif isinstance(rsp, wireproto.streamres):
+ req.respond(HTTP_OK, HGTYPE)
+ return rsp.gen
+ elif isinstance(rsp, wireproto.pushres):
+ val = sys.stdout.getvalue()
+ sys.stdout, sys.stderr = p.oldio
+ req.respond(HTTP_OK, HGTYPE)
+ return ['%d\n%s' % (rsp.res, val)]
--- a/mercurial/httprepo.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/httprepo.py Mon Aug 02 10:55:51 2010 -0500
@@ -8,7 +8,7 @@
from node import bin, hex, nullid
from i18n import _
-import repo, changegroup, statichttprepo, error, url, util, pushkey
+import repo, changegroup, statichttprepo, error, url, util, wireproto
import os, urllib, urllib2, urlparse, zlib, httplib
import errno, socket
import encoding
@@ -22,7 +22,7 @@
raise IOError(None, _('connection ended unexpectedly'))
yield zd.flush()
-class httprepository(repo.repository):
+class httprepository(wireproto.wirerepository):
def __init__(self, ui, path):
self.path = path
self.caps = None
@@ -54,7 +54,7 @@
def get_caps(self):
if self.caps is None:
try:
- self.caps = set(self.do_read('capabilities').split())
+ self.caps = set(self._call('capabilities').split())
except error.RepoError:
self.caps = set()
self.ui.debug('capabilities: %s\n' %
@@ -66,7 +66,7 @@
def lock(self):
raise util.Abort(_('operation not supported over http'))
- def do_cmd(self, cmd, **args):
+ def _callstream(self, cmd, **args):
data = args.pop('data', None)
headers = args.pop('headers', {})
self.ui.debug("sending %s command\n" % cmd)
@@ -130,90 +130,15 @@
return resp
- def do_read(self, cmd, **args):
- fp = self.do_cmd(cmd, **args)
+ def _call(self, cmd, **args):
+ fp = self._callstream(cmd, **args)
try:
return fp.read()
finally:
# if using keepalive, allow connection to be reused
fp.close()
- def lookup(self, key):
- self.requirecap('lookup', _('look up remote revision'))
- d = self.do_cmd("lookup", key = key).read()
- success, data = d[:-1].split(' ', 1)
- if int(success):
- return bin(data)
- raise error.RepoError(data)
-
- def heads(self):
- d = self.do_read("heads")
- try:
- return map(bin, d[:-1].split(" "))
- except:
- raise error.ResponseError(_("unexpected response:"), d)
-
- def branchmap(self):
- d = self.do_read("branchmap")
- try:
- branchmap = {}
- for branchpart in d.splitlines():
- branchheads = branchpart.split(' ')
- branchname = urllib.unquote(branchheads[0])
- # Earlier servers (1.3.x) send branch names in (their) local
- # charset. The best we can do is assume it's identical to our
- # own local charset, in case it's not utf-8.
- try:
- branchname.decode('utf-8')
- except UnicodeDecodeError:
- branchname = encoding.fromlocal(branchname)
- branchheads = [bin(x) for x in branchheads[1:]]
- branchmap[branchname] = branchheads
- return branchmap
- except:
- raise error.ResponseError(_("unexpected response:"), d)
-
- def branches(self, nodes):
- n = " ".join(map(hex, nodes))
- d = self.do_read("branches", nodes=n)
- try:
- br = [tuple(map(bin, b.split(" "))) for b in d.splitlines()]
- return br
- except:
- raise error.ResponseError(_("unexpected response:"), d)
-
- def between(self, pairs):
- batch = 8 # avoid giant requests
- r = []
- for i in xrange(0, len(pairs), batch):
- n = " ".join(["-".join(map(hex, p)) for p in pairs[i:i + batch]])
- d = self.do_read("between", pairs=n)
- try:
- r += [l and map(bin, l.split(" ")) or []
- for l in d.splitlines()]
- except:
- raise error.ResponseError(_("unexpected response:"), d)
- return r
-
- def changegroup(self, nodes, kind):
- n = " ".join(map(hex, nodes))
- f = self.do_cmd("changegroup", roots=n)
- return util.chunkbuffer(zgenerator(f))
-
- def changegroupsubset(self, bases, heads, source):
- self.requirecap('changegroupsubset', _('look up remote changes'))
- baselst = " ".join([hex(n) for n in bases])
- headlst = " ".join([hex(n) for n in heads])
- f = self.do_cmd("changegroupsubset", bases=baselst, heads=headlst)
- return util.chunkbuffer(zgenerator(f))
-
- def unbundle(self, cg, heads, source):
- '''Send cg (a readable file-like object representing the
- changegroup to push, typically a chunkbuffer object) to the
- remote server as a bundle. Return an integer response code:
- non-zero indicates a successful push (see
- localrepository.addchangegroup()), and zero indicates either
- error or nothing to push.'''
+ def _callpush(self, cmd, cg, **args):
# have to stream bundle to a temp file because we do not have
# http 1.1 chunked transfer.
@@ -233,56 +158,25 @@
tempname = changegroup.writebundle(cg, None, type)
fp = url.httpsendfile(tempname, "rb")
+ headers = {'Content-Type': 'application/mercurial-0.1'}
+
try:
try:
- resp = self.do_read(
- 'unbundle', data=fp,
- headers={'Content-Type': 'application/mercurial-0.1'},
- heads=' '.join(map(hex, heads)))
- resp_code, output = resp.split('\n', 1)
- try:
- ret = int(resp_code)
- except ValueError, err:
- raise error.ResponseError(
- _('push failed (unexpected response):'), resp)
- for l in output.splitlines(True):
- self.ui.status(_('remote: '), l)
- return ret
+ r = self._call(cmd, data=fp, headers=headers, **args)
+ return r.split('\n', 1)
except socket.error, err:
- if err[0] in (errno.ECONNRESET, errno.EPIPE):
- raise util.Abort(_('push failed: %s') % err[1])
- raise util.Abort(err[1])
+ if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
+ raise util.Abort(_('push failed: %s') % err.args[1])
+ raise util.Abort(err.args[1])
finally:
fp.close()
os.unlink(tempname)
- def stream_out(self):
- return self.do_cmd('stream_out')
+ def _abort(self, exception):
+ raise exception
- def pushkey(self, namespace, key, old, new):
- if not self.capable('pushkey'):
- return False
- d = self.do_cmd("pushkey", data="", # force a POST
- namespace=namespace, key=key, old=old, new=new).read()
- code, output = d.split('\n', 1)
- try:
- ret = bool(int(code))
- except ValueError, err:
- raise error.ResponseError(
- _('push failed (unexpected response):'), d)
- for l in output.splitlines(True):
- self.ui.status(_('remote: '), l)
- return ret
-
- def listkeys(self, namespace):
- if not self.capable('pushkey'):
- return {}
- d = self.do_cmd("listkeys", namespace=namespace).read()
- r = {}
- for l in d.splitlines():
- k, v = l.split('\t')
- r[k.decode('string-escape')] = v.decode('string-escape')
- return r
+ def _decompress(self, stream):
+ return util.chunkbuffer(zgenerator(stream))
class httpsrepository(httprepository):
def __init__(self, ui, path):
--- a/mercurial/localrepo.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/localrepo.py Mon Aug 02 10:55:51 2010 -0500
@@ -510,7 +510,7 @@
def _link(self, f):
return os.path.islink(self.wjoin(f))
- def _filter(self, filter, filename, data):
+ def _loadfilter(self, filter):
if filter not in self.filterpats:
l = []
for pat, cmd in self.ui.configitems(filter):
@@ -533,6 +533,9 @@
l.append((mf, fn, params))
self.filterpats[filter] = l
+ def _filter(self, filter, filename, data):
+ self._loadfilter(filter)
+
for mf, fn, cmd in self.filterpats[filter]:
if mf(filename):
self.ui.debug("filtering %s through %s\n" % (filename, cmd))
@@ -1059,16 +1062,16 @@
# do a full compare of any files that might have changed
for f in sorted(cmp):
if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
- or ctx1[f].cmp(ctx2[f].data())):
+ or ctx1[f].cmp(ctx2[f])):
modified.append(f)
else:
fixup.append(f)
- if listclean:
- clean += fixup
-
# update dirstate for files that are actually clean
if fixup:
+ if listclean:
+ clean += fixup
+
try:
# updating the dirstate is optional
# so we don't wait on the lock
@@ -1103,7 +1106,7 @@
if fn in mf1:
if (mf1.flags(fn) != mf2.flags(fn) or
(mf1[fn] != mf2[fn] and
- (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
+ (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
modified.append(fn)
elif listclean:
clean.append(fn)
@@ -1223,46 +1226,34 @@
# unbundle assumes local user cannot lock remote repo (new ssh
# servers, http servers).
- if remote.capable('unbundle'):
- return self.push_unbundle(remote, force, revs, newbranch)
- return self.push_addchangegroup(remote, force, revs, newbranch)
-
- def push_addchangegroup(self, remote, force, revs, newbranch):
- '''Push a changegroup by locking the remote and sending the
- addchangegroup command to it. Used for local and old SSH repos.
- Return an integer: see push().
- '''
- lock = remote.lock()
+ lock = None
+ unbundle = remote.capable('unbundle')
+ if not unbundle:
+ lock = remote.lock()
try:
ret = discovery.prepush(self, remote, force, revs, newbranch)
- if ret[0] is not None:
- cg, remote_heads = ret
+ if ret[0] is None:
+ # and here we return 0 for "nothing to push" or 1 for
+ # "something to push but I refuse"
+ return ret[1]
+
+ cg, remote_heads = ret
+ if unbundle:
+ # local repo finds heads on server, finds out what revs it must
+ # push. once revs transferred, if server finds it has
+ # different heads (someone else won commit/push race), server
+ # aborts.
+ if force:
+ remote_heads = ['force']
+ # ssh: return remote's addchangegroup()
+ # http: return remote's addchangegroup() or 0 for error
+ return remote.unbundle(cg, remote_heads, 'push')
+ else:
# we return an integer indicating remote head count change
return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
- # and here we return 0 for "nothing to push" or 1 for
- # "something to push but I refuse"
- return ret[1]
finally:
- lock.release()
-
- def push_unbundle(self, remote, force, revs, newbranch):
- '''Push a changegroup by unbundling it on the remote. Used for new
- SSH and HTTP repos. Return an integer: see push().'''
- # local repo finds heads on server, finds out what revs it
- # must push. once revs transferred, if server finds it has
- # different heads (someone else won commit/push race), server
- # aborts.
-
- ret = discovery.prepush(self, remote, force, revs, newbranch)
- if ret[0] is not None:
- cg, remote_heads = ret
- if force:
- remote_heads = ['force']
- # ssh: return remote's addchangegroup()
- # http: return remote's addchangegroup() or 0 for error
- return remote.unbundle(cg, remote_heads, 'push')
- # as in push_addchangegroup()
- return ret[1]
+ if lock is not None:
+ lock.release()
def changegroupinfo(self, nodes, source):
if self.ui.verbose or source == 'bundle':
@@ -1296,8 +1287,10 @@
# Set up some initial variables
# Make it easy to refer to self.changelog
cl = self.changelog
- # msng is short for missing - compute the list of changesets in this
- # changegroup.
+ # Compute the list of changesets in this changegroup.
+ # Some bases may turn out to be superfluous, and some heads may be
+ # too. nodesbetween will return the minimal set of bases and heads
+ # necessary to re-create the changegroup.
if not bases:
bases = [nullid]
msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
@@ -1314,31 +1307,9 @@
self.hook('preoutgoing', throw=True, source=source)
self.changegroupinfo(msng_cl_lst, source)
- # Some bases may turn out to be superfluous, and some heads may be
- # too. nodesbetween will return the minimal set of bases and heads
- # necessary to re-create the changegroup.
- # Known heads are the list of heads that it is assumed the recipient
- # of this changegroup will know about.
- knownheads = set()
- # We assume that all parents of bases are known heads.
- for n in bases:
- knownheads.update(cl.parents(n))
- knownheads.discard(nullid)
- knownheads = list(knownheads)
- if knownheads:
- # Now that we know what heads are known, we can compute which
- # changesets are known. The recipient must know about all
- # changesets required to reach the known heads from the null
- # changeset.
- has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
- junk = None
- # Transform the list into a set.
- has_cl_set = set(has_cl_set)
- else:
- # If there were no known heads, the recipient cannot be assumed to
- # know about any changesets.
- has_cl_set = set()
+ # We assume that all ancestors of bases are known
+ commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
# Make it easy to refer to self.manifest
mnfst = self.manifest
@@ -1355,19 +1326,6 @@
def identity(x):
return x
- # If we determine that a particular file or manifest node must be a
- # node that the recipient of the changegroup will already have, we can
- # also assume the recipient will have all the parents. This function
- # prunes them from the set of missing nodes.
- def prune_parents(revlog, hasset, msngset):
- for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
- msngset.pop(revlog.node(r), None)
-
- # Use the information collected in collect_manifests_and_files to say
- # which changenode any manifestnode belongs to.
- def lookup_manifest_link(mnfstnode):
- return msng_mnfst_set[mnfstnode]
-
# A function generating function that sets up the initial environment
# the inner function.
def filenode_collector(changedfiles):
@@ -1386,10 +1344,9 @@
deltamf = mnfst.readdelta(mnfstnode)
# For each line in the delta
for f, fnode in deltamf.iteritems():
- f = changedfiles.get(f, None)
# And if the file is in the list of files we care
# about.
- if f is not None:
+ if f in changedfiles:
# Get the changenode this manifest belongs to
clnode = msng_mnfst_set[mnfstnode]
# Create the set of filenodes for the file if
@@ -1412,28 +1369,23 @@
ndset.setdefault(fnode, clnode)
return collect_msng_filenodes
- # We have a list of filenodes we think we need for a file, lets remove
- # all those we know the recipient must have.
- def prune_filenodes(f, filerevlog):
- msngset = msng_filenode_set[f]
+ # If we determine that a particular file or manifest node must be a
+ # node that the recipient of the changegroup will already have, we can
+ # also assume the recipient will have all the parents. This function
+ # prunes them from the set of missing nodes.
+ def prune(revlog, missingnodes):
hasset = set()
# If a 'missing' filenode thinks it belongs to a changenode we
# assume the recipient must have, then the recipient must have
# that filenode.
- for n in msngset:
- clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
- if clnode in has_cl_set:
+ for n in missingnodes:
+ clrev = revlog.linkrev(revlog.rev(n))
+ if clrev in commonrevs:
hasset.add(n)
- prune_parents(filerevlog, hasset, msngset)
-
- # A function generator function that sets up the a context for the
- # inner function.
- def lookup_filenode_link_func(fname):
- msngset = msng_filenode_set[fname]
- # Lookup the changenode the filenode belongs to.
- def lookup_filenode_link(fnode):
- return msngset[fnode]
- return lookup_filenode_link
+ for n in hasset:
+ missingnodes.pop(n, None)
+ for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
+ missingnodes.pop(revlog.node(r), None)
# Add the nodes that were explicitly requested.
def add_extra_nodes(name, nodes):
@@ -1448,45 +1400,30 @@
# logically divide up the task, generate the group.
def gengroup():
# The set of changed files starts empty.
- changedfiles = {}
+ changedfiles = set()
collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
# Create a changenode group generator that will call our functions
# back to lookup the owning changenode and collect information.
group = cl.group(msng_cl_lst, identity, collect)
- cnt = 0
- for chnk in group:
+ for cnt, chnk in enumerate(group):
yield chnk
self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
- cnt += 1
self.ui.progress(_('bundling changes'), None)
-
- # Figure out which manifest nodes (of the ones we think might be
- # part of the changegroup) the recipient must know about and
- # remove them from the changegroup.
- has_mnfst_set = set()
- for n in msng_mnfst_set:
- # If a 'missing' manifest thinks it belongs to a changenode
- # the recipient is assumed to have, obviously the recipient
- # must have that manifest.
- linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
- if linknode in has_cl_set:
- has_mnfst_set.add(n)
- prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
+ prune(mnfst, msng_mnfst_set)
add_extra_nodes(1, msng_mnfst_set)
msng_mnfst_lst = msng_mnfst_set.keys()
# Sort the manifestnodes by revision number.
msng_mnfst_lst.sort(key=mnfst.rev)
# Create a generator for the manifestnodes that calls our lookup
# and data collection functions back.
- group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
+ group = mnfst.group(msng_mnfst_lst,
+ lambda mnode: msng_mnfst_set[mnode],
filenode_collector(changedfiles))
- cnt = 0
- for chnk in group:
+ for cnt, chnk in enumerate(group):
yield chnk
self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
- cnt += 1
self.ui.progress(_('bundling manifests'), None)
# These are no longer needed, dereference and toss the memory for
@@ -1499,7 +1436,7 @@
if isinstance(fname, int):
continue
msng_filenode_set.setdefault(fname, {})
- changedfiles[fname] = 1
+ changedfiles.add(fname)
# Go through all our files in order sorted by name.
cnt = 0
for fname in sorted(changedfiles):
@@ -1508,32 +1445,27 @@
raise util.Abort(_("empty or missing revlog for %s") % fname)
# Toss out the filenodes that the recipient isn't really
# missing.
- if fname in msng_filenode_set:
- prune_filenodes(fname, filerevlog)
- add_extra_nodes(fname, msng_filenode_set[fname])
- msng_filenode_lst = msng_filenode_set[fname].keys()
- else:
- msng_filenode_lst = []
+ missingfnodes = msng_filenode_set.pop(fname, {})
+ prune(filerevlog, missingfnodes)
+ add_extra_nodes(fname, missingfnodes)
# If any filenodes are left, generate the group for them,
# otherwise don't bother.
- if len(msng_filenode_lst) > 0:
+ if missingfnodes:
yield changegroup.chunkheader(len(fname))
yield fname
- # Sort the filenodes by their revision #
- msng_filenode_lst.sort(key=filerevlog.rev)
+ # Sort the filenodes by their revision # (topological order)
+ nodeiter = list(missingfnodes)
+ nodeiter.sort(key=filerevlog.rev)
# Create a group generator and only pass in a changenode
# lookup function as we need to collect no information
# from filenodes.
- group = filerevlog.group(msng_filenode_lst,
- lookup_filenode_link_func(fname))
+ group = filerevlog.group(nodeiter,
+ lambda fnode: missingfnodes[fnode])
for chnk in group:
self.ui.progress(
_('bundling files'), cnt, item=fname, unit=_('chunks'))
cnt += 1
yield chnk
- if fname in msng_filenode_set:
- # Don't need this anymore, toss it to free memory.
- del msng_filenode_set[fname]
# Signal that no more groups are left.
yield changegroup.closechunk()
self.ui.progress(_('bundling files'), None)
@@ -1571,31 +1503,28 @@
if log.linkrev(r) in revset:
yield log.node(r)
- def lookuprevlink_func(revlog):
- def lookuprevlink(n):
+ def lookuplinkrev_func(revlog):
+ def lookuplinkrev(n):
return cl.node(revlog.linkrev(revlog.rev(n)))
- return lookuprevlink
+ return lookuplinkrev
def gengroup():
'''yield a sequence of changegroup chunks (strings)'''
# construct a list of all changed files
- changedfiles = {}
+ changedfiles = set()
mmfs = {}
collect = changegroup.collector(cl, mmfs, changedfiles)
- cnt = 0
- for chnk in cl.group(nodes, identity, collect):
+ for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
- cnt += 1
yield chnk
self.ui.progress(_('bundling changes'), None)
mnfst = self.manifest
nodeiter = gennodelst(mnfst)
- cnt = 0
- for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
+ for cnt, chnk in enumerate(mnfst.group(nodeiter,
+ lookuplinkrev_func(mnfst))):
self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
- cnt += 1
yield chnk
self.ui.progress(_('bundling manifests'), None)
@@ -1609,7 +1538,7 @@
if nodeiter:
yield changegroup.chunkheader(len(fname))
yield fname
- lookup = lookuprevlink_func(filerevlog)
+ lookup = lookuplinkrev_func(filerevlog)
for chnk in filerevlog.group(nodeiter, lookup):
self.ui.progress(
_('bundling files'), cnt, item=fname, unit=_('chunks'))
--- a/mercurial/merge.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/merge.py Mon Aug 02 10:55:51 2010 -0500
@@ -73,7 +73,7 @@
def _checkunknown(wctx, mctx):
"check for collisions between unknown files and files in mctx"
for f in wctx.unknown():
- if f in mctx and mctx[f].cmp(wctx[f].data()):
+ if f in mctx and mctx[f].cmp(wctx[f]):
raise util.Abort(_("untracked file in working directory differs"
" from file in requested revision: '%s'") % f)
--- a/mercurial/revlog.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/revlog.py Mon Aug 02 10:55:51 2010 -0500
@@ -131,7 +131,7 @@
self.dataf = dataf
self.s = struct.calcsize(indexformatng)
self.datasize = size
- self.l = size / self.s
+ self.l = size // self.s
self.index = [None] * self.l
self.map = {nullid: nullrev}
self.allmap = 0
@@ -176,8 +176,8 @@
# limit blocksize so that we don't get too much data.
blocksize = max(self.datasize - blockstart, 0)
data = self.dataf.read(blocksize)
- lend = len(data) / self.s
- i = blockstart / self.s
+ lend = len(data) // self.s
+ i = blockstart // self.s
off = 0
# lazyindex supports __delitem__
if lend > len(self.index) - i:
@@ -533,6 +533,8 @@
return self.index[rev][1]
def base(self, rev):
return self.index[rev][3]
+ def flags(self, rev):
+ return self.index[rev][0] & 0xFFFF
def size(self, rev):
"""return the length of the uncompressed text for a given revision"""
@@ -1020,9 +1022,9 @@
base = self.base(rev)
# check rev flags
- if self.index[rev][0] & 0xFFFF:
+ if self.flags(rev):
raise RevlogError(_('incompatible revision flag %x') %
- (self.index[rev][0] & 0xFFFF))
+ (self.flags(rev)))
# do we have useful data cached?
if self._cache and self._cache[1] >= base and self._cache[1] < rev:
@@ -1193,14 +1195,7 @@
d = self.revdiff(a, b)
yield changegroup.chunkheader(len(meta) + len(d))
yield meta
- if len(d) > 2**20:
- pos = 0
- while pos < len(d):
- pos2 = pos + 2 ** 18
- yield d[pos:pos2]
- pos = pos2
- else:
- yield d
+ yield d
yield changegroup.closechunk()
--- a/mercurial/revset.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/revset.py Mon Aug 02 10:55:51 2010 -0500
@@ -195,6 +195,14 @@
return [m]
return []
+def minrev(repo, subset, x):
+ s = getset(repo, subset, x)
+ if s:
+ m = min(s)
+ if m in subset:
+ return [m]
+ return []
+
def limit(repo, subset, x):
l = getargs(x, 2, 2, _("limit wants two arguments"))
try:
@@ -466,6 +474,7 @@
"keyword": keyword,
"limit": limit,
"max": maxrev,
+ "min": minrev,
"merge": merge,
"modifies": modifies,
"outgoing": outgoing,
--- a/mercurial/sshrepo.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/sshrepo.py Mon Aug 02 10:55:51 2010 -0500
@@ -5,10 +5,9 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
-from node import bin, hex
from i18n import _
-import repo, util, error, encoding
-import re, urllib
+import repo, util, error, wireproto
+import re
class remotelock(object):
def __init__(self, repo):
@@ -20,14 +19,14 @@
if self.repo:
self.release()
-class sshrepository(repo.repository):
+class sshrepository(wireproto.wirerepository):
def __init__(self, ui, path, create=0):
self._url = path
self.ui = ui
m = re.match(r'^ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?$', path)
if not m:
- self.abort(error.RepoError(_("couldn't parse location %s") % path))
+ self._abort(error.RepoError(_("couldn't parse location %s") % path))
self.user = m.group(2)
self.host = m.group(3)
@@ -46,7 +45,7 @@
ui.note(_('running %s\n') % cmd)
res = util.system(cmd)
if res != 0:
- self.abort(error.RepoError(_("could not create remote repo")))
+ self._abort(error.RepoError(_("could not create remote repo")))
self.validate_repo(ui, sshcmd, args, remotecmd)
@@ -65,8 +64,8 @@
self.pipeo, self.pipei, self.pipee = util.popen3(cmd)
# skip any noise generated by remote shell
- self.do_cmd("hello")
- r = self.do_cmd("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
+ self._callstream("hello")
+ r = self._callstream("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
lines = ["", "dummy"]
max_noise = 500
while lines[-1] and max_noise:
@@ -79,7 +78,7 @@
lines.append(l)
max_noise -= 1
else:
- self.abort(error.RepoError(_("no suitable response from remote hg")))
+ self._abort(error.RepoError(_("no suitable response from remote hg")))
self.capabilities = set()
for l in reversed(lines):
@@ -97,7 +96,7 @@
break
self.ui.status(_("remote: "), l)
- def abort(self, exception):
+ def _abort(self, exception):
self.cleanup()
raise exception
@@ -114,7 +113,7 @@
__del__ = cleanup
- def do_cmd(self, cmd, **args):
+ def _callstream(self, cmd, **args):
self.ui.debug("sending %s command\n" % cmd)
self.pipeo.write("%s\n" % cmd)
for k, v in sorted(args.iteritems()):
@@ -124,17 +123,35 @@
return self.pipei
- def call(self, cmd, **args):
- self.do_cmd(cmd, **args)
+ def _call(self, cmd, **args):
+ self._callstream(cmd, **args)
return self._recv()
+ def _callpush(self, cmd, fp, **args):
+ r = self._call(cmd, **args)
+ if r:
+ return '', r
+ while 1:
+ d = fp.read(4096)
+ if not d:
+ break
+ self._send(d)
+ self._send("", flush=True)
+ r = self._recv()
+ if r:
+ return '', r
+ return self._recv(), ''
+
+ def _decompress(self, stream):
+ return stream
+
def _recv(self):
l = self.pipei.readline()
self.readerr()
try:
l = int(l)
except:
- self.abort(error.ResponseError(_("unexpected response:"), l))
+ self._abort(error.ResponseError(_("unexpected response:"), l))
return self.pipei.read(l)
def _send(self, data, flush=False):
@@ -146,112 +163,19 @@
self.readerr()
def lock(self):
- self.call("lock")
+ self._call("lock")
return remotelock(self)
def unlock(self):
- self.call("unlock")
-
- def lookup(self, key):
- self.requirecap('lookup', _('look up remote revision'))
- d = self.call("lookup", key=key)
- success, data = d[:-1].split(" ", 1)
- if int(success):
- return bin(data)
- else:
- self.abort(error.RepoError(data))
-
- def heads(self):
- d = self.call("heads")
- try:
- return map(bin, d[:-1].split(" "))
- except:
- self.abort(error.ResponseError(_("unexpected response:"), d))
-
- def branchmap(self):
- d = self.call("branchmap")
- try:
- branchmap = {}
- for branchpart in d.splitlines():
- branchheads = branchpart.split(' ')
- branchname = urllib.unquote(branchheads[0])
- # Earlier servers (1.3.x) send branch names in (their) local
- # charset. The best we can do is assume it's identical to our
- # own local charset, in case it's not utf-8.
- try:
- branchname.decode('utf-8')
- except UnicodeDecodeError:
- branchname = encoding.fromlocal(branchname)
- branchheads = [bin(x) for x in branchheads[1:]]
- branchmap[branchname] = branchheads
- return branchmap
- except:
- raise error.ResponseError(_("unexpected response:"), d)
-
- def branches(self, nodes):
- n = " ".join(map(hex, nodes))
- d = self.call("branches", nodes=n)
- try:
- br = [tuple(map(bin, b.split(" "))) for b in d.splitlines()]
- return br
- except:
- self.abort(error.ResponseError(_("unexpected response:"), d))
-
- def between(self, pairs):
- n = " ".join(["-".join(map(hex, p)) for p in pairs])
- d = self.call("between", pairs=n)
- try:
- p = [l and map(bin, l.split(" ")) or [] for l in d.splitlines()]
- return p
- except:
- self.abort(error.ResponseError(_("unexpected response:"), d))
-
- def changegroup(self, nodes, kind):
- n = " ".join(map(hex, nodes))
- return self.do_cmd("changegroup", roots=n)
-
- def changegroupsubset(self, bases, heads, kind):
- self.requirecap('changegroupsubset', _('look up remote changes'))
- bases = " ".join(map(hex, bases))
- heads = " ".join(map(hex, heads))
- return self.do_cmd("changegroupsubset", bases=bases, heads=heads)
-
- def unbundle(self, cg, heads, source):
- '''Send cg (a readable file-like object representing the
- changegroup to push, typically a chunkbuffer object) to the
- remote server as a bundle. Return an integer indicating the
- result of the push (see localrepository.addchangegroup()).'''
- d = self.call("unbundle", heads=' '.join(map(hex, heads)))
- if d:
- # remote may send "unsynced changes"
- self.abort(error.RepoError(_("push refused: %s") % d))
-
- while 1:
- d = cg.read(4096)
- if not d:
- break
- self._send(d)
-
- self._send("", flush=True)
-
- r = self._recv()
- if r:
- # remote may send "unsynced changes"
- self.abort(error.RepoError(_("push failed: %s") % r))
-
- r = self._recv()
- try:
- return int(r)
- except:
- self.abort(error.ResponseError(_("unexpected response:"), r))
+ self._call("unlock")
def addchangegroup(self, cg, source, url):
'''Send a changegroup to the remote server. Return an integer
similar to unbundle(). DEPRECATED, since it requires locking the
remote.'''
- d = self.call("addchangegroup")
+ d = self._call("addchangegroup")
if d:
- self.abort(error.RepoError(_("push refused: %s") % d))
+ self._abort(error.RepoError(_("push refused: %s") % d))
while 1:
d = cg.read(4096)
if not d:
@@ -268,26 +192,6 @@
try:
return int(r)
except:
- self.abort(error.ResponseError(_("unexpected response:"), r))
-
- def stream_out(self):
- return self.do_cmd('stream_out')
-
- def pushkey(self, namespace, key, old, new):
- if not self.capable('pushkey'):
- return False
- d = self.call("pushkey",
- namespace=namespace, key=key, old=old, new=new)
- return bool(int(d))
-
- def listkeys(self, namespace):
- if not self.capable('pushkey'):
- return {}
- d = self.call("listkeys", namespace=namespace)
- r = {}
- for l in d.splitlines():
- k, v = l.split('\t')
- r[k.decode('string-escape')] = v.decode('string-escape')
- return r
+ self._abort(error.ResponseError(_("unexpected response:"), r))
instance = sshrepository
--- a/mercurial/sshserver.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/sshserver.py Mon Aug 02 10:55:51 2010 -0500
@@ -7,14 +7,10 @@
# GNU General Public License version 2 or any later version.
from i18n import _
-from node import bin, hex
-import streamclone, util, hook, pushkey
-import os, sys, tempfile, urllib, copy
+import util, hook, wireproto
+import os, sys
class sshserver(object):
-
- caps = 'unbundle lookup changegroupsubset branchmap pushkey'.split()
-
def __init__(self, ui, repo):
self.ui = ui
self.repo = repo
@@ -29,17 +25,61 @@
util.set_binary(self.fin)
util.set_binary(self.fout)
- def getarg(self):
- argline = self.fin.readline()[:-1]
- arg, l = argline.split()
- val = self.fin.read(int(l))
- return arg, val
+ def getargs(self, args):
+ data = {}
+ keys = args.split()
+ count = len(keys)
+ for n in xrange(len(keys)):
+ argline = self.fin.readline()[:-1]
+ arg, l = argline.split()
+ val = self.fin.read(int(l))
+ if arg not in keys:
+ raise util.Abort("unexpected parameter %r" % arg)
+ if arg == '*':
+ star = {}
+ for n in xrange(int(l)):
+ arg, l = argline.split()
+ val = self.fin.read(int(l))
+ star[arg] = val
+ data['*'] = star
+ else:
+ data[arg] = val
+ return [data[k] for k in keys]
- def respond(self, v):
+ def getarg(self, name):
+ return self.getargs(name)[0]
+
+ def getfile(self, fpout):
+ self.sendresponse('')
+ count = int(self.fin.readline())
+ while count:
+ fpout.write(self.fin.read(count))
+ count = int(self.fin.readline())
+
+ def redirect(self):
+ pass
+
+ def groupchunks(self, changegroup):
+ while True:
+ d = changegroup.read(4096)
+ if not d:
+ break
+ yield d
+
+ def sendresponse(self, v):
self.fout.write("%d\n" % len(v))
self.fout.write(v)
self.fout.flush()
+ def sendstream(self, source):
+ for chunk in source.gen:
+ self.fout.write(chunk)
+ self.fout.flush()
+
+ def sendpushresponse(self, rsp):
+ self.sendresponse('')
+ self.sendresponse(str(rsp.res))
+
def serve_forever(self):
try:
while self.serve_one():
@@ -49,57 +89,31 @@
self.lock.release()
sys.exit(0)
+ handlers = {
+ str: sendresponse,
+ wireproto.streamres: sendstream,
+ wireproto.pushres: sendpushresponse,
+ }
+
def serve_one(self):
cmd = self.fin.readline()[:-1]
- if cmd:
+ if cmd and cmd in wireproto.commands:
+ rsp = wireproto.dispatch(self.repo, self, cmd)
+ self.handlers[rsp.__class__](self, rsp)
+ elif cmd:
impl = getattr(self, 'do_' + cmd, None)
if impl:
- impl()
- else: self.respond("")
+ r = impl()
+ if r is not None:
+ self.sendresponse(r)
+ else: self.sendresponse("")
return cmd != ''
- def do_lookup(self):
- arg, key = self.getarg()
- assert arg == 'key'
- try:
- r = hex(self.repo.lookup(key))
- success = 1
- except Exception, inst:
- r = str(inst)
- success = 0
- self.respond("%s %s\n" % (success, r))
-
- def do_branchmap(self):
- branchmap = self.repo.branchmap()
- heads = []
- for branch, nodes in branchmap.iteritems():
- branchname = urllib.quote(branch)
- branchnodes = [hex(node) for node in nodes]
- heads.append('%s %s' % (branchname, ' '.join(branchnodes)))
- self.respond('\n'.join(heads))
-
- def do_heads(self):
- h = self.repo.heads()
- self.respond(" ".join(map(hex, h)) + "\n")
-
- def do_hello(self):
- '''the hello command returns a set of lines describing various
- interesting things about the server, in an RFC822-like format.
- Currently the only one defined is "capabilities", which
- consists of a line in the form:
-
- capabilities: space separated list of tokens
- '''
- caps = copy.copy(self.caps)
- if streamclone.allowed(self.repo.ui):
- caps.append('stream=%d' % self.repo.changelog.version)
- self.respond("capabilities: %s\n" % (' '.join(caps),))
-
def do_lock(self):
'''DEPRECATED - allowing remote client to lock repo is not safe'''
self.lock = self.repo.lock()
- self.respond("")
+ return ""
def do_unlock(self):
'''DEPRECATED'''
@@ -107,136 +121,20 @@
if self.lock:
self.lock.release()
self.lock = None
- self.respond("")
-
- def do_branches(self):
- arg, nodes = self.getarg()
- nodes = map(bin, nodes.split(" "))
- r = []
- for b in self.repo.branches(nodes):
- r.append(" ".join(map(hex, b)) + "\n")
- self.respond("".join(r))
-
- def do_between(self):
- arg, pairs = self.getarg()
- pairs = [map(bin, p.split("-")) for p in pairs.split(" ")]
- r = []
- for b in self.repo.between(pairs):
- r.append(" ".join(map(hex, b)) + "\n")
- self.respond("".join(r))
-
- def do_changegroup(self):
- nodes = []
- arg, roots = self.getarg()
- nodes = map(bin, roots.split(" "))
-
- cg = self.repo.changegroup(nodes, 'serve')
- while True:
- d = cg.read(4096)
- if not d:
- break
- self.fout.write(d)
-
- self.fout.flush()
-
- def do_changegroupsubset(self):
- argmap = dict([self.getarg(), self.getarg()])
- bases = [bin(n) for n in argmap['bases'].split(' ')]
- heads = [bin(n) for n in argmap['heads'].split(' ')]
-
- cg = self.repo.changegroupsubset(bases, heads, 'serve')
- while True:
- d = cg.read(4096)
- if not d:
- break
- self.fout.write(d)
-
- self.fout.flush()
+ return ""
def do_addchangegroup(self):
'''DEPRECATED'''
if not self.lock:
- self.respond("not locked")
- return
-
- self.respond("")
- r = self.repo.addchangegroup(self.fin, 'serve', self.client_url(),
- lock=self.lock)
- self.respond(str(r))
-
- def client_url(self):
- client = os.environ.get('SSH_CLIENT', '').split(' ', 1)[0]
- return 'remote:ssh:' + client
-
- def do_unbundle(self):
- their_heads = self.getarg()[1].split()
-
- def check_heads():
- heads = map(hex, self.repo.heads())
- return their_heads == [hex('force')] or their_heads == heads
-
- # fail early if possible
- if not check_heads():
- self.respond(_('unsynced changes'))
+ self.sendresponse("not locked")
return
- self.respond('')
-
- # write bundle data to temporary file because it can be big
- fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
- fp = os.fdopen(fd, 'wb+')
- try:
- count = int(self.fin.readline())
- while count:
- fp.write(self.fin.read(count))
- count = int(self.fin.readline())
-
- was_locked = self.lock is not None
- if not was_locked:
- self.lock = self.repo.lock()
- try:
- if not check_heads():
- # someone else committed/pushed/unbundled while we
- # were transferring data
- self.respond(_('unsynced changes'))
- return
- self.respond('')
-
- # push can proceed
+ self.sendresponse("")
+ r = self.repo.addchangegroup(self.fin, 'serve', self._client(),
+ lock=self.lock)
+ return str(r)
- fp.seek(0)
- r = self.repo.addchangegroup(fp, 'serve', self.client_url(),
- lock=self.lock)
- self.respond(str(r))
- finally:
- if not was_locked:
- self.lock.release()
- self.lock = None
- finally:
- fp.close()
- os.unlink(tempname)
-
- def do_stream_out(self):
- try:
- for chunk in streamclone.stream_out(self.repo):
- self.fout.write(chunk)
- self.fout.flush()
- except streamclone.StreamException, inst:
- self.fout.write(str(inst))
- self.fout.flush()
-
- def do_pushkey(self):
- arg, key = self.getarg()
- arg, namespace = self.getarg()
- arg, new = self.getarg()
- arg, old = self.getarg()
- r = pushkey.push(self.repo, namespace, key, old, new)
- self.respond('%s\n' % int(r))
-
- def do_listkeys(self):
- arg, namespace = self.getarg()
- d = pushkey.list(self.repo, namespace).items()
- t = '\n'.join(['%s\t%s' % (k.encode('string-escape'),
- v.encode('string-escape')) for k, v in d])
- self.respond(t)
+ def _client(self):
+ client = os.environ.get('SSH_CLIENT', '').split(' ', 1)[0]
+ return 'remote:ssh:' + client
--- a/mercurial/streamclone.py Mon Aug 02 10:48:31 2010 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,69 +0,0 @@
-# streamclone.py - streaming clone server support for mercurial
-#
-# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-import util, error
-
-from mercurial import store
-
-class StreamException(Exception):
- def __init__(self, code):
- Exception.__init__(self)
- self.code = code
- def __str__(self):
- return '%i\n' % self.code
-
-# if server supports streaming clone, it advertises "stream"
-# capability with value that is version+flags of repo it is serving.
-# client only streams if it can read that repo format.
-
-# stream file format is simple.
-#
-# server writes out line that says how many files, how many total
-# bytes. separator is ascii space, byte counts are strings.
-#
-# then for each file:
-#
-# server writes out line that says filename, how many bytes in
-# file. separator is ascii nul, byte count is string.
-#
-# server writes out raw file data.
-
-def allowed(ui):
- return ui.configbool('server', 'uncompressed', True, untrusted=True)
-
-def stream_out(repo):
- '''stream out all metadata files in repository.
- writes to file-like object, must support write() and optional flush().'''
-
- if not allowed(repo.ui):
- raise StreamException(1)
-
- entries = []
- total_bytes = 0
- try:
- # get consistent snapshot of repo, lock during scan
- lock = repo.lock()
- try:
- repo.ui.debug('scanning\n')
- for name, ename, size in repo.store.walk():
- entries.append((name, size))
- total_bytes += size
- finally:
- lock.release()
- except error.LockError:
- raise StreamException(2)
-
- yield '0\n'
- repo.ui.debug('%d files, %d bytes to transfer\n' %
- (len(entries), total_bytes))
- yield '%d %d\n' % (len(entries), total_bytes)
- for name, size in entries:
- repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
- # partially encode name over the wire for backwards compat
- yield '%s\0%d\n' % (store.encodedir(name), size)
- for chunk in util.filechunkiter(repo.sopener(name), limit=size):
- yield chunk
--- a/mercurial/subrepo.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/subrepo.py Mon Aug 02 10:55:51 2010 -0500
@@ -182,22 +182,49 @@
raise util.Abort(_('unknown subrepo type %s') % state[2])
return types[state[2]](ctx, path, state[:2])
-# subrepo classes need to implement the following methods:
-# __init__(self, ctx, path, state)
-# dirty(self): returns true if the dirstate of the subrepo
-# does not match current stored state
-# commit(self, text, user, date): commit the current changes
-# to the subrepo with the given log message. Use given
-# user and date if possible. Return the new state of the subrepo.
-# remove(self): remove the subrepo (should verify the dirstate
-# is not dirty first)
-# get(self, state): run whatever commands are needed to put the
-# subrepo into this state
-# merge(self, state): merge currently-saved state with the new state.
-# push(self, force): perform whatever action is analogous to 'hg push'
-# This may be a no-op on some systems.
+# subrepo classes need to implement the following abstract class:
+
+class abstractsubrepo(object):
+
+ def dirty(self):
+ """returns true if the dirstate of the subrepo does not match
+ current stored state
+ """
+ raise NotImplementedError
+
+ def commit(self, text, user, date):
+ """commit the current changes to the subrepo with the given
+ log message. Use given user and date if possible. Return the
+ new state of the subrepo.
+ """
+ raise NotImplementedError
+
+ def remove(self):
+ """remove the subrepo
-class hgsubrepo(object):
+ (should verify the dirstate is not dirty first)
+ """
+ raise NotImplementedError
+
+ def get(self, state):
+ """run whatever commands are needed to put the subrepo into
+ this state
+ """
+ raise NotImplementedError
+
+ def merge(self, state):
+ """merge currently-saved state with the new state."""
+ raise NotImplementedError
+
+ def push(self, force):
+ """perform whatever action is analogous to 'hg push'
+
+ This may be a no-op on some systems.
+ """
+ raise NotImplementedError
+
+
+class hgsubrepo(abstractsubrepo):
def __init__(self, ctx, path, state):
self._path = path
self._state = state
@@ -294,15 +321,15 @@
other = hg.repository(self._repo.ui, dsturl)
return self._repo.push(other, force)
-class svnsubrepo(object):
+class svnsubrepo(abstractsubrepo):
def __init__(self, ctx, path, state):
self._path = path
self._state = state
self._ctx = ctx
self._ui = ctx._repo.ui
- def _svncommand(self, commands):
- path = os.path.join(self._ctx._repo.origroot, self._path)
+ def _svncommand(self, commands, filename=''):
+ path = os.path.join(self._ctx._repo.origroot, self._path, filename)
cmd = ['svn'] + commands + [path]
cmd = [util.shellquote(arg) for arg in cmd]
cmd = util.quotecommand(' '.join(cmd))
--- a/mercurial/templatekw.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/templatekw.py Mon Aug 02 10:55:51 2010 -0500
@@ -151,6 +151,11 @@
branch = encoding.tolocal(branch)
return showlist('branch', [branch], plural='branches', **args)
+def showchildren(**args):
+ ctx = args['ctx']
+ childrevs = ['%d:%s' % (cctx, cctx) for cctx in ctx.children()]
+ return showlist('children', childrevs, **args)
+
def showdate(repo, ctx, templ, **args):
return ctx.date()
@@ -245,6 +250,7 @@
keywords = {
'author': showauthor,
'branches': showbranches,
+ 'children': showchildren,
'date': showdate,
'desc': showdescription,
'diffstat': showdiffstat,
--- a/mercurial/transaction.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/transaction.py Mon Aug 02 10:55:51 2010 -0500
@@ -115,7 +115,7 @@
def release(self):
if self.count > 0:
self.usages -= 1
- # of the transaction scopes are left without being closed, fail
+ # if the transaction scopes are left without being closed, fail
if self.count > 0 and self.usages == 0:
self._abort()
--- a/mercurial/util.h Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/util.h Mon Aug 02 10:55:51 2010 -0500
@@ -12,6 +12,48 @@
#define IS_PY3K
#define PyInt_FromLong PyLong_FromLong
+#define PyInt_AsLong PyLong_AsLong
+
+/*
+ Mapping of some of the python < 2.x PyString* functions to py3k's PyUnicode.
+
+ The commented names below represent those that are present in the PyBytes
+ definitions for python < 2.6 (below in this file) that don't have a direct
+ implementation.
+*/
+
+#define PyStringObject PyUnicodeObject
+#define PyString_Type PyUnicode_Type
+
+#define PyString_Check PyUnicode_Check
+#define PyString_CheckExact PyUnicode_CheckExact
+#define PyString_CHECK_INTERNED PyUnicode_CHECK_INTERNED
+#define PyString_AS_STRING PyUnicode_AsLatin1String
+#define PyString_GET_SIZE PyUnicode_GET_SIZE
+
+#define PyString_FromStringAndSize PyUnicode_FromStringAndSize
+#define PyString_FromString PyUnicode_FromString
+#define PyString_FromFormatV PyUnicode_FromFormatV
+#define PyString_FromFormat PyUnicode_FromFormat
+/* #define PyString_Size PyUnicode_GET_SIZE */
+/* #define PyString_AsString */
+/* #define PyString_Repr */
+#define PyString_Concat PyUnicode_Concat
+#define PyString_ConcatAndDel PyUnicode_AppendAndDel
+#define _PyString_Resize PyUnicode_Resize
+/* #define _PyString_Eq */
+#define PyString_Format PyUnicode_Format
+/* #define _PyString_FormatLong */
+/* #define PyString_DecodeEscape */
+#define _PyString_Join PyUnicode_Join
+#define PyString_Decode PyUnicode_Decode
+#define PyString_Encode PyUnicode_Encode
+#define PyString_AsEncodedObject PyUnicode_AsEncodedObject
+#define PyString_AsEncodedString PyUnicode_AsEncodedString
+#define PyString_AsDecodedObject PyUnicode_AsDecodedObject
+#define PyString_AsDecodedString PyUnicode_AsDecodedUnicode
+/* #define PyString_AsStringAndSize */
+#define _PyString_InsertThousandsGrouping _PyUnicode_InsertThousandsGrouping
#endif /* PY_MAJOR_VERSION */
--- a/mercurial/util.py Mon Aug 02 10:48:31 2010 -0400
+++ b/mercurial/util.py Mon Aug 02 10:55:51 2010 -0500
@@ -15,7 +15,7 @@
from i18n import _
import error, osutil, encoding
-import cStringIO, errno, re, shutil, sys, tempfile, traceback
+import errno, re, shutil, sys, tempfile, traceback
import os, stat, time, calendar, textwrap, unicodedata, signal
import imp
@@ -38,9 +38,15 @@
import __builtin__
-def fakebuffer(sliceable, offset=0):
- return sliceable[offset:]
-if not hasattr(__builtin__, 'buffer'):
+if sys.version_info[0] < 3:
+ def fakebuffer(sliceable, offset=0):
+ return sliceable[offset:]
+else:
+ def fakebuffer(sliceable, offset=0):
+ return memoryview(sliceable)[offset:]
+try:
+ buffer
+except NameError:
__builtin__.buffer = fakebuffer
import subprocess
@@ -908,27 +914,35 @@
def __init__(self, in_iter):
"""in_iter is the iterator that's iterating over the input chunks.
targetsize is how big a buffer to try to maintain."""
- self.iter = iter(in_iter)
+ def splitbig(chunks):
+ for chunk in chunks:
+ if len(chunk) > 2**20:
+ pos = 0
+ while pos < len(chunk):
+ end = pos + 2 ** 18
+ yield chunk[pos:end]
+ pos = end
+ else:
+ yield chunk
+ self.iter = splitbig(in_iter)
self.buf = ''
- self.targetsize = 2**16
def read(self, l):
"""Read L bytes of data from the iterator of chunks of data.
Returns less than L bytes if the iterator runs dry."""
if l > len(self.buf) and self.iter:
- # Clamp to a multiple of self.targetsize
- targetsize = max(l, self.targetsize)
- collector = cStringIO.StringIO()
- collector.write(self.buf)
+ # Clamp to a multiple of 2**16
+ targetsize = max(l, 2**16)
+ collector = [str(self.buf)]
collected = len(self.buf)
for chunk in self.iter:
- collector.write(chunk)
+ collector.append(chunk)
collected += len(chunk)
if collected >= targetsize:
break
- if collected < targetsize:
+ else:
self.iter = False
- self.buf = collector.getvalue()
+ self.buf = ''.join(collector)
if len(self.buf) == l:
s, self.buf = str(self.buf), ''
else:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/wireproto.py Mon Aug 02 10:55:51 2010 -0500
@@ -0,0 +1,332 @@
+# wireproto.py - generic wire protocol support functions
+#
+# Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import urllib, tempfile, os
+from i18n import _
+from node import bin, hex
+import changegroup as changegroupmod
+import repo, error, encoding, util, store
+import pushkey as pushkey_
+
+# list of nodes encoding / decoding
+
+def decodelist(l, sep=' '):
+ return map(bin, l.split(sep))
+
+def encodelist(l, sep=' '):
+ return sep.join(map(hex, l))
+
+# client side
+
+class wirerepository(repo.repository):
+ def lookup(self, key):
+ self.requirecap('lookup', _('look up remote revision'))
+ d = self._call("lookup", key=key)
+ success, data = d[:-1].split(" ", 1)
+ if int(success):
+ return bin(data)
+ self._abort(error.RepoError(data))
+
+ def heads(self):
+ d = self._call("heads")
+ try:
+ return decodelist(d[:-1])
+ except:
+ self.abort(error.ResponseError(_("unexpected response:"), d))
+
+ def branchmap(self):
+ d = self._call("branchmap")
+ try:
+ branchmap = {}
+ for branchpart in d.splitlines():
+ branchname, branchheads = branchpart.split(' ', 1)
+ branchname = urllib.unquote(branchname)
+ # Earlier servers (1.3.x) send branch names in (their) local
+ # charset. The best we can do is assume it's identical to our
+ # own local charset, in case it's not utf-8.
+ try:
+ branchname.decode('utf-8')
+ except UnicodeDecodeError:
+ branchname = encoding.fromlocal(branchname)
+ branchheads = decodelist(branchheads)
+ branchmap[branchname] = branchheads
+ return branchmap
+ except TypeError:
+ self._abort(error.ResponseError(_("unexpected response:"), d))
+
+ def branches(self, nodes):
+ n = encodelist(nodes)
+ d = self._call("branches", nodes=n)
+ try:
+ br = [tuple(decodelist(b)) for b in d.splitlines()]
+ return br
+ except:
+ self._abort(error.ResponseError(_("unexpected response:"), d))
+
+ def between(self, pairs):
+ batch = 8 # avoid giant requests
+ r = []
+ for i in xrange(0, len(pairs), batch):
+ n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
+ d = self._call("between", pairs=n)
+ try:
+ r.extend(l and decodelist(l) or [] for l in d.splitlines())
+ except:
+ self._abort(error.ResponseError(_("unexpected response:"), d))
+ return r
+
+ def pushkey(self, namespace, key, old, new):
+ if not self.capable('pushkey'):
+ return False
+ d = self._call("pushkey",
+ namespace=namespace, key=key, old=old, new=new)
+ return bool(int(d))
+
+ def listkeys(self, namespace):
+ if not self.capable('pushkey'):
+ return {}
+ d = self._call("listkeys", namespace=namespace)
+ r = {}
+ for l in d.splitlines():
+ k, v = l.split('\t')
+ r[k.decode('string-escape')] = v.decode('string-escape')
+ return r
+
+ def stream_out(self):
+ return self._callstream('stream_out')
+
+ def changegroup(self, nodes, kind):
+ n = encodelist(nodes)
+ f = self._callstream("changegroup", roots=n)
+ return self._decompress(f)
+
+ def changegroupsubset(self, bases, heads, kind):
+ self.requirecap('changegroupsubset', _('look up remote changes'))
+ bases = encodelist(bases)
+ heads = encodelist(heads)
+ return self._decompress(self._callstream("changegroupsubset",
+ bases=bases, heads=heads))
+
+ def unbundle(self, cg, heads, source):
+ '''Send cg (a readable file-like object representing the
+ changegroup to push, typically a chunkbuffer object) to the
+ remote server as a bundle. Return an integer indicating the
+ result of the push (see localrepository.addchangegroup()).'''
+
+ ret, output = self._callpush("unbundle", cg, heads=encodelist(heads))
+ if ret == "":
+ raise error.ResponseError(
+ _('push failed:'), output)
+ try:
+ ret = int(ret)
+ except ValueError, err:
+ raise error.ResponseError(
+ _('push failed (unexpected response):'), ret)
+
+ for l in output.splitlines(True):
+ self.ui.status(_('remote: '), l)
+ return ret
+
+# server side
+
+class streamres(object):
+ def __init__(self, gen):
+ self.gen = gen
+
+class pushres(object):
+ def __init__(self, res):
+ self.res = res
+
+def dispatch(repo, proto, command):
+ func, spec = commands[command]
+ args = proto.getargs(spec)
+ return func(repo, proto, *args)
+
+def between(repo, proto, pairs):
+ pairs = [decodelist(p, '-') for p in pairs.split(" ")]
+ r = []
+ for b in repo.between(pairs):
+ r.append(encodelist(b) + "\n")
+ return "".join(r)
+
+def branchmap(repo, proto):
+ branchmap = repo.branchmap()
+ heads = []
+ for branch, nodes in branchmap.iteritems():
+ branchname = urllib.quote(branch)
+ branchnodes = encodelist(nodes)
+ heads.append('%s %s' % (branchname, branchnodes))
+ return '\n'.join(heads)
+
+def branches(repo, proto, nodes):
+ nodes = decodelist(nodes)
+ r = []
+ for b in repo.branches(nodes):
+ r.append(encodelist(b) + "\n")
+ return "".join(r)
+
+def capabilities(repo, proto):
+ caps = 'lookup changegroupsubset branchmap pushkey'.split()
+ if _allowstream(repo.ui):
+ caps.append('stream=%d' % repo.changelog.version)
+ caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
+ return ' '.join(caps)
+
+def changegroup(repo, proto, roots):
+ nodes = decodelist(roots)
+ cg = repo.changegroup(nodes, 'serve')
+ return streamres(proto.groupchunks(cg))
+
+def changegroupsubset(repo, proto, bases, heads):
+ bases = decodelist(bases)
+ heads = decodelist(heads)
+ cg = repo.changegroupsubset(bases, heads, 'serve')
+ return streamres(proto.groupchunks(cg))
+
+def heads(repo, proto):
+ h = repo.heads()
+ return encodelist(h) + "\n"
+
+def hello(repo, proto):
+ '''the hello command returns a set of lines describing various
+ interesting things about the server, in an RFC822-like format.
+ Currently the only one defined is "capabilities", which
+ consists of a line in the form:
+
+ capabilities: space separated list of tokens
+ '''
+ return "capabilities: %s\n" % (capabilities(repo, proto))
+
+def listkeys(repo, proto, namespace):
+ d = pushkey_.list(repo, namespace).items()
+ t = '\n'.join(['%s\t%s' % (k.encode('string-escape'),
+ v.encode('string-escape')) for k, v in d])
+ return t
+
+def lookup(repo, proto, key):
+ try:
+ r = hex(repo.lookup(key))
+ success = 1
+ except Exception, inst:
+ r = str(inst)
+ success = 0
+ return "%s %s\n" % (success, r)
+
+def pushkey(repo, proto, namespace, key, old, new):
+ r = pushkey_.push(repo, namespace, key, old, new)
+ return '%s\n' % int(r)
+
+def _allowstream(ui):
+ return ui.configbool('server', 'uncompressed', True, untrusted=True)
+
+def stream(repo, proto):
+ '''If the server supports streaming clone, it advertises the "stream"
+ capability with a value representing the version and flags of the repo
+ it is serving. Client checks to see if it understands the format.
+
+ The format is simple: the server writes out a line with the amount
+ of files, then the total amount of bytes to be transfered (separated
+ by a space). Then, for each file, the server first writes the filename
+ and filesize (separated by the null character), then the file contents.
+ '''
+
+ if not _allowstream(repo.ui):
+ return '1\n'
+
+ entries = []
+ total_bytes = 0
+ try:
+ # get consistent snapshot of repo, lock during scan
+ lock = repo.lock()
+ try:
+ repo.ui.debug('scanning\n')
+ for name, ename, size in repo.store.walk():
+ entries.append((name, size))
+ total_bytes += size
+ finally:
+ lock.release()
+ except error.LockError:
+ return '2\n' # error: 2
+
+ def streamer(repo, entries, total):
+ '''stream out all metadata files in repository.'''
+ yield '0\n' # success
+ repo.ui.debug('%d files, %d bytes to transfer\n' %
+ (len(entries), total_bytes))
+ yield '%d %d\n' % (len(entries), total_bytes)
+ for name, size in entries:
+ repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
+ # partially encode name over the wire for backwards compat
+ yield '%s\0%d\n' % (store.encodedir(name), size)
+ for chunk in util.filechunkiter(repo.sopener(name), limit=size):
+ yield chunk
+
+ return streamres(streamer(repo, entries, total_bytes))
+
+def unbundle(repo, proto, heads):
+ their_heads = decodelist(heads)
+
+ def check_heads():
+ heads = repo.heads()
+ return their_heads == ['force'] or their_heads == heads
+
+ # fail early if possible
+ if not check_heads():
+ return 'unsynced changes'
+
+ # write bundle data to temporary file because it can be big
+ fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
+ fp = os.fdopen(fd, 'wb+')
+ r = 0
+ proto.redirect()
+ try:
+ proto.getfile(fp)
+ lock = repo.lock()
+ try:
+ if not check_heads():
+ # someone else committed/pushed/unbundled while we
+ # were transferring data
+ return 'unsynced changes'
+
+ # push can proceed
+ fp.seek(0)
+ header = fp.read(6)
+ if header.startswith('HG'):
+ if not header.startswith('HG10'):
+ raise ValueError('unknown bundle version')
+ elif header not in changegroupmod.bundletypes:
+ raise ValueError('unknown bundle compression type')
+ gen = changegroupmod.unbundle(header, fp)
+
+ try:
+ r = repo.addchangegroup(gen, 'serve', proto._client(),
+ lock=lock)
+ except util.Abort, inst:
+ sys.stderr.write("abort: %s\n" % inst)
+ finally:
+ lock.release()
+ return pushres(r)
+
+ finally:
+ fp.close()
+ os.unlink(tempname)
+
+commands = {
+ 'between': (between, 'pairs'),
+ 'branchmap': (branchmap, ''),
+ 'branches': (branches, 'nodes'),
+ 'capabilities': (capabilities, ''),
+ 'changegroup': (changegroup, 'roots'),
+ 'changegroupsubset': (changegroupsubset, 'bases heads'),
+ 'heads': (heads, ''),
+ 'hello': (hello, ''),
+ 'listkeys': (listkeys, 'namespace'),
+ 'lookup': (lookup, 'key'),
+ 'pushkey': (pushkey, 'namespace key old new'),
+ 'stream_out': (stream, ''),
+ 'unbundle': (unbundle, 'heads'),
+}
--- a/setup.py Mon Aug 02 10:48:31 2010 -0400
+++ b/setup.py Mon Aug 02 10:55:51 2010 -0500
@@ -9,6 +9,17 @@
if not hasattr(sys, 'version_info') or sys.version_info < (2, 4, 0, 'final'):
raise SystemExit("Mercurial requires Python 2.4 or later.")
+if sys.version_info[0] >= 3:
+ def b(s):
+ '''A helper function to emulate 2.6+ bytes literals using string
+ literals.'''
+ return s.encode('latin1')
+else:
+ def b(s):
+ '''A helper function to emulate 2.6+ bytes literals using string
+ literals.'''
+ return s
+
# Solaris Python packaging brain damage
try:
import hashlib
@@ -114,8 +125,8 @@
# fine, we don't want to load it anyway. Python may warn about
# a missing __init__.py in mercurial/locale, we also ignore that.
err = [e for e in err.splitlines()
- if not e.startswith('Not trusting file') \
- and not e.startswith('warning: Not importing')]
+ if not e.startswith(b('Not trusting file')) \
+ and not e.startswith(b('warning: Not importing'))]
if err:
return ''
return out
@@ -275,7 +286,8 @@
cc = new_compiler()
if hasfunction(cc, 'inotify_add_watch'):
inotify = Extension('hgext.inotify.linux._inotify',
- ['hgext/inotify/linux/_inotify.c'])
+ ['hgext/inotify/linux/_inotify.c'],
+ ['mercurial'])
inotify.optional = True
extmodules.append(inotify)
packages.extend(['hgext.inotify', 'hgext.inotify.linux'])
--- a/tests/test-alias Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-alias Mon Aug 02 10:55:51 2010 -0500
@@ -8,12 +8,17 @@
ambiguous = s
recursive = recursive
nodefinition =
+no--cwd = status --cwd elsewhere
+no-R = status -R elsewhere
+no--repo = status --repo elsewhere
+no--repository = status --repository elsewhere
mylog = log
lognull = log -r null
shortlog = log --template '{rev} {node|short} | {date|isodate}\n'
dln = lognull --debug
nousage = rollback
put = export -r 0 -o "\$FOO/%R.diff"
+echo = !echo
rt = root
[defaults]
@@ -41,6 +46,16 @@
hg nodef
hg help nodef
+echo '% invalid options'
+hg no--cwd
+hg help no--cwd
+hg no-R
+hg help no-R
+hg no--repo
+hg help no--repo
+hg no--repository
+hg help no--repository
+
cd alias
echo '% no usage'
@@ -66,6 +81,8 @@
FOO=`pwd` hg put
cat 0.diff
+echo '% shell aliases'
+hg echo foo
echo '% invalid arguments'
hg rt foo
--- a/tests/test-alias.out Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-alias.out Mon Aug 02 10:55:51 2010 -0500
@@ -11,6 +11,15 @@
% no definition
no definition for alias 'nodefinition'
no definition for alias 'nodefinition'
+% invalid options
+error in definition for alias 'no--cwd': --cwd may only be given on the command line
+error in definition for alias 'no--cwd': --cwd may only be given on the command line
+error in definition for alias 'no-R': -R may only be given on the command line
+error in definition for alias 'no-R': -R may only be given on the command line
+error in definition for alias 'no--repo': --repo may only be given on the command line
+error in definition for alias 'no--repo': --repo may only be given on the command line
+error in definition for alias 'no--repository': --repository may only be given on the command line
+error in definition for alias 'no--repository': --repository may only be given on the command line
% no usage
no rollback information available
adding foo
@@ -43,6 +52,8 @@
+++ b/foo Thu Jan 01 00:00:00 1970 +0000
@@ -0,0 +1,1 @@
+foo
+% shell aliases
+foo
% invalid arguments
hg rt: invalid arguments
hg rt
--- a/tests/test-branchmap Mon Aug 02 10:48:31 2010 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,58 +0,0 @@
-#!/bin/sh
-
-hgserve()
-{
- hg serve -a localhost -p $HGPORT1 -d --pid-file=hg.pid -E errors.log -v $@ \
- | sed -e 's/:[0-9][0-9]*//g' -e 's/http:\/\/[^/]*\//http:\/\/localhost\//'
- cat hg.pid >> "$DAEMON_PIDS"
-}
-
-hg init a
-hg --encoding utf-8 -R a branch æ
-echo foo > a/foo
-hg -R a ci -Am foo
-
-hgserve -R a --config web.push_ssl=False --config web.allow_push=* --encoding latin1
-hg --encoding utf-8 clone http://localhost:$HGPORT1 b
-hg --encoding utf-8 -R b log
-echo bar >> b/foo
-hg -R b ci -m bar
-hg --encoding utf-8 -R b push | sed "s/$HGPORT1/PORT/"
-hg -R a --encoding utf-8 log
-
-kill `cat hg.pid`
-
-
-# verify 7e7d56fe4833 (encoding fallback in branchmap to maintain compatibility with 1.3.x)
-
-cat <<EOF > oldhg
-import sys
-from mercurial import ui, hg, commands
-
-class StdoutWrapper(object):
- def __init__(self, stdout):
- self._file = stdout
-
- def write(self, data):
- if data == '47\n':
- # latin1 encoding is one %xx (3 bytes) shorter
- data = '44\n'
- elif data.startswith('%C3%A6 '):
- # translate to latin1 encoding
- data = '%%E6 %s' % data[7:]
- self._file.write(data)
-
- def __getattr__(self, name):
- return getattr(self._file, name)
-
-sys.stdout = StdoutWrapper(sys.stdout)
-sys.stderr = StdoutWrapper(sys.stderr)
-
-myui = ui.ui()
-repo = hg.repository(myui, 'a')
-commands.serve(myui, repo, stdio=True)
-EOF
-
-echo baz >> b/foo
-hg -R b ci -m baz
-hg push -R b -e 'python oldhg' ssh://dummy/ --encoding latin1
--- a/tests/test-branchmap.out Mon Aug 02 10:48:31 2010 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-marked working directory as branch æ
-adding foo
-listening at http://localhost/ (bound to 127.0.0.1)
-requesting all changes
-adding changesets
-adding manifests
-adding file changes
-added 1 changesets with 1 changes to 1 files
-updating to branch æ
-1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-changeset: 0:867c11ce77b8
-branch: æ
-tag: tip
-user: test
-date: Thu Jan 01 00:00:00 1970 +0000
-summary: foo
-
-pushing to http://localhost:PORT
-searching for changes
-remote: adding changesets
-remote: adding manifests
-remote: adding file changes
-remote: added 1 changesets with 1 changes to 1 files
-changeset: 1:58e7c90d67cb
-branch: æ
-tag: tip
-user: test
-date: Thu Jan 01 00:00:00 1970 +0000
-summary: bar
-
-changeset: 0:867c11ce77b8
-branch: æ
-user: test
-date: Thu Jan 01 00:00:00 1970 +0000
-summary: foo
-
-pushing to ssh://dummy/
-searching for changes
-remote: adding changesets
-remote: adding manifests
-remote: adding file changes
-remote: added 1 changesets with 1 changes to 1 files
--- a/tests/test-clone-cgi Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-clone-cgi Mon Aug 02 10:55:51 2010 -0500
@@ -55,7 +55,7 @@
SERVER_SOFTWARE="Apache/2.0.53 (Fedora)"; export SERVER_SOFTWARE
echo % try hgweb request
-QUERY_STRING="cmd=changegroup"; export QUERY_STRING
+QUERY_STRING="cmd=changegroup&roots=0000000000000000000000000000000000000000"; export QUERY_STRING
python hgweb.cgi >page1 2>&1 ; echo $?
python "$TESTDIR/md5sum.py" page1
--- a/tests/test-command-template Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-command-template Mon Aug 02 10:55:51 2010 -0500
@@ -125,6 +125,7 @@
hg log --template '{desc|firstline}\n'
hg log --template '{node|short}\n'
hg log --template '<changeset author="{author|xmlescape}"/>\n'
+hg log --template '{rev}: {children}\n'
echo '# formatnode filter works'
echo '# quiet'
--- a/tests/test-command-template.out Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-command-template.out Mon Aug 02 10:55:51 2010 -0500
@@ -1018,6 +1018,15 @@
<changeset author="other@place"/>
<changeset author="A. N. Other <other@place>"/>
<changeset author="User Name <user@hostname>"/>
+8:
+7: 8:95c24699272e
+6:
+5: 6:c7b487c6c50e
+4: 6:c7b487c6c50e
+3: 4:32a18f097fcc 5:13207e5a10d9
+2: 3:10e46f2dcbf4
+1: 2:97054abb4ab8
+0: 1:b608e9d1a3f0
# formatnode filter works
# quiet
1e4e1b8f71e0
--- a/tests/test-convert-filemap Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-convert-filemap Mon Aug 02 10:55:51 2010 -0500
@@ -128,3 +128,14 @@
hg --cwd source cat copied
echo 'copied2:'
hg --cwd renames.repo cat copied2
+
+echo % filemap errors
+cat > errors.fmap <<EOF
+include dir/ # beware that comments changes error line numbers!
+exclude /dir
+rename dir//dir /dir//dir/ "out of sync"
+include
+EOF
+hg -q convert --filemap errors.fmap source errors.repo
+
+true # happy ending
--- a/tests/test-convert-filemap.out Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-convert-filemap.out Mon Aug 02 10:55:51 2010 -0500
@@ -157,3 +157,11 @@
foo
copied2:
foo
+% filemap errors
+errors.fmap:1: superfluous / in exclude 'dir/'
+errors.fmap:3: superfluous / in include '/dir'
+errors.fmap:3: superfluous / in rename '/dir'
+errors.fmap:3: superfluous / in exclude 'dir//dir'
+errors.fmap:4: unknown directive 'out of sync'
+errors.fmap:5: path to exclude is missing
+abort: errors in filemap
--- a/tests/test-convert-hg-source Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-convert-hg-source Mon Aug 02 10:55:51 2010 -0500
@@ -32,6 +32,10 @@
chmod +x baz
hg ci -m 'mark baz executable' -d '5 0'
+hg branch foo
+hg ci -m 'branch foo' -d '6 0'
+hg ci --close-branch -m 'close' -d '7 0'
+
cd ..
hg convert --datesort orig new 2>&1 | grep -v 'subversion python bindings could not be loaded'
cd new
--- a/tests/test-convert-hg-source.out Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-convert-hg-source.out Mon Aug 02 10:55:51 2010 -0500
@@ -7,16 +7,19 @@
1 files updated, 1 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
created new head
+marked working directory as branch foo
initializing destination new repository
scanning source...
sorting...
converting...
-5 add foo bar
-4 change foo
-3 make bar and baz copies of foo
-2 merge local copy
-1 merge remote copy
-0 mark baz executable
+7 add foo bar
+6 change foo
+5 make bar and baz copies of foo
+4 merge local copy
+3 merge remote copy
+2 mark baz executable
+1 branch foo
+0 close
comparing with ../orig
searching for changes
no changes found
--- a/tests/test-convert.out Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-convert.out Mon Aug 02 10:55:51 2010 -0500
@@ -65,7 +65,7 @@
rename path/to/source path/to/destination
- Comment lines start with '#'. A specificed path matches if it equals the
+ Comment lines start with '#'. A specified path matches if it equals the
full relative name of a file or one of its parent directories. The
'include' or 'exclude' directive with the longest matching path applies,
so line order does not matter.
@@ -74,7 +74,7 @@
be included in the destination repository, and the exclusion of all other
files and directories not explicitly included. The 'exclude' directive
causes files or directories to be omitted. The 'rename' directive renames
- a file or directory if is converted. To rename from a subdirectory into
+ a file or directory if it is converted. To rename from a subdirectory into
the root of the repository, use '.' as the path to rename to.
The splicemap is a file that allows insertion of synthetic history,
--- a/tests/test-debugbuilddag Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-debugbuilddag Mon Aug 02 10:55:51 2010 -0500
@@ -13,6 +13,8 @@
hg debugbuilddag '+2:f +3:p2 @temp <f+4 @default /p2 +2' -q -oa
echo -- dag
hg debugdag -t -b
+echo -- tip
+hg id
echo -- glog
hg glog --template '{rev}: {desc} [{branches}] @ {date}\n'
echo -- glog of
@@ -35,6 +37,8 @@
hg debugbuilddag '+2:f +3:p2 @temp <f+4 @default /p2 +2' -q -mn
echo -- dag
hg debugdag -t -b
+echo -- tip
+hg id
echo -- glog
hg glog --template '{rev}: {desc} [{branches}] @ {date}\n'
echo -- glog mf
--- a/tests/test-debugbuilddag.out Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-debugbuilddag.out Mon Aug 02 10:55:51 2010 -0500
@@ -4,6 +4,8 @@
+3:p2
@temp*f+3
@default*/p2+2:tip
+-- tip
+f96e381c614c tip
-- glog
@ 11: r11 [] @ 11.00
|
@@ -101,6 +103,8 @@
+3:p2
@temp*f+3
@default*/p2+2:tip
+-- tip
+9c5ce9b70771 tip
-- glog
@ 11: r11 [] @ 11.00
|
--- a/tests/test-diff-upgrade Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-diff-upgrade Mon Aug 02 10:55:51 2010 -0500
@@ -35,7 +35,7 @@
python -c "file('binary', 'wb').write('\0\0')"
python -c "file('newbinary', 'wb').write('\0')"
rm rmbinary
-hg addremove
+hg addremove -s 0
echo '% git=no: regular diff for all files'
hg autodiff --git=no
--- a/tests/test-hgweb-commands Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-hgweb-commands Mon Aug 02 10:55:51 2010 -0500
@@ -46,11 +46,11 @@
echo % heads
"$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT '?cmd=heads'
echo % lookup
-"$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT '?cmd=lookup&node=1'
+"$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT '?cmd=lookup&key=1'
echo % branches
-"$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT '?cmd=branches'
+"$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT '?cmd=branches&nodes=0000000000000000000000000000000000000000'
echo % changegroup
-"$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT '?cmd=changegroup' \
+"$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT '?cmd=changegroup&roots=0000000000000000000000000000000000000000' \
| $TESTDIR/printrepr.py
echo % stream_out
"$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT '?cmd=stream_out'
--- a/tests/test-hgweb-commands.out Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-hgweb-commands.out Mon Aug 02 10:55:51 2010 -0500
@@ -852,11 +852,11 @@
% lookup
200 Script output follows
-0 'key'
+1 a4f92ed23982be056b9852de5dfe873eaac7f0de
% branches
200 Script output follows
-1d22e65f027e5a0609357e7d8e7508cd2ba5d2fe 2ef0ac749a14e4f57a5a822464a0902c6f7f448f 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
+0000000000000000000000000000000000000000 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
% changegroup
200 Script output follows
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-hgweb-raw Mon Aug 02 10:55:51 2010 -0500
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+hg init test
+cd test
+mkdir sub
+cat >'sub/some "text".txt' <<ENDSOME
+This is just some random text
+that will go inside the file and take a few lines.
+It is very boring to read, but computers don't
+care about things like that.
+ENDSOME
+hg add 'sub/some "text".txt'
+hg commit -d "1 0" -m "Just some text"
+hg serve -p $HGPORT -A access.log -E error.log -d --pid-file=hg.pid
+cat hg.pid >> $DAEMON_PIDS
+("$TESTDIR/get-with-headers.py" localhost:$HGPORT '/?f=a23bf1310f6e;file=sub/some%20%22text%22.txt;style=raw' content-type content-length content-disposition) >getoutput.txt &
+
+sleep 5
+kill `cat hg.pid`
+sleep 1 # wait for server to scream and die
+cat getoutput.txt
+cat access.log error.log | \
+ sed 's/^[^ ]*\( [^[]*\[\)[^]]*\(\].*\)$/host\1date\2/'
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-hgweb-raw.out Mon Aug 02 10:55:51 2010 -0500
@@ -0,0 +1,10 @@
+200 Script output follows
+content-type: text/plain; charset="ascii"
+content-length: 157
+content-disposition: inline; filename="some \"text\".txt"
+
+This is just some random text
+that will go inside the file and take a few lines.
+It is very boring to read, but computers don't
+care about things like that.
+host - - [date] "GET /?f=a23bf1310f6e;file=sub/some%20%22text%22.txt;style=raw HTTP/1.1" 200 -
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-http-branchmap Mon Aug 02 10:55:51 2010 -0500
@@ -0,0 +1,58 @@
+#!/bin/sh
+
+hgserve()
+{
+ hg serve -a localhost -p $HGPORT1 -d --pid-file=hg.pid -E errors.log -v $@ \
+ | sed -e 's/:[0-9][0-9]*//g' -e 's/http:\/\/[^/]*\//http:\/\/localhost\//'
+ cat hg.pid >> "$DAEMON_PIDS"
+}
+
+hg init a
+hg --encoding utf-8 -R a branch æ
+echo foo > a/foo
+hg -R a ci -Am foo
+
+hgserve -R a --config web.push_ssl=False --config web.allow_push=* --encoding latin1
+hg --encoding utf-8 clone http://localhost:$HGPORT1 b
+hg --encoding utf-8 -R b log
+echo bar >> b/foo
+hg -R b ci -m bar
+hg --encoding utf-8 -R b push | sed "s/$HGPORT1/PORT/"
+hg -R a --encoding utf-8 log
+
+kill `cat hg.pid`
+
+
+# verify 7e7d56fe4833 (encoding fallback in branchmap to maintain compatibility with 1.3.x)
+
+cat <<EOF > oldhg
+import sys
+from mercurial import ui, hg, commands
+
+class StdoutWrapper(object):
+ def __init__(self, stdout):
+ self._file = stdout
+
+ def write(self, data):
+ if data == '47\n':
+ # latin1 encoding is one %xx (3 bytes) shorter
+ data = '44\n'
+ elif data.startswith('%C3%A6 '):
+ # translate to latin1 encoding
+ data = '%%E6 %s' % data[7:]
+ self._file.write(data)
+
+ def __getattr__(self, name):
+ return getattr(self._file, name)
+
+sys.stdout = StdoutWrapper(sys.stdout)
+sys.stderr = StdoutWrapper(sys.stderr)
+
+myui = ui.ui()
+repo = hg.repository(myui, 'a')
+commands.serve(myui, repo, stdio=True)
+EOF
+
+echo baz >> b/foo
+hg -R b ci -m baz
+hg push -R b -e 'python oldhg' ssh://dummy/ --encoding latin1
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-http-branchmap.out Mon Aug 02 10:55:51 2010 -0500
@@ -0,0 +1,42 @@
+marked working directory as branch æ
+adding foo
+listening at http://localhost/ (bound to 127.0.0.1)
+requesting all changes
+adding changesets
+adding manifests
+adding file changes
+added 1 changesets with 1 changes to 1 files
+updating to branch æ
+1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+changeset: 0:867c11ce77b8
+branch: æ
+tag: tip
+user: test
+date: Thu Jan 01 00:00:00 1970 +0000
+summary: foo
+
+pushing to http://localhost:PORT
+searching for changes
+remote: adding changesets
+remote: adding manifests
+remote: adding file changes
+remote: added 1 changesets with 1 changes to 1 files
+changeset: 1:58e7c90d67cb
+branch: æ
+tag: tip
+user: test
+date: Thu Jan 01 00:00:00 1970 +0000
+summary: bar
+
+changeset: 0:867c11ce77b8
+branch: æ
+user: test
+date: Thu Jan 01 00:00:00 1970 +0000
+summary: foo
+
+pushing to ssh://dummy/
+searching for changes
+remote: adding changesets
+remote: adding manifests
+remote: adding file changes
+remote: added 1 changesets with 1 changes to 1 files
--- a/tests/test-issue660 Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-issue660 Mon Aug 02 10:55:51 2010 -0500
@@ -56,7 +56,7 @@
echo a > a/a
echo b > b
-hg addremove
+hg addremove -s 0
hg st
echo % commit
--- a/tests/test-log Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-log Mon Aug 02 10:55:51 2010 -0500
@@ -62,6 +62,11 @@
echo '% log -p d'
hg log -pv d
+echo '% log --removed file'
+hg log --removed -v a
+echo '% log --removed revrange file'
+hg log --removed -v -r0:2 a
+
# log --follow tests
hg init ../follow
cd ../follow
--- a/tests/test-log.out Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-log.out Mon Aug 02 10:55:51 2010 -0500
@@ -196,6 +196,32 @@
@@ -0,0 +1,1 @@
+a
+% log --removed file
+changeset: 3:7c6c671bb7cc
+user: test
+date: Thu Jan 01 00:00:04 1970 +0000
+files: a b d
+description:
+d
+
+
+changeset: 0:8580ff50825a
+user: test
+date: Thu Jan 01 00:00:01 1970 +0000
+files: a
+description:
+a
+
+
+% log --removed revrange file
+changeset: 0:8580ff50825a
+user: test
+date: Thu Jan 01 00:00:01 1970 +0000
+files: a
+description:
+a
+
+
adding base
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
adding b1
--- a/tests/test-mq-qimport Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-mq-qimport Mon Aug 02 10:55:51 2010 -0500
@@ -109,3 +109,19 @@
hg qimport --push another.diff
hg qfin -a
hg qimport -rtip -P
+
+hg qpop -a
+hg qdel -k 2.diff
+echo % qimport -e
+hg qimport -e 2.diff
+hg qdel -k 2.diff
+echo % qimport -e --name newname oldexisitingpatch
+hg qimport -e --name this-name-is-better 2.diff
+hg qser
+echo % qimport -e --name without --force
+cp .hg/patches/this-name-is-better .hg/patches/3.diff
+hg qimport -e --name this-name-is-better 3.diff
+hg qser
+echo % qimport -e --name with --force
+hg qimport --force -e --name this-name-is-better 3.diff
+hg qser
--- a/tests/test-mq-qimport.out Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-mq-qimport.out Mon Aug 02 10:55:51 2010 -0500
@@ -52,3 +52,21 @@
now at: another.diff
patch b.diff finalized without changeset message
patch another.diff finalized without changeset message
+popping 2.diff
+patch queue now empty
+% qimport -e
+adding 2.diff to series file
+% qimport -e --name newname oldexisitingpatch
+renaming 2.diff to this-name-is-better
+adding this-name-is-better to series file
+this-name-is-better
+url.diff
+% qimport -e --name without --force
+abort: patch "this-name-is-better" already exists
+this-name-is-better
+url.diff
+% qimport -e --name with --force
+renaming 3.diff to this-name-is-better
+adding this-name-is-better to series file
+this-name-is-better
+url.diff
--- a/tests/test-mq-qnew Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-mq-qnew Mon Aug 02 10:55:51 2010 -0500
@@ -70,6 +70,10 @@
HGUSER= hg qnew -u blue red
catpatch ../.hg/patches/red
+ echo '% qnew -e -u with no username configured'
+ HGUSER= hg qnew -e -u chartreuse fucsia
+ catpatch ../.hg/patches/fucsia
+
echo '% fail when trying to import a merge'
hg init merge
cd merge
--- a/tests/test-mq-qnew.out Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-mq-qnew.out Mon Aug 02 10:55:51 2010 -0500
@@ -42,6 +42,9 @@
% qnew -u with no username configured
From: blue
+% qnew -e -u with no username configured
+From: chartreuse
+
% fail when trying to import a merge
adding a
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -101,6 +104,10 @@
# HG changeset patch
# Parent
# User blue
+% qnew -e -u with no username configured
+# HG changeset patch
+# Parent
+# User chartreuse
% fail when trying to import a merge
adding a
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-notify-changegroup Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-notify-changegroup Mon Aug 02 10:55:51 2010 -0500
@@ -10,7 +10,7 @@
[notify]
sources = push
diffstat = False
-maxsubject = 200
+maxsubject = 10
[usersubs]
foo@bar = *
@@ -37,7 +37,7 @@
python -c 'import sys,re; print re.sub("\n\t", " ", sys.stdin.read()),' |
sed -e 's/\(Message-Id:\).*/\1/' \
-e 's/changeset \([0-9a-f]* *\)in .*test-notif/changeset \1in test-notif/' \
- -e 's/^Subject: .*test-notify/Subject: test-notify/' \
+ -e 's/^Subject: .*/Subject: test-notify-changegroup/' \
-e 's/^details: .*test-notify/details: test-notify/' \
-e 's/^Date:.*/Date:/'
--- a/tests/test-notify-changegroup.out Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-notify-changegroup.out Mon Aug 02 10:55:51 2010 -0500
@@ -15,7 +15,7 @@
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Date:
-Subject: test-notify-changegroup/a: 2 new changesets
+Subject: test-notify-changegroup
From: test
X-Hg-Notification: changeset cb9a9f314b8b
Message-Id:
--- a/tests/test-permissions Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-permissions Mon Aug 02 10:55:51 2010 -0500
@@ -1,27 +1,37 @@
#!/bin/sh
+echo '% hg init t'
hg init t
cd t
echo foo > a
+echo '% hg add a'
hg add a
+echo '% hg commit'
hg commit -m "1" -d "1000000 0"
+echo '% hg verify'
hg verify
chmod -r .hg/store/data/a.i
+echo '% hg verify'
hg verify 2>/dev/null || echo verify failed
chmod +r .hg/store/data/a.i
+echo '% hg verify'
hg verify 2>/dev/null || echo verify failed
chmod -w .hg/store/data/a.i
echo barber > a
+echo '% hg commit'
hg commit -m "2" -d "1000000 0" 2>/dev/null || echo commit failed
chmod -w .
+echo '% hg diff'
hg diff --nodates
chmod +w .
chmod +w .hg/store/data/a.i
mkdir dir
touch dir/a
+echo '% hg status'
hg status
chmod -rx dir
+echo '% hg status'
hg status
# reenable perm to allow deletion
chmod +rx dir
--- a/tests/test-permissions.out Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-permissions.out Mon Aug 02 10:55:51 2010 -0500
@@ -1,26 +1,36 @@
+% hg init t
+% hg add a
+% hg commit
+% hg verify
checking changesets
checking manifests
crosschecking files in changesets and manifests
checking files
1 files, 1 changesets, 1 total revisions
+% hg verify
checking changesets
checking manifests
crosschecking files in changesets and manifests
checking files
verify failed
+% hg verify
checking changesets
checking manifests
crosschecking files in changesets and manifests
checking files
1 files, 1 changesets, 1 total revisions
+% hg commit
commit failed
+% hg diff
diff -r c1fab96507ef a
--- a/a
+++ b/a
@@ -1,1 +1,1 @@
-foo
+barber
+% hg status
M a
? dir/a
+% hg status
dir: Permission denied
M a
--- a/tests/test-push-warn.out Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-push-warn.out Mon Aug 02 10:55:51 2010 -0500
@@ -37,7 +37,7 @@
searching for changes
abort: push creates new remote heads on branch 'default'!
(did you forget to merge? use push -f to force)
-1
+255
pushing to ../c
searching for changes
no changes found
@@ -46,12 +46,12 @@
searching for changes
abort: push creates new remote heads on branch 'default'!
(did you forget to merge? use push -f to force)
-1
+255
pushing to ../c
searching for changes
abort: push creates new remote heads on branch 'default'!
(did you forget to merge? use push -f to force)
-1
+255
pushing to ../c
searching for changes
adding changesets
@@ -90,29 +90,29 @@
searching for changes
abort: push creates new remote branches: c!
(use 'hg push --new-branch' to create new remote branches)
-1
+255
pushing to ../f
searching for changes
abort: push creates new remote branches: c!
(use 'hg push --new-branch' to create new remote branches)
-1
+255
% multiple new branches
pushing to ../f
searching for changes
abort: push creates new remote branches: c, d!
(use 'hg push --new-branch' to create new remote branches)
-1
+255
pushing to ../f
searching for changes
abort: push creates new remote branches: c, d!
(use 'hg push --new-branch' to create new remote branches)
-1
+255
% fail on multiple head push
pushing to ../f
searching for changes
abort: push creates new remote heads on branch 'a'!
(did you forget to merge? use push -f to force)
-1
+255
% push replacement head on existing branches
pushing to ../f
searching for changes
@@ -149,7 +149,7 @@
searching for changes
abort: push creates new remote branches: e!
(use 'hg push --new-branch' to create new remote branches)
-1
+255
% using --new-branch to push new named branch
pushing to ../f
searching for changes
--- a/tests/test-qrecord.out Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-qrecord.out Mon Aug 02 10:55:51 2010 -0500
@@ -81,7 +81,7 @@
up
% qrecord a.patch
diff --git a/1.txt b/1.txt
-2 hunks, 4 lines changed
+2 hunks, 2 lines changed
examine changes to '1.txt'? [Ynsfdaq?]
@@ -1,3 +1,3 @@
1
@@ -96,7 +96,7 @@
5
record change 2/6 to '1.txt'? [Ynsfdaq?]
diff --git a/2.txt b/2.txt
-1 hunks, 2 lines changed
+1 hunks, 1 lines changed
examine changes to '2.txt'? [Ynsfdaq?]
@@ -1,5 +1,5 @@
a
@@ -107,7 +107,7 @@
e
record change 4/6 to '2.txt'? [Ynsfdaq?]
diff --git a/dir/a.txt b/dir/a.txt
-1 hunks, 2 lines changed
+1 hunks, 1 lines changed
examine changes to 'dir/a.txt'? [Ynsfdaq?]
% after qrecord a.patch 'tip'
@@ -164,7 +164,7 @@
up
% qrecord b.patch
diff --git a/1.txt b/1.txt
-1 hunks, 2 lines changed
+1 hunks, 1 lines changed
examine changes to '1.txt'? [Ynsfdaq?]
@@ -1,5 +1,5 @@
1
@@ -175,7 +175,7 @@
5
record change 1/3 to '1.txt'? [Ynsfdaq?]
diff --git a/dir/a.txt b/dir/a.txt
-1 hunks, 2 lines changed
+1 hunks, 1 lines changed
examine changes to 'dir/a.txt'? [Ynsfdaq?]
@@ -1,4 +1,4 @@
-hello world
--- a/tests/test-record.out Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-record.out Mon Aug 02 10:55:51 2010 -0500
@@ -222,7 +222,7 @@
record this change to 'plain'? [Ynsfdaq?]
% modify end of plain file, add EOL
diff --git a/plain b/plain
-1 hunks, 2 lines changed
+1 hunks, 1 lines changed
examine changes to 'plain'? [Ynsfdaq?]
@@ -9,4 +9,4 @@
9
@@ -234,7 +234,7 @@
record this change to 'plain'? [Ynsfdaq?]
% modify beginning, trim end, record both
diff --git a/plain b/plain
-2 hunks, 4 lines changed
+2 hunks, 3 lines changed
examine changes to 'plain'? [Ynsfdaq?]
@@ -1,4 +1,4 @@
-1
@@ -276,7 +276,7 @@
% trim beginning, modify end
% record end
diff --git a/plain b/plain
-2 hunks, 5 lines changed
+2 hunks, 4 lines changed
examine changes to 'plain'? [Ynsfdaq?]
@@ -1,9 +1,6 @@
-2
--- a/tests/test-rename Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-rename Mon Aug 02 10:55:51 2010 -0500
@@ -26,7 +26,7 @@
echo '# rename --after a single file when src and tgt already tracked'
mv d1/d11/a1 d2/c
-hg addrem
+hg addrem -s 0
hg rename --after d1/d11/a1 d2/c
hg status -C
hg update -C
--- a/tests/test-revset Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-revset Mon Aug 02 10:55:51 2010 -0500
@@ -110,6 +110,7 @@
log 'keyword(issue)'
log 'limit(head(), 1)'
log 'max(contains(a))'
+log 'min(contains(a))'
log 'merge()'
log 'modifies(b)'
log 'p1(merge())'
--- a/tests/test-revset.out Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-revset.out Mon Aug 02 10:55:51 2010 -0500
@@ -152,6 +152,8 @@
0
% log 'max(contains(a))'
5
+% log 'min(contains(a))'
+0
% log 'merge()'
6
% log 'modifies(b)'
--- a/tests/test-subrepo.out Mon Aug 02 10:48:31 2010 -0400
+++ b/tests/test-subrepo.out Mon Aug 02 10:55:51 2010 -0500
@@ -183,13 +183,13 @@
% push -f
committing subrepository s
abort: push creates new remote heads on branch 'default'!
+(did you forget to merge? use push -f to force)
pushing ...sub/t
pushing ...sub/t/s/ss
searching for changes
no changes found
pushing ...sub/t/s
searching for changes
-(did you forget to merge? use push -f to force)
pushing ...sub/t
pushing ...sub/t/s/ss
searching for changes
@@ -300,4 +300,5 @@
created new head
committing subrepository s
abort: push creates new remote heads on branch 'default'!
+(did you forget to merge? use push -f to force)
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-webraw Mon Aug 02 10:48:31 2010 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,23 +0,0 @@
-#!/bin/sh
-
-hg init test
-cd test
-mkdir sub
-cat >'sub/some "text".txt' <<ENDSOME
-This is just some random text
-that will go inside the file and take a few lines.
-It is very boring to read, but computers don't
-care about things like that.
-ENDSOME
-hg add 'sub/some "text".txt'
-hg commit -d "1 0" -m "Just some text"
-hg serve -p $HGPORT -A access.log -E error.log -d --pid-file=hg.pid
-cat hg.pid >> $DAEMON_PIDS
-("$TESTDIR/get-with-headers.py" localhost:$HGPORT '/?f=a23bf1310f6e;file=sub/some%20%22text%22.txt;style=raw' content-type content-length content-disposition) >getoutput.txt &
-
-sleep 5
-kill `cat hg.pid`
-sleep 1 # wait for server to scream and die
-cat getoutput.txt
-cat access.log error.log | \
- sed 's/^[^ ]*\( [^[]*\[\)[^]]*\(\].*\)$/host\1date\2/'
--- a/tests/test-webraw.out Mon Aug 02 10:48:31 2010 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,10 +0,0 @@
-200 Script output follows
-content-type: text/plain; charset="ascii"
-content-length: 157
-content-disposition: inline; filename="some \"text\".txt"
-
-This is just some random text
-that will go inside the file and take a few lines.
-It is very boring to read, but computers don't
-care about things like that.
-host - - [date] "GET /?f=a23bf1310f6e;file=sub/some%20%22text%22.txt;style=raw HTTP/1.1" 200 -