--- a/contrib/check-code.py Thu Jul 22 08:17:38 2010 -0500
+++ b/contrib/check-code.py Thu Jul 22 08:24:56 2010 -0500
@@ -7,7 +7,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
-import re, glob
+import re, glob, os
import optparse
def repquote(m):
@@ -70,6 +70,12 @@
]
pypats = [
+ (r'^\s*def\s*\w+\s*\(.*,\s*\(',
+ "tuple parameter unpacking not available in Python 3+"),
+ (r'lambda\s*\(.*,.*\)',
+ "tuple parameter unpacking not available in Python 3+"),
+ (r'\breduce\s*\(.*', "reduce is not available in Python 3+"),
+ (r'\.has_key\b', "dict.has_key is not available in Python 3+"),
(r'^\s*\t', "don't use tabs"),
(r'\S;\s*\n', "semicolon"),
(r'\w,\w', "missing whitespace after ,"),
@@ -154,7 +160,7 @@
def __init__(self):
self._lastseen = None
- def log(self, fname, lineno, line, msg):
+ def log(self, fname, lineno, line, msg, blame):
"""print error related a to given line of a given file.
The faulty line will also be printed but only once in the case
@@ -167,14 +173,26 @@
"""
msgid = fname, lineno, line
if msgid != self._lastseen:
- print "%s:%d:" % (fname, lineno)
+ if blame:
+ print "%s:%d (%s):" % (fname, lineno, blame)
+ else:
+ print "%s:%d:" % (fname, lineno)
print " > %s" % line
self._lastseen = msgid
print " " + msg
_defaultlogger = norepeatlogger()
-def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False):
+def getblame(f):
+ lines = []
+ for l in os.popen('hg annotate -un %s' % f):
+ start, line = l.split(':', 1)
+ user, rev = start.split()
+ lines.append((line[1:-1], user, rev))
+ return lines
+
+def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False,
+ blame=False):
"""checks style and portability of a given file
:f: filepath
@@ -185,6 +203,7 @@
return True if no error is found, False otherwise.
"""
+ blamecache = None
result = True
for name, match, filters, pats in checks:
fc = 0
@@ -204,7 +223,16 @@
if not warnings and msg.startswith("warning"):
continue
if re.search(p, l[1]):
- logfunc(f, n + 1, l[0], msg)
+ bd = ""
+ if blame:
+ bd = 'working directory'
+ if not blamecache:
+ blamecache = getblame(f)
+ if n < len(blamecache):
+ bl, bu, br = blamecache[n]
+ if bl == l[0]:
+ bd = '%s@%s' % (bu, br)
+ logfunc(f, n + 1, l[0], msg, bd)
fc += 1
result = False
if maxerr is not None and fc >= maxerr:
@@ -213,15 +241,16 @@
break
return result
-
if __name__ == "__main__":
parser = optparse.OptionParser("%prog [options] [files]")
parser.add_option("-w", "--warnings", action="store_true",
help="include warning-level checks")
parser.add_option("-p", "--per-file", type="int",
help="max warnings per file")
+ parser.add_option("-b", "--blame", action="store_true",
+ help="use annotate to generate blame info")
- parser.set_defaults(per_file=15, warnings=False)
+ parser.set_defaults(per_file=15, warnings=False, blame=False)
(options, args) = parser.parse_args()
if len(args) == 0:
@@ -230,4 +259,5 @@
check = args
for f in check:
- checkfile(f, maxerr=options.per_file, warnings=options.warnings)
+ checkfile(f, maxerr=options.per_file, warnings=options.warnings,
+ blame=options.blame)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/debugshell.py Thu Jul 22 08:24:56 2010 -0500
@@ -0,0 +1,21 @@
+# debugshell extension
+"""a python shell with repo, changelog & manifest objects"""
+
+import mercurial
+import code
+
+def debugshell(ui, repo, **opts):
+ objects = {
+ 'mercurial': mercurial,
+ 'repo': repo,
+ 'cl': repo.changelog,
+ 'mf': repo.manifest,
+ }
+ bannermsg = "loaded repo : %s\n" \
+ "using source: %s" % (repo.root,
+ mercurial.__path__[0])
+ code.interact(bannermsg, local=objects)
+
+cmdtable = {
+ "debugshell|dbsh": (debugshell, [])
+}
--- a/contrib/mergetools.hgrc Thu Jul 22 08:17:38 2010 -0500
+++ b/contrib/mergetools.hgrc Thu Jul 22 08:24:56 2010 -0500
@@ -13,6 +13,9 @@
gvimdiff.regname=path
gvimdiff.priority=-9
+vimdiff.args=$local $other $base
+vimdiff.priority=-10
+
merge.checkconflicts=True
merge.priority=-100
--- a/hgext/bugzilla.py Thu Jul 22 08:17:38 2010 -0500
+++ b/hgext/bugzilla.py Thu Jul 22 08:24:56 2010 -0500
@@ -437,5 +437,5 @@
bz.update(id, ctx)
bz.notify(ids, util.email(ctx.user()))
except MySQLdb.MySQLError, err:
- raise util.Abort(_('database error: %s') % err[1])
+ raise util.Abort(_('database error: %s') % err.args[1])
--- a/hgext/churn.py Thu Jul 22 08:17:38 2010 -0500
+++ b/hgext/churn.py Thu Jul 22 08:24:56 2010 -0500
@@ -149,7 +149,8 @@
if opts.get('diffstat'):
width -= 15
- def format(name, (added, removed)):
+ def format(name, diffstat):
+ added, removed = diffstat
return "%s %15s %s%s\n" % (pad(name, maxname),
'+%d/-%d' % (added, removed),
ui.label('+' * charnum(added),
--- a/hgext/convert/transport.py Thu Jul 22 08:17:38 2010 -0500
+++ b/hgext/convert/transport.py Thu Jul 22 08:24:56 2010 -0500
@@ -98,9 +98,8 @@
svn.ra.reparent(self.ra, self.svn_url.encode('utf8'))
class Reporter(object):
- def __init__(self, (reporter, report_baton)):
- self._reporter = reporter
- self._baton = report_baton
+ def __init__(self, reporter_data):
+ self._reporter, self._baton = reporter_data
def set_path(self, path, revnum, start_empty, lock_token, pool=None):
svn.ra.reporter2_invoke_set_path(self._reporter, self._baton,
--- a/hgext/inotify/client.py Thu Jul 22 08:17:38 2010 -0500
+++ b/hgext/inotify/client.py Thu Jul 22 08:24:56 2010 -0500
@@ -27,11 +27,11 @@
except (OSError, socket.error), err:
autostart = self.ui.configbool('inotify', 'autostart', True)
- if err[0] == errno.ECONNREFUSED:
+ if err.args[0] == errno.ECONNREFUSED:
self.ui.warn(_('inotify-client: found dead inotify server '
'socket; removing it\n'))
os.unlink(os.path.join(self.root, '.hg', 'inotify.sock'))
- if err[0] in (errno.ECONNREFUSED, errno.ENOENT) and autostart:
+ if err.args[0] in (errno.ECONNREFUSED, errno.ENOENT) and autostart:
self.ui.debug('(starting inotify server)\n')
try:
try:
@@ -49,13 +49,13 @@
return function(self, *args)
except socket.error, err:
self.ui.warn(_('inotify-client: could not talk to new '
- 'inotify server: %s\n') % err[-1])
- elif err[0] in (errno.ECONNREFUSED, errno.ENOENT):
+ 'inotify server: %s\n') % err.args[-1])
+ elif err.args[0] in (errno.ECONNREFUSED, errno.ENOENT):
# silently ignore normal errors if autostart is False
self.ui.debug('(inotify server not running)\n')
else:
self.ui.warn(_('inotify-client: failed to contact inotify '
- 'server: %s\n') % err[-1])
+ 'server: %s\n') % err.args[-1])
self.ui.traceback()
raise QueryFailed('inotify query failed')
@@ -75,7 +75,7 @@
try:
self.sock.connect(sockpath)
except socket.error, err:
- if err[0] == "AF_UNIX path too long":
+ if err.args[0] == "AF_UNIX path too long":
sockpath = os.readlink(sockpath)
self.sock.connect(sockpath)
else:
--- a/hgext/inotify/linux/_inotify.c Thu Jul 22 08:17:38 2010 -0500
+++ b/hgext/inotify/linux/_inotify.c Thu Jul 22 08:24:56 2010 -0500
@@ -15,6 +15,15 @@
#include <sys/ioctl.h>
#include <unistd.h>
+#include <util.h>
+
+/* Variables used in the event string representation */
+static PyObject *join;
+static PyObject *er_wm;
+static PyObject *er_wmc;
+static PyObject *er_wmn;
+static PyObject *er_wmcn;
+
static PyObject *init(PyObject *self, PyObject *args)
{
PyObject *ret = NULL;
@@ -312,8 +321,8 @@
};
PyDoc_STRVAR(
- event_doc,
- "event: Structure describing an inotify event.");
+ event_doc,
+ "event: Structure describing an inotify event.");
static PyObject *event_new(PyTypeObject *t, PyObject *a, PyObject *k)
{
@@ -327,20 +336,15 @@
Py_XDECREF(evt->cookie);
Py_XDECREF(evt->name);
- (*evt->ob_type->tp_free)(evt);
+ Py_TYPE(evt)->tp_free(evt);
}
static PyObject *event_repr(struct event *evt)
{
- int wd = PyInt_AsLong(evt->wd);
int cookie = evt->cookie == Py_None ? -1 : PyInt_AsLong(evt->cookie);
PyObject *ret = NULL, *pymasks = NULL, *pymask = NULL;
- PyObject *join = NULL;
char *maskstr;
-
- join = PyString_FromString("|");
- if (join == NULL)
- goto bail;
+ PyObject *tuple = NULL, *formatstr = NULL;
pymasks = decode_mask(PyInt_AsLong(evt->mask));
if (pymasks == NULL)
@@ -350,33 +354,35 @@
if (pymask == NULL)
goto bail;
- maskstr = PyString_AsString(pymask);
-
if (evt->name != Py_None) {
- PyObject *pyname = PyString_Repr(evt->name, 1);
- char *name = pyname ? PyString_AsString(pyname) : "???";
-
- if (cookie == -1)
- ret = PyString_FromFormat(
- "event(wd=%d, mask=%s, name=%s)",
- wd, maskstr, name);
- else
- ret = PyString_FromFormat("event(wd=%d, mask=%s, "
- "cookie=0x%x, name=%s)",
- wd, maskstr, cookie, name);
-
- Py_XDECREF(pyname);
+ if (cookie == -1) {
+ formatstr = er_wmn;
+ tuple = PyTuple_Pack(3, evt->wd, pymask, evt->name);
+ }
+ else {
+ formatstr = er_wmcn;
+ tuple = PyTuple_Pack(4, evt->wd, pymask,
+ evt->cookie, evt->name);
+ }
} else {
- if (cookie == -1)
- ret = PyString_FromFormat("event(wd=%d, mask=%s)",
- wd, maskstr);
+ if (cookie == -1) {
+ formatstr = er_wm;
+ tuple = PyTuple_Pack(2, evt->wd, pymask);
+ }
else {
- ret = PyString_FromFormat(
- "event(wd=%d, mask=%s, cookie=0x%x)",
- wd, maskstr, cookie);
+ formatstr = er_wmc;
+ tuple = PyTuple_Pack(3, evt->wd, pymask, evt->cookie);
}
}
+ if (tuple == NULL)
+ goto bail;
+
+ ret = PyNumber_Remainder(formatstr, tuple);
+
+ if (ret == NULL)
+ goto bail;
+
goto done;
bail:
Py_CLEAR(ret);
@@ -384,14 +390,13 @@
done:
Py_XDECREF(pymask);
Py_XDECREF(pymasks);
- Py_XDECREF(join);
+ Py_XDECREF(tuple);
return ret;
}
static PyTypeObject event_type = {
- PyObject_HEAD_INIT(NULL)
- 0, /*ob_size*/
+ PyVarObject_HEAD_INIT(NULL, 0)
"_inotify.event", /*tp_name*/
sizeof(struct event), /*tp_basicsize*/
0, /*tp_itemsize*/
@@ -561,6 +566,17 @@
return ret;
}
+static int init_globals(void)
+{
+ join = PyString_FromString("|");
+ er_wm = PyString_FromString("event(wd=%d, mask=%s)");
+ er_wmn = PyString_FromString("event(wd=%d, mask=%s, name=%s)");
+ er_wmc = PyString_FromString("event(wd=%d, mask=%s, cookie=0x%x)");
+ er_wmcn = PyString_FromString("event(wd=%d, mask=%s, cookie=0x%x, name=%s)");
+
+ return join && er_wm && er_wmn && er_wmc && er_wmcn;
+}
+
PyDoc_STRVAR(
read_doc,
"read(fd, bufsize[=65536]) -> list_of_events\n"
@@ -585,6 +601,35 @@
{NULL},
};
+#ifdef IS_PY3K
+static struct PyModuleDef _inotify_module = {
+ PyModuleDef_HEAD_INIT,
+ "_inotify",
+ doc,
+ -1,
+ methods
+};
+
+PyMODINIT_FUNC PyInit__inotify(void)
+{
+ PyObject *mod, *dict;
+
+ mod = PyModule_Create(&_inotify_module);
+
+ if (mod == NULL)
+ return NULL;
+
+ if (!init_globals())
+		return NULL;
+
+ dict = PyModule_GetDict(mod);
+
+ if (dict)
+ define_consts(dict);
+
+ return mod;
+}
+#else
void init_inotify(void)
{
PyObject *mod, *dict;
@@ -592,6 +637,9 @@
if (PyType_Ready(&event_type) == -1)
return;
+ if (!init_globals())
+ return;
+
mod = Py_InitModule3("_inotify", methods, doc);
dict = PyModule_GetDict(mod);
@@ -599,3 +647,4 @@
if (dict)
define_consts(dict);
}
+#endif
--- a/hgext/inotify/linuxserver.py Thu Jul 22 08:17:38 2010 -0500
+++ b/hgext/inotify/linuxserver.py Thu Jul 22 08:24:56 2010 -0500
@@ -117,7 +117,7 @@
try:
events = cls.poll.poll(timeout)
except select.error, err:
- if err[0] == errno.EINTR:
+ if err.args[0] == errno.EINTR:
continue
raise
if events:
--- a/hgext/inotify/server.py Thu Jul 22 08:17:38 2010 -0500
+++ b/hgext/inotify/server.py Thu Jul 22 08:24:56 2010 -0500
@@ -336,10 +336,10 @@
try:
self.sock.bind(self.sockpath)
except socket.error, err:
- if err[0] == errno.EADDRINUSE:
+ if err.args[0] == errno.EADDRINUSE:
raise AlreadyStartedException(_('cannot start: socket is '
'already bound'))
- if err[0] == "AF_UNIX path too long":
+ if err.args[0] == "AF_UNIX path too long":
if os.path.islink(self.sockpath) and \
not os.path.exists(self.sockpath):
raise util.Abort('inotify-server: cannot start: '
@@ -437,7 +437,7 @@
finally:
sock.shutdown(socket.SHUT_WR)
except socket.error, err:
- if err[0] != errno.EPIPE:
+ if err.args[0] != errno.EPIPE:
raise
if sys.platform == 'linux2':
--- a/hgext/keyword.py Thu Jul 22 08:17:38 2010 -0500
+++ b/hgext/keyword.py Thu Jul 22 08:24:56 2010 -0500
@@ -159,9 +159,9 @@
kwpat = r'\$(%s)(: [^$\n\r]*? )??\$' % '|'.join(escaped)
self.re_kw = re.compile(kwpat)
- templatefilters.filters['utcdate'] = utcdate
- templatefilters.filters['svnisodate'] = svnisodate
- templatefilters.filters['svnutcdate'] = svnutcdate
+ templatefilters.filters.update({'utcdate': utcdate,
+ 'svnisodate': svnisodate,
+ 'svnutcdate': svnutcdate})
def substitute(self, data, path, ctx, subfunc):
'''Replaces keywords in data with expanded template.'''
--- a/hgext/mq.py Thu Jul 22 08:17:38 2010 -0500
+++ b/hgext/mq.py Thu Jul 22 08:24:56 2010 -0500
@@ -1999,7 +1999,7 @@
"""
msg = cmdutil.logmessage(opts)
def getmsg():
- return ui.edit(msg, ui.username())
+ return ui.edit(msg, opts['user'] or ui.username())
q = repo.mq
opts['msg'] = msg
if opts.get('edit'):
--- a/hgext/record.py Thu Jul 22 08:17:38 2010 -0500
+++ b/hgext/record.py Thu Jul 22 08:24:56 2010 -0500
@@ -10,7 +10,7 @@
from mercurial.i18n import gettext, _
from mercurial import cmdutil, commands, extensions, hg, mdiff, patch
from mercurial import util
-import copy, cStringIO, errno, operator, os, re, tempfile
+import copy, cStringIO, errno, os, re, tempfile
lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
@@ -186,7 +186,8 @@
self.hunk = []
self.stream = []
- def addrange(self, (fromstart, fromend, tostart, toend, proc)):
+ def addrange(self, limits):
+ fromstart, fromend, tostart, toend, proc = limits
self.fromline = int(fromstart)
self.toline = int(tostart)
self.proc = proc
@@ -354,8 +355,8 @@
applied[chunk.filename()].append(chunk)
else:
fixoffset += chunk.removed - chunk.added
- return reduce(operator.add, [h for h in applied.itervalues()
- if h[0].special() or len(h) > 1], [])
+ return sum([h for h in applied.itervalues()
+ if h[0].special() or len(h) > 1], [])
def record(ui, repo, *pats, **opts):
'''interactively select changes to commit
@@ -485,7 +486,8 @@
# 3a. apply filtered patch to clean repo (clean)
if backups:
- hg.revert(repo, repo.dirstate.parents()[0], backups.has_key)
+ hg.revert(repo, repo.dirstate.parents()[0],
+ lambda key: key in backups)
# 3b. (apply)
if dopatch:
--- a/mercurial/archival.py Thu Jul 22 08:17:38 2010 -0500
+++ b/mercurial/archival.py Thu Jul 22 08:24:56 2010 -0500
@@ -12,7 +12,7 @@
import cStringIO, os, stat, tarfile, time, zipfile
import zlib, gzip
-def tidyprefix(dest, prefix, suffixes):
+def tidyprefix(dest, kind, prefix):
'''choose prefix to use for names in archive. make sure prefix is
safe for consumers.'''
@@ -23,7 +23,7 @@
raise ValueError('dest must be string if no prefix')
prefix = os.path.basename(dest)
lower = prefix.lower()
- for sfx in suffixes:
+ for sfx in exts.get(kind, []):
if lower.endswith(sfx):
prefix = prefix[:-len(sfx)]
break
@@ -35,6 +35,20 @@
raise util.Abort(_('archive prefix contains illegal components'))
return prefix
+exts = {
+ 'tar': ['.tar'],
+ 'tbz2': ['.tbz2', '.tar.bz2'],
+ 'tgz': ['.tgz', '.tar.gz'],
+ 'zip': ['.zip'],
+ }
+
+def guesskind(dest):
+ for kind, extensions in exts.iteritems():
+ if util.any(dest.endswith(ext) for ext in extensions):
+ return kind
+ return None
+
+
class tarit(object):
'''write archive to tar file or stream. can write uncompressed,
or compress with gzip or bzip2.'''
@@ -66,9 +80,7 @@
if fname:
self.fileobj.write(fname + '\000')
- def __init__(self, dest, prefix, mtime, kind=''):
- self.prefix = tidyprefix(dest, prefix, ['.tar', '.tar.bz2', '.tar.gz',
- '.tgz', '.tbz2'])
+ def __init__(self, dest, mtime, kind=''):
self.mtime = mtime
def taropen(name, mode, fileobj=None):
@@ -90,7 +102,7 @@
self.z = taropen(name='', mode='w|', fileobj=dest)
def addfile(self, name, mode, islink, data):
- i = tarfile.TarInfo(self.prefix + name)
+ i = tarfile.TarInfo(name)
i.mtime = self.mtime
i.size = len(data)
if islink:
@@ -129,8 +141,7 @@
'''write archive to zip file or stream. can write uncompressed,
or compressed with deflate.'''
- def __init__(self, dest, prefix, mtime, compress=True):
- self.prefix = tidyprefix(dest, prefix, ('.zip',))
+ def __init__(self, dest, mtime, compress=True):
if not isinstance(dest, str):
try:
dest.tell()
@@ -142,7 +153,7 @@
self.date_time = time.gmtime(mtime)[:6]
def addfile(self, name, mode, islink, data):
- i = zipfile.ZipInfo(self.prefix + name, self.date_time)
+ i = zipfile.ZipInfo(name, self.date_time)
i.compress_type = self.z.compression
# unzip will not honor unix file modes unless file creator is
# set to unix (id 3).
@@ -160,9 +171,7 @@
class fileit(object):
'''write archive as files in directory.'''
- def __init__(self, name, prefix, mtime):
- if prefix:
- raise util.Abort(_('cannot give prefix when archiving to files'))
+ def __init__(self, name, mtime):
self.basedir = name
self.opener = util.opener(self.basedir)
@@ -182,9 +191,9 @@
archivers = {
'files': fileit,
'tar': tarit,
- 'tbz2': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'bz2'),
- 'tgz': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'gz'),
- 'uzip': lambda name, prefix, mtime: zipit(name, prefix, mtime, False),
+ 'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'),
+ 'tgz': lambda name, mtime: tarit(name, mtime, 'gz'),
+ 'uzip': lambda name, mtime: zipit(name, mtime, False),
'zip': zipit,
}
@@ -204,19 +213,25 @@
prefix is name of path to put before every archive member.'''
+ if kind == 'files':
+ if prefix:
+ raise util.Abort(_('cannot give prefix when archiving to files'))
+ else:
+ prefix = tidyprefix(dest, kind, prefix)
+
def write(name, mode, islink, getdata):
if matchfn and not matchfn(name):
return
data = getdata()
if decode:
data = repo.wwritedata(name, data)
- archiver.addfile(name, mode, islink, data)
+ archiver.addfile(prefix + name, mode, islink, data)
if kind not in archivers:
raise util.Abort(_("unknown archive type '%s'") % kind)
ctx = repo[node]
- archiver = archivers[kind](dest, prefix, mtime or ctx.date()[0])
+ archiver = archivers[kind](dest, mtime or ctx.date()[0])
if repo.ui.configbool("ui", "archivemeta", True):
def metadata():
--- a/mercurial/changegroup.py Thu Jul 22 08:17:38 2010 -0500
+++ b/mercurial/changegroup.py Thu Jul 22 08:24:56 2010 -0500
@@ -61,8 +61,7 @@
# We want to gather manifests needed and filelogs affected.
def collect(node):
c = cl.read(node)
- for fn in c[3]:
- files.setdefault(fn, fn)
+ files.update(c[3])
mmfs.setdefault(c[0], node)
return collect
--- a/mercurial/cmdutil.py Thu Jul 22 08:17:38 2010 -0500
+++ b/mercurial/cmdutil.py Thu Jul 22 08:24:56 2010 -0500
@@ -1045,36 +1045,43 @@
fncache = {}
change = util.cachefunc(repo.changectx)
+ # First step is to fill wanted, the set of revisions that we want to yield.
+ # When it does not induce extra cost, we also fill fncache for revisions in
+ # wanted: a cache of filenames that were changed (ctx.files()) and that
+ # match the file filtering conditions.
+
if not slowpath and not match.files():
# No files, no patterns. Display all revs.
wanted = set(revs)
copies = []
if not slowpath:
+ # We only have to read through the filelog to find wanted revisions
+
+ minrev, maxrev = min(revs), max(revs)
# Only files, no patterns. Check the history of each file.
- def filerevgen(filelog, node):
+ def filerevgen(filelog, last):
cl_count = len(repo)
- if node is None:
- last = len(filelog) - 1
- else:
- last = filelog.rev(node)
- for i, window in increasing_windows(last, nullrev):
- revs = []
- for j in xrange(i - window, i + 1):
- n = filelog.node(j)
- revs.append((filelog.linkrev(j),
- follow and filelog.renamed(n)))
- for rev in reversed(revs):
- # only yield rev for which we have the changelog, it can
- # happen while doing "hg log" during a pull or commit
- if rev[0] < cl_count:
- yield rev
+ revs = []
+ for j in xrange(0, last + 1):
+ linkrev = filelog.linkrev(j)
+ if linkrev < minrev:
+ continue
+ # only yield rev for which we have the changelog, it can
+ # happen while doing "hg log" during a pull or commit
+ if linkrev > maxrev or linkrev >= cl_count:
+ break
+ n = filelog.node(j)
+ revs.append((filelog.linkrev(j),
+ follow and filelog.renamed(n)))
+
+ for rev in reversed(revs):
+ yield rev
def iterfiles():
for filename in match.files():
yield filename, None
for filename_node in copies:
yield filename_node
- minrev, maxrev = min(revs), max(revs)
for file_, node in iterfiles():
filelog = repo.file(file_)
if not len(filelog):
@@ -1088,31 +1095,33 @@
break
else:
continue
- for rev, copied in filerevgen(filelog, node):
- if rev <= maxrev:
- if rev < minrev:
- break
- fncache.setdefault(rev, [])
- fncache[rev].append(file_)
- wanted.add(rev)
- if copied:
- copies.append(copied)
+
+ if node is None:
+ last = len(filelog) - 1
+ else:
+ last = filelog.rev(node)
+
+ for rev, copied in filerevgen(filelog, last):
+ fncache.setdefault(rev, [])
+ fncache[rev].append(file_)
+ wanted.add(rev)
+ if copied:
+ copies.append(copied)
if slowpath:
+ # We have to read the changelog to match filenames against
+ # changed files
+
if follow:
raise util.Abort(_('can only follow copies/renames for explicit '
'filenames'))
# The slow path checks files modified in every changeset.
- def changerevgen():
- for i, window in increasing_windows(len(repo) - 1, nullrev):
- for j in xrange(i - window, i + 1):
- yield change(j)
-
- for ctx in changerevgen():
+ for i in sorted(revs):
+ ctx = change(i)
matches = filter(match, ctx.files())
if matches:
- fncache[ctx.rev()] = matches
- wanted.add(ctx.rev())
+ fncache[i] = matches
+ wanted.add(i)
class followfilter(object):
def __init__(self, onlyfirst=False):
@@ -1161,6 +1170,8 @@
if ff.match(x):
wanted.discard(x)
+ # Now that wanted is correctly initialized, we can iterate over the
+ # revision range, yielding only revisions in wanted.
def iterate():
if follow and not match.files():
ff = followfilter(onlyfirst=opts.get('follow_first'))
@@ -1171,7 +1182,6 @@
return rev in wanted
for i, window in increasing_windows(0, len(revs)):
- change = util.cachefunc(repo.changectx)
nrevs = [rev for rev in revs[i:i + window] if want(rev)]
for rev in sorted(nrevs):
fns = fncache.get(rev)
--- a/mercurial/commands.py Thu Jul 22 08:17:38 2010 -0500
+++ b/mercurial/commands.py Thu Jul 22 08:24:56 2010 -0500
@@ -83,7 +83,7 @@
Returns 0 if all files are successfully added.
"""
try:
- sim = float(opts.get('similarity') or 0)
+ sim = float(opts.get('similarity') or 100)
except ValueError:
raise util.Abort(_('similarity must be a number'))
if sim < 0 or sim > 100:
@@ -197,20 +197,7 @@
if os.path.realpath(dest) == repo.root:
raise util.Abort(_('repository root cannot be destination'))
- def guess_type():
- exttypes = {
- 'tar': ['.tar'],
- 'tbz2': ['.tbz2', '.tar.bz2'],
- 'tgz': ['.tgz', '.tar.gz'],
- 'zip': ['.zip'],
- }
-
- for type, extensions in exttypes.items():
- if util.any(dest.endswith(ext) for ext in extensions):
- return type
- return None
-
- kind = opts.get('type') or guess_type() or 'files'
+ kind = opts.get('type') or archival.guesskind(dest) or 'files'
prefix = opts.get('prefix')
if dest == '-':
@@ -1869,7 +1856,10 @@
if not doc:
doc = _("(no help text available)")
if hasattr(entry[0], 'definition'): # aliased command
- doc = _('alias for: hg %s\n\n%s') % (entry[0].definition, doc)
+ if entry[0].definition.startswith('!'): # shell alias
+ doc = _('shell alias for::\n\n %s') % entry[0].definition[1:]
+ else:
+ doc = _('alias for: hg %s\n\n%s') % (entry[0].definition, doc)
if ui.quiet:
doc = doc.splitlines()[0]
keep = ui.verbose and ['verbose'] or []
--- a/mercurial/discovery.py Thu Jul 22 08:17:38 2010 -0500
+++ b/mercurial/discovery.py Thu Jul 22 08:24:56 2010 -0500
@@ -35,7 +35,9 @@
exist on the remote side and that no child of a node of base exists
in both remote and repo.
Furthermore base will be updated to include the nodes that exists
- in repo and remote but no children exists in repo and remote.
+    in repo and remote but no children exist in both repo and remote.
+ In other words, base is the set of heads of the DAG resulting from
+ the intersection of the nodes from repo and remote.
If a list of heads is specified, return only nodes which are heads
or ancestors of these heads.
@@ -172,18 +174,17 @@
return base.keys(), list(fetch), heads
-def findoutgoing(repo, remote, base=None, heads=None, force=False):
+def findoutgoing(repo, remote, base=None, remoteheads=None, force=False):
"""Return list of nodes that are roots of subsets not in remote
If base dict is specified, assume that these nodes and their parents
exist on the remote side.
- If a list of heads is specified, return only nodes which are heads
- or ancestors of these heads, and return a second element which
- contains all remote heads which get new children.
+    If remoteheads is specified, assume it is the list of the heads from
+ the remote repository.
"""
if base is None:
base = {}
- findincoming(repo, remote, base, heads, force=force)
+ findincoming(repo, remote, base, remoteheads, force=force)
repo.ui.debug("common changesets up to "
+ " ".join(map(short, base.keys())) + "\n")
@@ -203,22 +204,12 @@
# find every node whose parents have been pruned
subset = []
# find every remote head that will get new children
- updated_heads = set()
for n in remain:
p1, p2 = repo.changelog.parents(n)
if p1 not in remain and p2 not in remain:
subset.append(n)
- if heads:
- if p1 in heads:
- updated_heads.add(p1)
- if p2 in heads:
- updated_heads.add(p2)
- # this is the set of all roots we have to push
- if heads:
- return subset, list(updated_heads)
- else:
- return subset
+ return subset
def prepush(repo, remote, force, revs, newbranch):
'''Analyze the local and remote repositories and determine which
@@ -235,34 +226,18 @@
successive changegroup chunks ready to be sent over the wire and
remoteheads is the list of remote heads.'''
common = {}
- remote_heads = remote.heads()
- inc = findincoming(repo, remote, common, remote_heads, force=force)
+ remoteheads = remote.heads()
+ inc = findincoming(repo, remote, common, remoteheads, force=force)
cl = repo.changelog
- update, updated_heads = findoutgoing(repo, remote, common, remote_heads)
+ update = findoutgoing(repo, remote, common, remoteheads)
outg, bases, heads = cl.nodesbetween(update, revs)
if not bases:
repo.ui.status(_("no changes found\n"))
return None, 1
- if not force and remote_heads != [nullid]:
-
- def fail_multiple_heads(unsynced, branch=None):
- if branch:
- msg = _("abort: push creates new remote heads"
- " on branch '%s'!\n") % branch
- else:
- msg = _("abort: push creates new remote heads!\n")
- repo.ui.warn(msg)
- if unsynced:
- repo.ui.status(_("(you should pull and merge or"
- " use push -f to force)\n"))
- else:
- repo.ui.status(_("(did you forget to merge?"
- " use push -f to force)\n"))
- return None, 0
-
+ if not force and remoteheads != [nullid]:
if remote.capable('branchmap'):
# Check for each named branch if we're creating new remote heads.
# To be a remote head after push, node must be either:
@@ -281,12 +256,10 @@
newbranches = branches - set(remotemap)
if newbranches and not newbranch: # new branch requires --new-branch
branchnames = ', '.join(sorted(newbranches))
- repo.ui.warn(_("abort: push creates "
- "new remote branches: %s!\n")
- % branchnames)
- repo.ui.status(_("(use 'hg push --new-branch' to create new "
- "remote branches)\n"))
- return None, 0
+ raise util.Abort(_("push creates new remote branches: %s!")
+ % branchnames,
+ hint=_("use 'hg push --new-branch' to create"
+ " new remote branches"))
branches.difference_update(newbranches)
# 3. Construct the initial oldmap and newmap dicts.
@@ -299,11 +272,11 @@
newmap = {}
unsynced = set()
for branch in branches:
- remoteheads = remotemap[branch]
- prunedheads = [h for h in remoteheads if h in cl.nodemap]
- oldmap[branch] = prunedheads
- newmap[branch] = list(prunedheads)
- if len(remoteheads) > len(prunedheads):
+ remotebrheads = remotemap[branch]
+ prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
+ oldmap[branch] = prunedbrheads
+ newmap[branch] = list(prunedbrheads)
+ if len(remotebrheads) > len(prunedbrheads):
unsynced.add(branch)
# 4. Update newmap with outgoing changes.
@@ -311,23 +284,12 @@
ctxgen = (repo[n] for n in outg)
repo._updatebranchcache(newmap, ctxgen)
- # 5. Check for new heads.
- # If there are more heads after the push than before, a suitable
- # warning, depending on unsynced status, is displayed.
- for branch in branches:
- if len(newmap[branch]) > len(oldmap[branch]):
- return fail_multiple_heads(branch in unsynced, branch)
-
- # 6. Check for unsynced changes on involved branches.
- if unsynced:
- repo.ui.warn(_("note: unsynced remote changes!\n"))
-
else:
- # Old servers: Check for new topological heads.
- # Code based on _updatebranchcache.
- newheads = set(h for h in remote_heads if h in cl.nodemap)
- oldheadcnt = len(newheads)
- newheads.update(outg)
+ # 1-4b. old servers: Check for new topological heads.
+ # Construct {old,new}map with branch = None (topological branch).
+ # (code based on _updatebranchcache)
+ oldheads = set(h for h in remoteheads if h in cl.nodemap)
+ newheads = oldheads.union(outg)
if len(newheads) > 1:
for latest in reversed(outg):
if latest not in newheads:
@@ -336,10 +298,31 @@
reachable = cl.reachable(latest, cl.node(minhrev))
reachable.remove(latest)
newheads.difference_update(reachable)
- if len(newheads) > oldheadcnt:
- return fail_multiple_heads(inc)
- if inc:
- repo.ui.warn(_("note: unsynced remote changes!\n"))
+ branches = set([None])
+ newmap = {None: newheads}
+ oldmap = {None: oldheads}
+ unsynced = inc and branches or set()
+
+ # 5. Check for new heads.
+ # If there are more heads after the push than before, a suitable
+ # warning, depending on unsynced status, is displayed.
+ for branch in branches:
+ if len(newmap[branch]) > len(oldmap[branch]):
+ if branch:
+ msg = _("push creates new remote heads "
+ "on branch '%s'!") % branch
+ else:
+ msg = _("push creates new remote heads!")
+
+ if branch in unsynced:
+ hint = _("you should pull and merge or use push -f to force")
+ else:
+ hint = _("did you forget to merge? use push -f to force")
+ raise util.Abort(msg, hint=hint)
+
+ # 6. Check for unsynced changes on involved branches.
+ if unsynced:
+ repo.ui.warn(_("note: unsynced remote changes!\n"))
if revs is None:
# use the fast path, no race possible on push
@@ -347,4 +330,4 @@
cg = repo._changegroup(nodes, 'push')
else:
cg = repo.changegroupsubset(update, revs, 'push')
- return cg, remote_heads
+ return cg, remoteheads
--- a/mercurial/dispatch.py Thu Jul 22 08:17:38 2010 -0500
+++ b/mercurial/dispatch.py Thu Jul 22 08:24:56 2010 -0500
@@ -6,7 +6,7 @@
# GNU General Public License version 2 or any later version.
from i18n import _
-import os, sys, atexit, signal, pdb, socket, errno, shlex, time
+import os, sys, atexit, signal, pdb, socket, errno, shlex, time, traceback
import util, commands, hg, fancyopts, extensions, hook, error
import cmdutil, encoding
import ui as uimod
@@ -23,6 +23,8 @@
u.setconfig('ui', 'traceback', 'on')
except util.Abort, inst:
sys.stderr.write(_("abort: %s\n") % inst)
+ if inst.hint:
+ sys.stdout.write(_("(%s)\n") % inst.hint)
return -1
except error.ParseError, inst:
if len(inst.args) > 1:
@@ -49,6 +51,8 @@
try:
# enter the debugger before command execution
if '--debugger' in args:
+ ui.warn(_("entering debugger - "
+ "type c to continue starting hg or h for help\n"))
pdb.set_trace()
try:
return _dispatch(ui, args)
@@ -57,6 +61,7 @@
except:
# enter the debugger when we hit an exception
if '--debugger' in args:
+ traceback.print_exc()
pdb.post_mortem(sys.exc_info()[2])
ui.traceback()
raise
@@ -113,6 +118,8 @@
commands.help_(ui, 'shortlist')
except util.Abort, inst:
ui.warn(_("abort: %s\n") % inst)
+ if inst.hint:
+ ui.status(_("(%s)\n") % inst.hint)
except ImportError, inst:
ui.warn(_("abort: %s!\n") % inst)
m = str(inst).split()[-1]
@@ -205,6 +212,13 @@
return
+ if self.definition.startswith('!'):
+ def fn(ui, *args):
+ cmd = '%s %s' % (self.definition[1:], ' '.join(args))
+ return util.system(cmd)
+ self.fn = fn
+ return
+
args = shlex.split(self.definition)
cmd = args.pop(0)
args = map(util.expandpath, args)
--- a/mercurial/error.py Thu Jul 22 08:17:38 2010 -0500
+++ b/mercurial/error.py Thu Jul 22 08:24:56 2010 -0500
@@ -32,6 +32,9 @@
class Abort(Exception):
"""Raised if a command needs to print an error and exit."""
+ def __init__(self, *args, **kw):
+ Exception.__init__(self, *args)
+ self.hint = kw.get('hint')
class ConfigError(Abort):
'Exception raised when parsing config files'
--- a/mercurial/hgweb/hgweb_mod.py Thu Jul 22 08:17:38 2010 -0500
+++ b/mercurial/hgweb/hgweb_mod.py Thu Jul 22 08:24:56 2010 -0500
@@ -6,8 +6,8 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
-import os
-from mercurial import ui, hg, hook, error, encoding, templater
+import os, sys, urllib
+from mercurial import ui, hg, hook, error, encoding, templater, util
from common import get_mtime, ErrorResponse, permhooks
from common import HTTP_OK, HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
from request import wsgirequest
@@ -112,24 +112,18 @@
# and the clients always use the old URL structure
cmd = req.form.get('cmd', [''])[0]
- if cmd and cmd in protocol.__all__:
+ if protocol.iscmd(cmd):
if query:
raise ErrorResponse(HTTP_NOT_FOUND)
- try:
- if cmd in perms:
- try:
- self.check_perm(req, perms[cmd])
- except ErrorResponse, inst:
- if cmd == 'unbundle':
- req.drain()
- raise
- method = getattr(protocol, cmd)
- return method(self.repo, req)
- except ErrorResponse, inst:
- req.respond(inst, protocol.HGTYPE)
- if not inst.message:
- return []
- return '0\n%s\n' % inst.message,
+ if cmd in perms:
+ try:
+ self.check_perm(req, perms[cmd])
+ except ErrorResponse, inst:
+ if cmd == 'unbundle':
+ req.drain()
+ req.respond(inst, protocol.HGTYPE)
+ return '0\n%s\n' % inst.message
+ return protocol.call(self.repo, req, cmd)
# translate user-visible url structure to internal structure
--- a/mercurial/hgweb/protocol.py Thu Jul 22 08:17:38 2010 -0500
+++ b/mercurial/hgweb/protocol.py Thu Jul 22 08:24:56 2010 -0500
@@ -5,221 +5,64 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
-import cStringIO, zlib, tempfile, errno, os, sys, urllib, copy
-from mercurial import util, streamclone, pushkey
-from mercurial.node import bin, hex
-from mercurial import changegroup as changegroupmod
-from common import ErrorResponse, HTTP_OK, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
-
-# __all__ is populated with the allowed commands. Be sure to add to it if
-# you're adding a new command, or the new command won't work.
-
-__all__ = [
- 'lookup', 'heads', 'branches', 'between', 'changegroup',
- 'changegroupsubset', 'capabilities', 'unbundle', 'stream_out',
- 'branchmap', 'pushkey', 'listkeys'
-]
+import cStringIO, zlib, sys, urllib
+from mercurial import util, wireproto
+from common import HTTP_OK
HGTYPE = 'application/mercurial-0.1'
-basecaps = 'lookup changegroupsubset branchmap pushkey'.split()
-
-def lookup(repo, req):
- try:
- r = hex(repo.lookup(req.form['key'][0]))
- success = 1
- except Exception, inst:
- r = str(inst)
- success = 0
- resp = "%s %s\n" % (success, r)
- req.respond(HTTP_OK, HGTYPE, length=len(resp))
- yield resp
-
-def heads(repo, req):
- resp = " ".join(map(hex, repo.heads())) + "\n"
- req.respond(HTTP_OK, HGTYPE, length=len(resp))
- yield resp
-
-def branchmap(repo, req):
- branches = repo.branchmap()
- heads = []
- for branch, nodes in branches.iteritems():
- branchname = urllib.quote(branch)
- branchnodes = [hex(node) for node in nodes]
- heads.append('%s %s' % (branchname, ' '.join(branchnodes)))
- resp = '\n'.join(heads)
- req.respond(HTTP_OK, HGTYPE, length=len(resp))
- yield resp
-
-def branches(repo, req):
- nodes = []
- if 'nodes' in req.form:
- nodes = map(bin, req.form['nodes'][0].split(" "))
- resp = cStringIO.StringIO()
- for b in repo.branches(nodes):
- resp.write(" ".join(map(hex, b)) + "\n")
- resp = resp.getvalue()
- req.respond(HTTP_OK, HGTYPE, length=len(resp))
- yield resp
-
-def between(repo, req):
- pairs = [map(bin, p.split("-"))
- for p in req.form['pairs'][0].split(" ")]
- resp = ''.join(" ".join(map(hex, b)) + "\n" for b in repo.between(pairs))
- req.respond(HTTP_OK, HGTYPE, length=len(resp))
- yield resp
-
-def changegroup(repo, req):
- req.respond(HTTP_OK, HGTYPE)
- nodes = []
-
- if 'roots' in req.form:
- nodes = map(bin, req.form['roots'][0].split(" "))
-
- z = zlib.compressobj()
- f = repo.changegroup(nodes, 'serve')
- while 1:
- chunk = f.read(4096)
- if not chunk:
- break
- yield z.compress(chunk)
-
- yield z.flush()
-
-def changegroupsubset(repo, req):
- req.respond(HTTP_OK, HGTYPE)
- bases = []
- heads = []
-
- if 'bases' in req.form:
- bases = [bin(x) for x in req.form['bases'][0].split(' ')]
- if 'heads' in req.form:
- heads = [bin(x) for x in req.form['heads'][0].split(' ')]
-
- z = zlib.compressobj()
- f = repo.changegroupsubset(bases, heads, 'serve')
- while 1:
- chunk = f.read(4096)
- if not chunk:
- break
- yield z.compress(chunk)
-
- yield z.flush()
-
-def capabilities(repo, req):
- caps = copy.copy(basecaps)
- if streamclone.allowed(repo.ui):
- caps.append('stream=%d' % repo.changelog.version)
- if changegroupmod.bundlepriority:
- caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
- rsp = ' '.join(caps)
- req.respond(HTTP_OK, HGTYPE, length=len(rsp))
- yield rsp
-
-def unbundle(repo, req):
-
- proto = req.env.get('wsgi.url_scheme') or 'http'
- their_heads = req.form['heads'][0].split(' ')
- def check_heads():
- heads = map(hex, repo.heads())
- return their_heads == [hex('force')] or their_heads == heads
-
- # fail early if possible
- if not check_heads():
- req.drain()
- raise ErrorResponse(HTTP_OK, 'unsynced changes')
-
- # do not lock repo until all changegroup data is
- # streamed. save to temporary file.
-
- fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
- fp = os.fdopen(fd, 'wb+')
- try:
- length = int(req.env['CONTENT_LENGTH'])
- for s in util.filechunkiter(req, limit=length):
+class webproto(object):
+ def __init__(self, req):
+ self.req = req
+ self.response = ''
+ def getargs(self, args):
+ data = {}
+ keys = args.split()
+ for k in keys:
+ if k == '*':
+ star = {}
+ for key in self.req.form.keys():
+ if key not in keys:
+ star[key] = self.req.form[key][0]
+ data['*'] = star
+ else:
+ data[k] = self.req.form[k][0]
+ return [data[k] for k in keys]
+ def getfile(self, fp):
+ length = int(self.req.env['CONTENT_LENGTH'])
+ for s in util.filechunkiter(self.req, limit=length):
fp.write(s)
-
- try:
- lock = repo.lock()
- try:
- if not check_heads():
- raise ErrorResponse(HTTP_OK, 'unsynced changes')
-
- fp.seek(0)
- header = fp.read(6)
- if header.startswith('HG') and not header.startswith('HG10'):
- raise ValueError('unknown bundle version')
- elif header not in changegroupmod.bundletypes:
- raise ValueError('unknown bundle compression type')
- gen = changegroupmod.unbundle(header, fp)
-
- # send addchangegroup output to client
-
- oldio = sys.stdout, sys.stderr
- sys.stderr = sys.stdout = cStringIO.StringIO()
+ def redirect(self):
+ self.oldio = sys.stdout, sys.stderr
+ sys.stderr = sys.stdout = cStringIO.StringIO()
+ def groupchunks(self, cg):
+ z = zlib.compressobj()
+ while 1:
+ chunk = cg.read(4096)
+ if not chunk:
+ break
+ yield z.compress(chunk)
+ yield z.flush()
+ def _client(self):
+ return 'remote:%s:%s:%s' % (
+ self.req.env.get('wsgi.url_scheme') or 'http',
+ urllib.quote(self.req.env.get('REMOTE_HOST', '')),
+ urllib.quote(self.req.env.get('REMOTE_USER', '')))
- try:
- url = 'remote:%s:%s:%s' % (
- proto,
- urllib.quote(req.env.get('REMOTE_HOST', '')),
- urllib.quote(req.env.get('REMOTE_USER', '')))
- try:
- ret = repo.addchangegroup(gen, 'serve', url, lock=lock)
- except util.Abort, inst:
- sys.stdout.write("abort: %s\n" % inst)
- ret = 0
- finally:
- val = sys.stdout.getvalue()
- sys.stdout, sys.stderr = oldio
- req.respond(HTTP_OK, HGTYPE)
- return '%d\n%s' % (ret, val),
- finally:
- lock.release()
- except ValueError, inst:
- raise ErrorResponse(HTTP_OK, inst)
- except (OSError, IOError), inst:
- error = getattr(inst, 'strerror', 'Unknown error')
- if not isinstance(error, str):
- error = 'Error: %s' % str(error)
- if inst.errno == errno.ENOENT:
- code = HTTP_NOT_FOUND
- else:
- code = HTTP_SERVER_ERROR
- filename = getattr(inst, 'filename', '')
- # Don't send our filesystem layout to the client
- if filename and filename.startswith(repo.root):
- filename = filename[len(repo.root)+1:]
- text = '%s: %s' % (error, filename)
- else:
- text = error.replace(repo.root + os.path.sep, '')
- raise ErrorResponse(code, text)
- finally:
- fp.close()
- os.unlink(tempname)
+def iscmd(cmd):
+ return cmd in wireproto.commands
-def stream_out(repo, req):
- req.respond(HTTP_OK, HGTYPE)
- try:
- for chunk in streamclone.stream_out(repo):
- yield chunk
- except streamclone.StreamException, inst:
- yield str(inst)
-
-def pushkey(repo, req):
- namespace = req.form['namespace'][0]
- key = req.form['key'][0]
- old = req.form['old'][0]
- new = req.form['new'][0]
-
- r = repo.pushkey(namespace, key, old, new)
- r = '%d\n' % int(r)
- req.respond(HTTP_OK, HGTYPE, length=len(r))
- yield r
-
-def listkeys(repo, req):
- namespace = req.form['namespace'][0]
- d = repo.listkeys(namespace).items()
- t = '\n'.join(['%s\t%s' % (k.encode('string-escape'),
- v.encode('string-escape')) for k, v in d])
- req.respond(HTTP_OK, HGTYPE, length=len(t))
- yield t
+def call(repo, req, cmd):
+ p = webproto(req)
+ rsp = wireproto.dispatch(repo, p, cmd)
+ if isinstance(rsp, str):
+ req.respond(HTTP_OK, HGTYPE, length=len(rsp))
+ return [rsp]
+ elif isinstance(rsp, wireproto.streamres):
+ req.respond(HTTP_OK, HGTYPE)
+ return rsp.gen
+ elif isinstance(rsp, wireproto.pushres):
+ val = sys.stdout.getvalue()
+ sys.stdout, sys.stderr = p.oldio
+ req.respond(HTTP_OK, HGTYPE)
+ return ['%d\n%s' % (rsp.res, val)]
--- a/mercurial/httprepo.py Thu Jul 22 08:17:38 2010 -0500
+++ b/mercurial/httprepo.py Thu Jul 22 08:24:56 2010 -0500
@@ -8,7 +8,7 @@
from node import bin, hex, nullid
from i18n import _
-import repo, changegroup, statichttprepo, error, url, util, pushkey
+import repo, changegroup, statichttprepo, error, url, util, wireproto
import os, urllib, urllib2, urlparse, zlib, httplib
import errno, socket
import encoding
@@ -22,7 +22,7 @@
raise IOError(None, _('connection ended unexpectedly'))
yield zd.flush()
-class httprepository(repo.repository):
+class httprepository(wireproto.wirerepository):
def __init__(self, ui, path):
self.path = path
self.caps = None
@@ -54,7 +54,7 @@
def get_caps(self):
if self.caps is None:
try:
- self.caps = set(self.do_read('capabilities').split())
+ self.caps = set(self._call('capabilities').split())
except error.RepoError:
self.caps = set()
self.ui.debug('capabilities: %s\n' %
@@ -66,7 +66,7 @@
def lock(self):
raise util.Abort(_('operation not supported over http'))
- def do_cmd(self, cmd, **args):
+ def _callstream(self, cmd, **args):
data = args.pop('data', None)
headers = args.pop('headers', {})
self.ui.debug("sending %s command\n" % cmd)
@@ -130,90 +130,15 @@
return resp
- def do_read(self, cmd, **args):
- fp = self.do_cmd(cmd, **args)
+ def _call(self, cmd, **args):
+ fp = self._callstream(cmd, **args)
try:
return fp.read()
finally:
# if using keepalive, allow connection to be reused
fp.close()
- def lookup(self, key):
- self.requirecap('lookup', _('look up remote revision'))
- d = self.do_cmd("lookup", key = key).read()
- success, data = d[:-1].split(' ', 1)
- if int(success):
- return bin(data)
- raise error.RepoError(data)
-
- def heads(self):
- d = self.do_read("heads")
- try:
- return map(bin, d[:-1].split(" "))
- except:
- raise error.ResponseError(_("unexpected response:"), d)
-
- def branchmap(self):
- d = self.do_read("branchmap")
- try:
- branchmap = {}
- for branchpart in d.splitlines():
- branchheads = branchpart.split(' ')
- branchname = urllib.unquote(branchheads[0])
- # Earlier servers (1.3.x) send branch names in (their) local
- # charset. The best we can do is assume it's identical to our
- # own local charset, in case it's not utf-8.
- try:
- branchname.decode('utf-8')
- except UnicodeDecodeError:
- branchname = encoding.fromlocal(branchname)
- branchheads = [bin(x) for x in branchheads[1:]]
- branchmap[branchname] = branchheads
- return branchmap
- except:
- raise error.ResponseError(_("unexpected response:"), d)
-
- def branches(self, nodes):
- n = " ".join(map(hex, nodes))
- d = self.do_read("branches", nodes=n)
- try:
- br = [tuple(map(bin, b.split(" "))) for b in d.splitlines()]
- return br
- except:
- raise error.ResponseError(_("unexpected response:"), d)
-
- def between(self, pairs):
- batch = 8 # avoid giant requests
- r = []
- for i in xrange(0, len(pairs), batch):
- n = " ".join(["-".join(map(hex, p)) for p in pairs[i:i + batch]])
- d = self.do_read("between", pairs=n)
- try:
- r += [l and map(bin, l.split(" ")) or []
- for l in d.splitlines()]
- except:
- raise error.ResponseError(_("unexpected response:"), d)
- return r
-
- def changegroup(self, nodes, kind):
- n = " ".join(map(hex, nodes))
- f = self.do_cmd("changegroup", roots=n)
- return util.chunkbuffer(zgenerator(f))
-
- def changegroupsubset(self, bases, heads, source):
- self.requirecap('changegroupsubset', _('look up remote changes'))
- baselst = " ".join([hex(n) for n in bases])
- headlst = " ".join([hex(n) for n in heads])
- f = self.do_cmd("changegroupsubset", bases=baselst, heads=headlst)
- return util.chunkbuffer(zgenerator(f))
-
- def unbundle(self, cg, heads, source):
- '''Send cg (a readable file-like object representing the
- changegroup to push, typically a chunkbuffer object) to the
- remote server as a bundle. Return an integer response code:
- non-zero indicates a successful push (see
- localrepository.addchangegroup()), and zero indicates either
- error or nothing to push.'''
+ def _callpush(self, cmd, cg, **args):
# have to stream bundle to a temp file because we do not have
# http 1.1 chunked transfer.
@@ -233,56 +158,25 @@
tempname = changegroup.writebundle(cg, None, type)
fp = url.httpsendfile(tempname, "rb")
+ headers = {'Content-Type': 'application/mercurial-0.1'}
+
try:
try:
- resp = self.do_read(
- 'unbundle', data=fp,
- headers={'Content-Type': 'application/mercurial-0.1'},
- heads=' '.join(map(hex, heads)))
- resp_code, output = resp.split('\n', 1)
- try:
- ret = int(resp_code)
- except ValueError, err:
- raise error.ResponseError(
- _('push failed (unexpected response):'), resp)
- for l in output.splitlines(True):
- self.ui.status(_('remote: '), l)
- return ret
+ r = self._call(cmd, data=fp, headers=headers, **args)
+ return r.split('\n', 1)
except socket.error, err:
- if err[0] in (errno.ECONNRESET, errno.EPIPE):
- raise util.Abort(_('push failed: %s') % err[1])
- raise util.Abort(err[1])
+ if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
+ raise util.Abort(_('push failed: %s') % err.args[1])
+ raise util.Abort(err.args[1])
finally:
fp.close()
os.unlink(tempname)
- def stream_out(self):
- return self.do_cmd('stream_out')
+ def _abort(self, exception):
+ raise exception
- def pushkey(self, namespace, key, old, new):
- if not self.capable('pushkey'):
- return False
- d = self.do_cmd("pushkey", data="", # force a POST
- namespace=namespace, key=key, old=old, new=new).read()
- code, output = d.split('\n', 1)
- try:
- ret = bool(int(code))
- except ValueError, err:
- raise error.ResponseError(
- _('push failed (unexpected response):'), d)
- for l in output.splitlines(True):
- self.ui.status(_('remote: '), l)
- return ret
-
- def listkeys(self, namespace):
- if not self.capable('pushkey'):
- return {}
- d = self.do_cmd("listkeys", namespace=namespace).read()
- r = {}
- for l in d.splitlines():
- k, v = l.split('\t')
- r[k.decode('string-escape')] = v.decode('string-escape')
- return r
+ def _decompress(self, stream):
+ return util.chunkbuffer(zgenerator(stream))
class httpsrepository(httprepository):
def __init__(self, ui, path):
--- a/mercurial/localrepo.py Thu Jul 22 08:17:38 2010 -0500
+++ b/mercurial/localrepo.py Thu Jul 22 08:24:56 2010 -0500
@@ -1223,46 +1223,34 @@
# unbundle assumes local user cannot lock remote repo (new ssh
# servers, http servers).
- if remote.capable('unbundle'):
- return self.push_unbundle(remote, force, revs, newbranch)
- return self.push_addchangegroup(remote, force, revs, newbranch)
-
- def push_addchangegroup(self, remote, force, revs, newbranch):
- '''Push a changegroup by locking the remote and sending the
- addchangegroup command to it. Used for local and old SSH repos.
- Return an integer: see push().
- '''
- lock = remote.lock()
+ lock = None
+ unbundle = remote.capable('unbundle')
+ if not unbundle:
+ lock = remote.lock()
try:
ret = discovery.prepush(self, remote, force, revs, newbranch)
- if ret[0] is not None:
- cg, remote_heads = ret
+ if ret[0] is None:
+ # and here we return 0 for "nothing to push" or 1 for
+ # "something to push but I refuse"
+ return ret[1]
+
+ cg, remote_heads = ret
+ if unbundle:
+ # local repo finds heads on server, finds out what revs it must
+ # push. once revs transferred, if server finds it has
+ # different heads (someone else won commit/push race), server
+ # aborts.
+ if force:
+ remote_heads = ['force']
+ # ssh: return remote's addchangegroup()
+ # http: return remote's addchangegroup() or 0 for error
+ return remote.unbundle(cg, remote_heads, 'push')
+ else:
# we return an integer indicating remote head count change
return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
- # and here we return 0 for "nothing to push" or 1 for
- # "something to push but I refuse"
- return ret[1]
finally:
- lock.release()
-
- def push_unbundle(self, remote, force, revs, newbranch):
- '''Push a changegroup by unbundling it on the remote. Used for new
- SSH and HTTP repos. Return an integer: see push().'''
- # local repo finds heads on server, finds out what revs it
- # must push. once revs transferred, if server finds it has
- # different heads (someone else won commit/push race), server
- # aborts.
-
- ret = discovery.prepush(self, remote, force, revs, newbranch)
- if ret[0] is not None:
- cg, remote_heads = ret
- if force:
- remote_heads = ['force']
- # ssh: return remote's addchangegroup()
- # http: return remote's addchangegroup() or 0 for error
- return remote.unbundle(cg, remote_heads, 'push')
- # as in push_addchangegroup()
- return ret[1]
+ if lock is not None:
+ lock.release()
def changegroupinfo(self, nodes, source):
if self.ui.verbose or source == 'bundle':
@@ -1325,7 +1313,6 @@
for n in bases:
knownheads.update(cl.parents(n))
knownheads.discard(nullid)
- knownheads = list(knownheads)
if knownheads:
# Now that we know what heads are known, we can compute which
# changesets are known. The recipient must know about all
@@ -1386,10 +1373,9 @@
deltamf = mnfst.readdelta(mnfstnode)
# For each line in the delta
for f, fnode in deltamf.iteritems():
- f = changedfiles.get(f, None)
# And if the file is in the list of files we care
# about.
- if f is not None:
+ if f in changedfiles:
# Get the changenode this manifest belongs to
clnode = msng_mnfst_set[mnfstnode]
# Create the set of filenodes for the file if
@@ -1448,7 +1434,7 @@
# logically divide up the task, generate the group.
def gengroup():
# The set of changed files starts empty.
- changedfiles = {}
+ changedfiles = set()
collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
# Create a changenode group generator that will call our functions
@@ -1499,7 +1485,7 @@
if isinstance(fname, int):
continue
msng_filenode_set.setdefault(fname, {})
- changedfiles[fname] = 1
+ changedfiles.add(fname)
# Go through all our files in order sorted by name.
cnt = 0
for fname in sorted(changedfiles):
@@ -1579,7 +1565,7 @@
def gengroup():
'''yield a sequence of changegroup chunks (strings)'''
# construct a list of all changed files
- changedfiles = {}
+ changedfiles = set()
mmfs = {}
collect = changegroup.collector(cl, mmfs, changedfiles)
--- a/mercurial/revlog.py Thu Jul 22 08:17:38 2010 -0500
+++ b/mercurial/revlog.py Thu Jul 22 08:24:56 2010 -0500
@@ -131,7 +131,7 @@
self.dataf = dataf
self.s = struct.calcsize(indexformatng)
self.datasize = size
- self.l = size / self.s
+ self.l = size // self.s
self.index = [None] * self.l
self.map = {nullid: nullrev}
self.allmap = 0
@@ -176,8 +176,8 @@
# limit blocksize so that we don't get too much data.
blocksize = max(self.datasize - blockstart, 0)
data = self.dataf.read(blocksize)
- lend = len(data) / self.s
- i = blockstart / self.s
+ lend = len(data) // self.s
+ i = blockstart // self.s
off = 0
# lazyindex supports __delitem__
if lend > len(self.index) - i:
--- a/mercurial/sshrepo.py Thu Jul 22 08:17:38 2010 -0500
+++ b/mercurial/sshrepo.py Thu Jul 22 08:24:56 2010 -0500
@@ -5,10 +5,9 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
-from node import bin, hex
from i18n import _
-import repo, util, error, encoding
-import re, urllib
+import repo, util, error, wireproto
+import re
class remotelock(object):
def __init__(self, repo):
@@ -20,14 +19,14 @@
if self.repo:
self.release()
-class sshrepository(repo.repository):
+class sshrepository(wireproto.wirerepository):
def __init__(self, ui, path, create=0):
self._url = path
self.ui = ui
m = re.match(r'^ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?$', path)
if not m:
- self.abort(error.RepoError(_("couldn't parse location %s") % path))
+ self._abort(error.RepoError(_("couldn't parse location %s") % path))
self.user = m.group(2)
self.host = m.group(3)
@@ -46,7 +45,7 @@
ui.note(_('running %s\n') % cmd)
res = util.system(cmd)
if res != 0:
- self.abort(error.RepoError(_("could not create remote repo")))
+ self._abort(error.RepoError(_("could not create remote repo")))
self.validate_repo(ui, sshcmd, args, remotecmd)
@@ -65,8 +64,8 @@
self.pipeo, self.pipei, self.pipee = util.popen3(cmd)
# skip any noise generated by remote shell
- self.do_cmd("hello")
- r = self.do_cmd("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
+ self._callstream("hello")
+ r = self._callstream("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
lines = ["", "dummy"]
max_noise = 500
while lines[-1] and max_noise:
@@ -79,7 +78,7 @@
lines.append(l)
max_noise -= 1
else:
- self.abort(error.RepoError(_("no suitable response from remote hg")))
+ self._abort(error.RepoError(_("no suitable response from remote hg")))
self.capabilities = set()
for l in reversed(lines):
@@ -97,7 +96,7 @@
break
self.ui.status(_("remote: "), l)
- def abort(self, exception):
+ def _abort(self, exception):
self.cleanup()
raise exception
@@ -114,7 +113,7 @@
__del__ = cleanup
- def do_cmd(self, cmd, **args):
+ def _callstream(self, cmd, **args):
self.ui.debug("sending %s command\n" % cmd)
self.pipeo.write("%s\n" % cmd)
for k, v in sorted(args.iteritems()):
@@ -124,17 +123,35 @@
return self.pipei
- def call(self, cmd, **args):
- self.do_cmd(cmd, **args)
+ def _call(self, cmd, **args):
+ self._callstream(cmd, **args)
return self._recv()
+ def _callpush(self, cmd, fp, **args):
+ r = self._call(cmd, **args)
+ if r:
+ return '', r
+ while 1:
+ d = fp.read(4096)
+ if not d:
+ break
+ self._send(d)
+ self._send("", flush=True)
+ r = self._recv()
+ if r:
+ return '', r
+ return self._recv(), ''
+
+ def _decompress(self, stream):
+ return stream
+
def _recv(self):
l = self.pipei.readline()
self.readerr()
try:
l = int(l)
except:
- self.abort(error.ResponseError(_("unexpected response:"), l))
+ self._abort(error.ResponseError(_("unexpected response:"), l))
return self.pipei.read(l)
def _send(self, data, flush=False):
@@ -146,112 +163,19 @@
self.readerr()
def lock(self):
- self.call("lock")
+ self._call("lock")
return remotelock(self)
def unlock(self):
- self.call("unlock")
-
- def lookup(self, key):
- self.requirecap('lookup', _('look up remote revision'))
- d = self.call("lookup", key=key)
- success, data = d[:-1].split(" ", 1)
- if int(success):
- return bin(data)
- else:
- self.abort(error.RepoError(data))
-
- def heads(self):
- d = self.call("heads")
- try:
- return map(bin, d[:-1].split(" "))
- except:
- self.abort(error.ResponseError(_("unexpected response:"), d))
-
- def branchmap(self):
- d = self.call("branchmap")
- try:
- branchmap = {}
- for branchpart in d.splitlines():
- branchheads = branchpart.split(' ')
- branchname = urllib.unquote(branchheads[0])
- # Earlier servers (1.3.x) send branch names in (their) local
- # charset. The best we can do is assume it's identical to our
- # own local charset, in case it's not utf-8.
- try:
- branchname.decode('utf-8')
- except UnicodeDecodeError:
- branchname = encoding.fromlocal(branchname)
- branchheads = [bin(x) for x in branchheads[1:]]
- branchmap[branchname] = branchheads
- return branchmap
- except:
- raise error.ResponseError(_("unexpected response:"), d)
-
- def branches(self, nodes):
- n = " ".join(map(hex, nodes))
- d = self.call("branches", nodes=n)
- try:
- br = [tuple(map(bin, b.split(" "))) for b in d.splitlines()]
- return br
- except:
- self.abort(error.ResponseError(_("unexpected response:"), d))
-
- def between(self, pairs):
- n = " ".join(["-".join(map(hex, p)) for p in pairs])
- d = self.call("between", pairs=n)
- try:
- p = [l and map(bin, l.split(" ")) or [] for l in d.splitlines()]
- return p
- except:
- self.abort(error.ResponseError(_("unexpected response:"), d))
-
- def changegroup(self, nodes, kind):
- n = " ".join(map(hex, nodes))
- return self.do_cmd("changegroup", roots=n)
-
- def changegroupsubset(self, bases, heads, kind):
- self.requirecap('changegroupsubset', _('look up remote changes'))
- bases = " ".join(map(hex, bases))
- heads = " ".join(map(hex, heads))
- return self.do_cmd("changegroupsubset", bases=bases, heads=heads)
-
- def unbundle(self, cg, heads, source):
- '''Send cg (a readable file-like object representing the
- changegroup to push, typically a chunkbuffer object) to the
- remote server as a bundle. Return an integer indicating the
- result of the push (see localrepository.addchangegroup()).'''
- d = self.call("unbundle", heads=' '.join(map(hex, heads)))
- if d:
- # remote may send "unsynced changes"
- self.abort(error.RepoError(_("push refused: %s") % d))
-
- while 1:
- d = cg.read(4096)
- if not d:
- break
- self._send(d)
-
- self._send("", flush=True)
-
- r = self._recv()
- if r:
- # remote may send "unsynced changes"
- self.abort(error.RepoError(_("push failed: %s") % r))
-
- r = self._recv()
- try:
- return int(r)
- except:
- self.abort(error.ResponseError(_("unexpected response:"), r))
+ self._call("unlock")
def addchangegroup(self, cg, source, url):
'''Send a changegroup to the remote server. Return an integer
similar to unbundle(). DEPRECATED, since it requires locking the
remote.'''
- d = self.call("addchangegroup")
+ d = self._call("addchangegroup")
if d:
- self.abort(error.RepoError(_("push refused: %s") % d))
+ self._abort(error.RepoError(_("push refused: %s") % d))
while 1:
d = cg.read(4096)
if not d:
@@ -268,26 +192,6 @@
try:
return int(r)
except:
- self.abort(error.ResponseError(_("unexpected response:"), r))
-
- def stream_out(self):
- return self.do_cmd('stream_out')
-
- def pushkey(self, namespace, key, old, new):
- if not self.capable('pushkey'):
- return False
- d = self.call("pushkey",
- namespace=namespace, key=key, old=old, new=new)
- return bool(int(d))
-
- def listkeys(self, namespace):
- if not self.capable('pushkey'):
- return {}
- d = self.call("listkeys", namespace=namespace)
- r = {}
- for l in d.splitlines():
- k, v = l.split('\t')
- r[k.decode('string-escape')] = v.decode('string-escape')
- return r
+ self._abort(error.ResponseError(_("unexpected response:"), r))
instance = sshrepository
--- a/mercurial/sshserver.py Thu Jul 22 08:17:38 2010 -0500
+++ b/mercurial/sshserver.py Thu Jul 22 08:24:56 2010 -0500
@@ -7,14 +7,10 @@
# GNU General Public License version 2 or any later version.
from i18n import _
-from node import bin, hex
-import streamclone, util, hook, pushkey
-import os, sys, tempfile, urllib, copy
+import util, hook, wireproto
+import os, sys
class sshserver(object):
-
- caps = 'unbundle lookup changegroupsubset branchmap pushkey'.split()
-
def __init__(self, ui, repo):
self.ui = ui
self.repo = repo
@@ -29,17 +25,61 @@
util.set_binary(self.fin)
util.set_binary(self.fout)
- def getarg(self):
- argline = self.fin.readline()[:-1]
- arg, l = argline.split()
- val = self.fin.read(int(l))
- return arg, val
+ def getargs(self, args):
+ data = {}
+ keys = args.split()
+ count = len(keys)
+ for n in xrange(len(keys)):
+ argline = self.fin.readline()[:-1]
+ arg, l = argline.split()
+ val = self.fin.read(int(l))
+ if arg not in keys:
+ raise util.Abort("unexpected parameter %r" % arg)
+ if arg == '*':
+ star = {}
+ for n in xrange(int(l)):
+ arg, l = argline.split()
+ val = self.fin.read(int(l))
+ star[arg] = val
+ data['*'] = star
+ else:
+ data[arg] = val
+ return [data[k] for k in keys]
- def respond(self, v):
+ def getarg(self, name):
+ return self.getargs(name)[0]
+
+ def getfile(self, fpout):
+ self.sendresponse('')
+ count = int(self.fin.readline())
+ while count:
+ fpout.write(self.fin.read(count))
+ count = int(self.fin.readline())
+
+ def redirect(self):
+ pass
+
+ def groupchunks(self, changegroup):
+ while True:
+ d = changegroup.read(4096)
+ if not d:
+ break
+ yield d
+
+ def sendresponse(self, v):
self.fout.write("%d\n" % len(v))
self.fout.write(v)
self.fout.flush()
+ def sendstream(self, source):
+ for chunk in source.gen:
+ self.fout.write(chunk)
+ self.fout.flush()
+
+ def sendpushresponse(self, rsp):
+ self.sendresponse('')
+ self.sendresponse(str(rsp.res))
+
def serve_forever(self):
try:
while self.serve_one():
@@ -49,57 +89,31 @@
self.lock.release()
sys.exit(0)
+ handlers = {
+ str: sendresponse,
+ wireproto.streamres: sendstream,
+ wireproto.pushres: sendpushresponse,
+ }
+
def serve_one(self):
cmd = self.fin.readline()[:-1]
- if cmd:
+ if cmd and cmd in wireproto.commands:
+ rsp = wireproto.dispatch(self.repo, self, cmd)
+ self.handlers[rsp.__class__](self, rsp)
+ elif cmd:
impl = getattr(self, 'do_' + cmd, None)
if impl:
- impl()
- else: self.respond("")
+ r = impl()
+ if r is not None:
+ self.sendresponse(r)
+ else: self.sendresponse("")
return cmd != ''
- def do_lookup(self):
- arg, key = self.getarg()
- assert arg == 'key'
- try:
- r = hex(self.repo.lookup(key))
- success = 1
- except Exception, inst:
- r = str(inst)
- success = 0
- self.respond("%s %s\n" % (success, r))
-
- def do_branchmap(self):
- branchmap = self.repo.branchmap()
- heads = []
- for branch, nodes in branchmap.iteritems():
- branchname = urllib.quote(branch)
- branchnodes = [hex(node) for node in nodes]
- heads.append('%s %s' % (branchname, ' '.join(branchnodes)))
- self.respond('\n'.join(heads))
-
- def do_heads(self):
- h = self.repo.heads()
- self.respond(" ".join(map(hex, h)) + "\n")
-
- def do_hello(self):
- '''the hello command returns a set of lines describing various
- interesting things about the server, in an RFC822-like format.
- Currently the only one defined is "capabilities", which
- consists of a line in the form:
-
- capabilities: space separated list of tokens
- '''
- caps = copy.copy(self.caps)
- if streamclone.allowed(self.repo.ui):
- caps.append('stream=%d' % self.repo.changelog.version)
- self.respond("capabilities: %s\n" % (' '.join(caps),))
-
def do_lock(self):
'''DEPRECATED - allowing remote client to lock repo is not safe'''
self.lock = self.repo.lock()
- self.respond("")
+ return ""
def do_unlock(self):
'''DEPRECATED'''
@@ -107,136 +121,20 @@
if self.lock:
self.lock.release()
self.lock = None
- self.respond("")
-
- def do_branches(self):
- arg, nodes = self.getarg()
- nodes = map(bin, nodes.split(" "))
- r = []
- for b in self.repo.branches(nodes):
- r.append(" ".join(map(hex, b)) + "\n")
- self.respond("".join(r))
-
- def do_between(self):
- arg, pairs = self.getarg()
- pairs = [map(bin, p.split("-")) for p in pairs.split(" ")]
- r = []
- for b in self.repo.between(pairs):
- r.append(" ".join(map(hex, b)) + "\n")
- self.respond("".join(r))
-
- def do_changegroup(self):
- nodes = []
- arg, roots = self.getarg()
- nodes = map(bin, roots.split(" "))
-
- cg = self.repo.changegroup(nodes, 'serve')
- while True:
- d = cg.read(4096)
- if not d:
- break
- self.fout.write(d)
-
- self.fout.flush()
-
- def do_changegroupsubset(self):
- argmap = dict([self.getarg(), self.getarg()])
- bases = [bin(n) for n in argmap['bases'].split(' ')]
- heads = [bin(n) for n in argmap['heads'].split(' ')]
-
- cg = self.repo.changegroupsubset(bases, heads, 'serve')
- while True:
- d = cg.read(4096)
- if not d:
- break
- self.fout.write(d)
-
- self.fout.flush()
+ return ""
def do_addchangegroup(self):
'''DEPRECATED'''
if not self.lock:
- self.respond("not locked")
- return
-
- self.respond("")
- r = self.repo.addchangegroup(self.fin, 'serve', self.client_url(),
- lock=self.lock)
- self.respond(str(r))
-
- def client_url(self):
- client = os.environ.get('SSH_CLIENT', '').split(' ', 1)[0]
- return 'remote:ssh:' + client
-
- def do_unbundle(self):
- their_heads = self.getarg()[1].split()
-
- def check_heads():
- heads = map(hex, self.repo.heads())
- return their_heads == [hex('force')] or their_heads == heads
-
- # fail early if possible
- if not check_heads():
- self.respond(_('unsynced changes'))
+ self.sendresponse("not locked")
return
- self.respond('')
-
- # write bundle data to temporary file because it can be big
- fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
- fp = os.fdopen(fd, 'wb+')
- try:
- count = int(self.fin.readline())
- while count:
- fp.write(self.fin.read(count))
- count = int(self.fin.readline())
-
- was_locked = self.lock is not None
- if not was_locked:
- self.lock = self.repo.lock()
- try:
- if not check_heads():
- # someone else committed/pushed/unbundled while we
- # were transferring data
- self.respond(_('unsynced changes'))
- return
- self.respond('')
-
- # push can proceed
+ self.sendresponse("")
+ r = self.repo.addchangegroup(self.fin, 'serve', self._client(),
+ lock=self.lock)
+ return str(r)
- fp.seek(0)
- r = self.repo.addchangegroup(fp, 'serve', self.client_url(),
- lock=self.lock)
- self.respond(str(r))
- finally:
- if not was_locked:
- self.lock.release()
- self.lock = None
- finally:
- fp.close()
- os.unlink(tempname)
-
- def do_stream_out(self):
- try:
- for chunk in streamclone.stream_out(self.repo):
- self.fout.write(chunk)
- self.fout.flush()
- except streamclone.StreamException, inst:
- self.fout.write(str(inst))
- self.fout.flush()
-
- def do_pushkey(self):
- arg, key = self.getarg()
- arg, namespace = self.getarg()
- arg, new = self.getarg()
- arg, old = self.getarg()
- r = pushkey.push(self.repo, namespace, key, old, new)
- self.respond('%s\n' % int(r))
-
- def do_listkeys(self):
- arg, namespace = self.getarg()
- d = pushkey.list(self.repo, namespace).items()
- t = '\n'.join(['%s\t%s' % (k.encode('string-escape'),
- v.encode('string-escape')) for k, v in d])
- self.respond(t)
+ def _client(self):
+ client = os.environ.get('SSH_CLIENT', '').split(' ', 1)[0]
+ return 'remote:ssh:' + client
--- a/mercurial/streamclone.py Thu Jul 22 08:17:38 2010 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,69 +0,0 @@
-# streamclone.py - streaming clone server support for mercurial
-#
-# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-import util, error
-
-from mercurial import store
-
-class StreamException(Exception):
- def __init__(self, code):
- Exception.__init__(self)
- self.code = code
- def __str__(self):
- return '%i\n' % self.code
-
-# if server supports streaming clone, it advertises "stream"
-# capability with value that is version+flags of repo it is serving.
-# client only streams if it can read that repo format.
-
-# stream file format is simple.
-#
-# server writes out line that says how many files, how many total
-# bytes. separator is ascii space, byte counts are strings.
-#
-# then for each file:
-#
-# server writes out line that says filename, how many bytes in
-# file. separator is ascii nul, byte count is string.
-#
-# server writes out raw file data.
-
-def allowed(ui):
- return ui.configbool('server', 'uncompressed', True, untrusted=True)
-
-def stream_out(repo):
- '''stream out all metadata files in repository.
- writes to file-like object, must support write() and optional flush().'''
-
- if not allowed(repo.ui):
- raise StreamException(1)
-
- entries = []
- total_bytes = 0
- try:
- # get consistent snapshot of repo, lock during scan
- lock = repo.lock()
- try:
- repo.ui.debug('scanning\n')
- for name, ename, size in repo.store.walk():
- entries.append((name, size))
- total_bytes += size
- finally:
- lock.release()
- except error.LockError:
- raise StreamException(2)
-
- yield '0\n'
- repo.ui.debug('%d files, %d bytes to transfer\n' %
- (len(entries), total_bytes))
- yield '%d %d\n' % (len(entries), total_bytes)
- for name, size in entries:
- repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
- # partially encode name over the wire for backwards compat
- yield '%s\0%d\n' % (store.encodedir(name), size)
- for chunk in util.filechunkiter(repo.sopener(name), limit=size):
- yield chunk
--- a/mercurial/subrepo.py Thu Jul 22 08:17:38 2010 -0500
+++ b/mercurial/subrepo.py Thu Jul 22 08:24:56 2010 -0500
@@ -182,22 +182,49 @@
raise util.Abort(_('unknown subrepo type %s') % state[2])
return types[state[2]](ctx, path, state[:2])
-# subrepo classes need to implement the following methods:
-# __init__(self, ctx, path, state)
-# dirty(self): returns true if the dirstate of the subrepo
-# does not match current stored state
-# commit(self, text, user, date): commit the current changes
-# to the subrepo with the given log message. Use given
-# user and date if possible. Return the new state of the subrepo.
-# remove(self): remove the subrepo (should verify the dirstate
-# is not dirty first)
-# get(self, state): run whatever commands are needed to put the
-# subrepo into this state
-# merge(self, state): merge currently-saved state with the new state.
-# push(self, force): perform whatever action is analogous to 'hg push'
-# This may be a no-op on some systems.
+# subrepo classes need to implement the following abstract class:
+
+class abstractsubrepo(object):
+
+ def dirty(self):
+ """returns true if the dirstate of the subrepo does not match
+ current stored state
+ """
+ raise NotImplementedError
+
+ def commit(self, text, user, date):
+ """commit the current changes to the subrepo with the given
+ log message. Use given user and date if possible. Return the
+ new state of the subrepo.
+ """
+ raise NotImplementedError
+
+ def remove(self):
+ """remove the subrepo
-class hgsubrepo(object):
+ (should verify the dirstate is not dirty first)
+ """
+ raise NotImplementedError
+
+ def get(self, state):
+ """run whatever commands are needed to put the subrepo into
+ this state
+ """
+ raise NotImplementedError
+
+ def merge(self, state):
+ """merge currently-saved state with the new state."""
+ raise NotImplementedError
+
+ def push(self, force):
+ """perform whatever action is analogous to 'hg push'
+
+ This may be a no-op on some systems.
+ """
+ raise NotImplementedError
+
+
+class hgsubrepo(abstractsubrepo):
def __init__(self, ctx, path, state):
self._path = path
self._state = state
@@ -294,15 +321,15 @@
other = hg.repository(self._repo.ui, dsturl)
return self._repo.push(other, force)
-class svnsubrepo(object):
+class svnsubrepo(abstractsubrepo):
def __init__(self, ctx, path, state):
self._path = path
self._state = state
self._ctx = ctx
self._ui = ctx._repo.ui
- def _svncommand(self, commands):
- path = os.path.join(self._ctx._repo.origroot, self._path)
+ def _svncommand(self, commands, filename=''):
+ path = os.path.join(self._ctx._repo.origroot, self._path, filename)
cmd = ['svn'] + commands + [path]
cmd = [util.shellquote(arg) for arg in cmd]
cmd = util.quotecommand(' '.join(cmd))
--- a/mercurial/util.h Thu Jul 22 08:17:38 2010 -0500
+++ b/mercurial/util.h Thu Jul 22 08:24:56 2010 -0500
@@ -12,6 +12,48 @@
#define IS_PY3K
#define PyInt_FromLong PyLong_FromLong
+#define PyInt_AsLong PyLong_AsLong
+
+/*
+ Mapping of some of the python < 2.x PyString* functions to py3k's PyUnicode.
+
+ The commented names below represent those that are present in the PyBytes
+ definitions for python < 2.6 (below in this file) that don't have a direct
+ implementation.
+*/
+
+#define PyStringObject PyUnicodeObject
+#define PyString_Type PyUnicode_Type
+
+#define PyString_Check PyUnicode_Check
+#define PyString_CheckExact PyUnicode_CheckExact
+#define PyString_CHECK_INTERNED PyUnicode_CHECK_INTERNED
+#define PyString_AS_STRING PyUnicode_AsLatin1String
+#define PyString_GET_SIZE PyUnicode_GET_SIZE
+
+#define PyString_FromStringAndSize PyUnicode_FromStringAndSize
+#define PyString_FromString PyUnicode_FromString
+#define PyString_FromFormatV PyUnicode_FromFormatV
+#define PyString_FromFormat PyUnicode_FromFormat
+/* #define PyString_Size PyUnicode_GET_SIZE */
+/* #define PyString_AsString */
+/* #define PyString_Repr */
+#define PyString_Concat PyUnicode_Concat
+#define PyString_ConcatAndDel PyUnicode_AppendAndDel
+#define _PyString_Resize PyUnicode_Resize
+/* #define _PyString_Eq */
+#define PyString_Format PyUnicode_Format
+/* #define _PyString_FormatLong */
+/* #define PyString_DecodeEscape */
+#define _PyString_Join PyUnicode_Join
+#define PyString_Decode PyUnicode_Decode
+#define PyString_Encode PyUnicode_Encode
+#define PyString_AsEncodedObject PyUnicode_AsEncodedObject
+#define PyString_AsEncodedString PyUnicode_AsEncodedString
+#define PyString_AsDecodedObject PyUnicode_AsDecodedObject
+#define PyString_AsDecodedString PyUnicode_AsDecodedUnicode
+/* #define PyString_AsStringAndSize */
+#define _PyString_InsertThousandsGrouping _PyUnicode_InsertThousandsGrouping
#endif /* PY_MAJOR_VERSION */
--- a/mercurial/util.py Thu Jul 22 08:17:38 2010 -0500
+++ b/mercurial/util.py Thu Jul 22 08:24:56 2010 -0500
@@ -38,9 +38,15 @@
import __builtin__
-def fakebuffer(sliceable, offset=0):
- return sliceable[offset:]
-if not hasattr(__builtin__, 'buffer'):
+if sys.version_info[0] < 3:
+ def fakebuffer(sliceable, offset=0):
+ return sliceable[offset:]
+else:
+ def fakebuffer(sliceable, offset=0):
+ return memoryview(sliceable)[offset:]
+try:
+ buffer
+except NameError:
__builtin__.buffer = fakebuffer
import subprocess
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/wireproto.py Thu Jul 22 08:24:56 2010 -0500
@@ -0,0 +1,332 @@
+# wireproto.py - generic wire protocol support functions
+#
+# Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import urllib, tempfile, os, sys
+from i18n import _
+from node import bin, hex
+import changegroup as changegroupmod
+import repo, error, encoding, util, store
+import pushkey as pushkey_
+
+# list of nodes encoding / decoding
+
+def decodelist(l, sep=' '):
+ return map(bin, l.split(sep))
+
+def encodelist(l, sep=' '):
+ return sep.join(map(hex, l))
+
+# client side
+
+class wirerepository(repo.repository):
+ def lookup(self, key):
+ self.requirecap('lookup', _('look up remote revision'))
+ d = self._call("lookup", key=key)
+ success, data = d[:-1].split(" ", 1)
+ if int(success):
+ return bin(data)
+ self._abort(error.RepoError(data))
+
+ def heads(self):
+ d = self._call("heads")
+ try:
+ return decodelist(d[:-1])
+ except:
+ self._abort(error.ResponseError(_("unexpected response:"), d))
+
+ def branchmap(self):
+ d = self._call("branchmap")
+ try:
+ branchmap = {}
+ for branchpart in d.splitlines():
+ branchname, branchheads = branchpart.split(' ', 1)
+ branchname = urllib.unquote(branchname)
+ # Earlier servers (1.3.x) send branch names in (their) local
+ # charset. The best we can do is assume it's identical to our
+ # own local charset, in case it's not utf-8.
+ try:
+ branchname.decode('utf-8')
+ except UnicodeDecodeError:
+ branchname = encoding.fromlocal(branchname)
+ branchheads = decodelist(branchheads)
+ branchmap[branchname] = branchheads
+ return branchmap
+ except TypeError:
+ self._abort(error.ResponseError(_("unexpected response:"), d))
+
+ def branches(self, nodes):
+ n = encodelist(nodes)
+ d = self._call("branches", nodes=n)
+ try:
+ br = [tuple(decodelist(b)) for b in d.splitlines()]
+ return br
+ except:
+ self._abort(error.ResponseError(_("unexpected response:"), d))
+
+ def between(self, pairs):
+ batch = 8 # avoid giant requests
+ r = []
+ for i in xrange(0, len(pairs), batch):
+ n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
+ d = self._call("between", pairs=n)
+ try:
+ r.extend(l and decodelist(l) or [] for l in d.splitlines())
+ except:
+ self._abort(error.ResponseError(_("unexpected response:"), d))
+ return r
+
+ def pushkey(self, namespace, key, old, new):
+ if not self.capable('pushkey'):
+ return False
+ d = self._call("pushkey",
+ namespace=namespace, key=key, old=old, new=new)
+ return bool(int(d))
+
+ def listkeys(self, namespace):
+ if not self.capable('pushkey'):
+ return {}
+ d = self._call("listkeys", namespace=namespace)
+ r = {}
+ for l in d.splitlines():
+ k, v = l.split('\t')
+ r[k.decode('string-escape')] = v.decode('string-escape')
+ return r
+
+ def stream_out(self):
+ return self._callstream('stream_out')
+
+ def changegroup(self, nodes, kind):
+ n = encodelist(nodes)
+ f = self._callstream("changegroup", roots=n)
+ return self._decompress(f)
+
+ def changegroupsubset(self, bases, heads, kind):
+ self.requirecap('changegroupsubset', _('look up remote changes'))
+ bases = encodelist(bases)
+ heads = encodelist(heads)
+ return self._decompress(self._callstream("changegroupsubset",
+ bases=bases, heads=heads))
+
+ def unbundle(self, cg, heads, source):
+ '''Send cg (a readable file-like object representing the
+ changegroup to push, typically a chunkbuffer object) to the
+ remote server as a bundle. Return an integer indicating the
+ result of the push (see localrepository.addchangegroup()).'''
+
+ ret, output = self._callpush("unbundle", cg, heads=encodelist(heads))
+ if ret == "":
+ raise error.ResponseError(
+ _('push failed:'), output)
+ try:
+ ret = int(ret)
+ except ValueError, err:
+ raise error.ResponseError(
+ _('push failed (unexpected response):'), ret)
+
+ for l in output.splitlines(True):
+ self.ui.status(_('remote: '), l)
+ return ret
+
+# server side
+
+class streamres(object):
+ def __init__(self, gen):
+ self.gen = gen
+
+class pushres(object):
+ def __init__(self, res):
+ self.res = res
+
+def dispatch(repo, proto, command):
+ func, spec = commands[command]
+ args = proto.getargs(spec)
+ return func(repo, proto, *args)
+
+def between(repo, proto, pairs):
+ pairs = [decodelist(p, '-') for p in pairs.split(" ")]
+ r = []
+ for b in repo.between(pairs):
+ r.append(encodelist(b) + "\n")
+ return "".join(r)
+
+def branchmap(repo, proto):
+ branchmap = repo.branchmap()
+ heads = []
+ for branch, nodes in branchmap.iteritems():
+ branchname = urllib.quote(branch)
+ branchnodes = encodelist(nodes)
+ heads.append('%s %s' % (branchname, branchnodes))
+ return '\n'.join(heads)
+
+def branches(repo, proto, nodes):
+ nodes = decodelist(nodes)
+ r = []
+ for b in repo.branches(nodes):
+ r.append(encodelist(b) + "\n")
+ return "".join(r)
+
+def capabilities(repo, proto):
+ caps = 'lookup changegroupsubset branchmap pushkey'.split()
+ if _allowstream(repo.ui):
+ caps.append('stream=%d' % repo.changelog.version)
+ caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
+ return ' '.join(caps)
+
+def changegroup(repo, proto, roots):
+ nodes = decodelist(roots)
+ cg = repo.changegroup(nodes, 'serve')
+ return streamres(proto.groupchunks(cg))
+
+def changegroupsubset(repo, proto, bases, heads):
+ bases = decodelist(bases)
+ heads = decodelist(heads)
+ cg = repo.changegroupsubset(bases, heads, 'serve')
+ return streamres(proto.groupchunks(cg))
+
+def heads(repo, proto):
+ h = repo.heads()
+ return encodelist(h) + "\n"
+
+def hello(repo, proto):
+ '''the hello command returns a set of lines describing various
+ interesting things about the server, in an RFC822-like format.
+ Currently the only one defined is "capabilities", which
+ consists of a line in the form:
+
+ capabilities: space separated list of tokens
+ '''
+ return "capabilities: %s\n" % (capabilities(repo, proto))
+
+def listkeys(repo, proto, namespace):
+ d = pushkey_.list(repo, namespace).items()
+ t = '\n'.join(['%s\t%s' % (k.encode('string-escape'),
+ v.encode('string-escape')) for k, v in d])
+ return t
+
+def lookup(repo, proto, key):
+ try:
+ r = hex(repo.lookup(key))
+ success = 1
+ except Exception, inst:
+ r = str(inst)
+ success = 0
+ return "%s %s\n" % (success, r)
+
+def pushkey(repo, proto, namespace, key, old, new):
+ r = pushkey_.push(repo, namespace, key, old, new)
+ return '%s\n' % int(r)
+
+def _allowstream(ui):
+ return ui.configbool('server', 'uncompressed', True, untrusted=True)
+
+def stream(repo, proto):
+ '''If the server supports streaming clone, it advertises the "stream"
+ capability with a value representing the version and flags of the repo
+ it is serving. Client checks to see if it understands the format.
+
+ The format is simple: the server writes out a line with the amount
+ of files, then the total amount of bytes to be transfered (separated
+ by a space). Then, for each file, the server first writes the filename
+ and filesize (separated by the null character), then the file contents.
+ '''
+
+ if not _allowstream(repo.ui):
+ return '1\n'
+
+ entries = []
+ total_bytes = 0
+ try:
+ # get consistent snapshot of repo, lock during scan
+ lock = repo.lock()
+ try:
+ repo.ui.debug('scanning\n')
+ for name, ename, size in repo.store.walk():
+ entries.append((name, size))
+ total_bytes += size
+ finally:
+ lock.release()
+ except error.LockError:
+ return '2\n' # error: 2
+
+ def streamer(repo, entries, total):
+ '''stream out all metadata files in repository.'''
+ yield '0\n' # success
+ repo.ui.debug('%d files, %d bytes to transfer\n' %
+ (len(entries), total_bytes))
+ yield '%d %d\n' % (len(entries), total_bytes)
+ for name, size in entries:
+ repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
+ # partially encode name over the wire for backwards compat
+ yield '%s\0%d\n' % (store.encodedir(name), size)
+ for chunk in util.filechunkiter(repo.sopener(name), limit=size):
+ yield chunk
+
+ return streamres(streamer(repo, entries, total_bytes))
+
+def unbundle(repo, proto, heads):
+ their_heads = decodelist(heads)
+
+ def check_heads():
+ heads = repo.heads()
+ return their_heads == ['force'] or their_heads == heads
+
+ # fail early if possible
+ if not check_heads():
+ return 'unsynced changes'
+
+ # write bundle data to temporary file because it can be big
+ fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
+ fp = os.fdopen(fd, 'wb+')
+ r = 0
+ proto.redirect()
+ try:
+ proto.getfile(fp)
+ lock = repo.lock()
+ try:
+ if not check_heads():
+ # someone else committed/pushed/unbundled while we
+ # were transferring data
+ return 'unsynced changes'
+
+ # push can proceed
+ fp.seek(0)
+ header = fp.read(6)
+ if header.startswith('HG'):
+ if not header.startswith('HG10'):
+ raise ValueError('unknown bundle version')
+ elif header not in changegroupmod.bundletypes:
+ raise ValueError('unknown bundle compression type')
+ gen = changegroupmod.unbundle(header, fp)
+
+ try:
+ r = repo.addchangegroup(gen, 'serve', proto._client(),
+ lock=lock)
+ except util.Abort, inst:
+ sys.stderr.write("abort: %s\n" % inst)
+ finally:
+ lock.release()
+ return pushres(r)
+
+ finally:
+ fp.close()
+ os.unlink(tempname)
+
+commands = {
+ 'between': (between, 'pairs'),
+ 'branchmap': (branchmap, ''),
+ 'branches': (branches, 'nodes'),
+ 'capabilities': (capabilities, ''),
+ 'changegroup': (changegroup, 'roots'),
+ 'changegroupsubset': (changegroupsubset, 'bases heads'),
+ 'heads': (heads, ''),
+ 'hello': (hello, ''),
+ 'listkeys': (listkeys, 'namespace'),
+ 'lookup': (lookup, 'key'),
+ 'pushkey': (pushkey, 'namespace key old new'),
+ 'stream_out': (stream, ''),
+ 'unbundle': (unbundle, 'heads'),
+}
--- a/setup.py Thu Jul 22 08:17:38 2010 -0500
+++ b/setup.py Thu Jul 22 08:24:56 2010 -0500
@@ -9,6 +9,17 @@
if not hasattr(sys, 'version_info') or sys.version_info < (2, 4, 0, 'final'):
raise SystemExit("Mercurial requires Python 2.4 or later.")
+if sys.version_info[0] >= 3:
+ def b(s):
+ '''A helper function to emulate 2.6+ bytes literals using string
+ literals.'''
+ return s.encode('latin1')
+else:
+ def b(s):
+ '''A helper function to emulate 2.6+ bytes literals using string
+ literals.'''
+ return s
+
# Solaris Python packaging brain damage
try:
import hashlib
@@ -114,8 +125,8 @@
# fine, we don't want to load it anyway. Python may warn about
# a missing __init__.py in mercurial/locale, we also ignore that.
err = [e for e in err.splitlines()
- if not e.startswith('Not trusting file') \
- and not e.startswith('warning: Not importing')]
+ if not e.startswith(b('Not trusting file')) \
+ and not e.startswith(b('warning: Not importing'))]
if err:
return ''
return out
@@ -275,7 +286,8 @@
cc = new_compiler()
if hasfunction(cc, 'inotify_add_watch'):
inotify = Extension('hgext.inotify.linux._inotify',
- ['hgext/inotify/linux/_inotify.c'])
+ ['hgext/inotify/linux/_inotify.c'],
+ ['mercurial'])
inotify.optional = True
extmodules.append(inotify)
packages.extend(['hgext.inotify', 'hgext.inotify.linux'])
--- a/tests/test-alias Thu Jul 22 08:17:38 2010 -0500
+++ b/tests/test-alias Thu Jul 22 08:24:56 2010 -0500
@@ -14,6 +14,7 @@
dln = lognull --debug
nousage = rollback
put = export -r 0 -o "\$FOO/%R.diff"
+echo = !echo
[defaults]
mylog = -q
@@ -64,3 +65,6 @@
echo '% path expanding'
FOO=`pwd` hg put
cat 0.diff
+
+echo '% shell aliases'
+hg echo foo
--- a/tests/test-alias.out Thu Jul 22 08:17:38 2010 -0500
+++ b/tests/test-alias.out Thu Jul 22 08:24:56 2010 -0500
@@ -43,3 +43,5 @@
+++ b/foo Thu Jan 01 00:00:00 1970 +0000
@@ -0,0 +1,1 @@
+foo
+% shell aliases
+foo
--- a/tests/test-branchmap Thu Jul 22 08:17:38 2010 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,58 +0,0 @@
-#!/bin/sh
-
-hgserve()
-{
- hg serve -a localhost -p $HGPORT1 -d --pid-file=hg.pid -E errors.log -v $@ \
- | sed -e 's/:[0-9][0-9]*//g' -e 's/http:\/\/[^/]*\//http:\/\/localhost\//'
- cat hg.pid >> "$DAEMON_PIDS"
-}
-
-hg init a
-hg --encoding utf-8 -R a branch æ
-echo foo > a/foo
-hg -R a ci -Am foo
-
-hgserve -R a --config web.push_ssl=False --config web.allow_push=* --encoding latin1
-hg --encoding utf-8 clone http://localhost:$HGPORT1 b
-hg --encoding utf-8 -R b log
-echo bar >> b/foo
-hg -R b ci -m bar
-hg --encoding utf-8 -R b push | sed "s/$HGPORT1/PORT/"
-hg -R a --encoding utf-8 log
-
-kill `cat hg.pid`
-
-
-# verify 7e7d56fe4833 (encoding fallback in branchmap to maintain compatibility with 1.3.x)
-
-cat <<EOF > oldhg
-import sys
-from mercurial import ui, hg, commands
-
-class StdoutWrapper(object):
- def __init__(self, stdout):
- self._file = stdout
-
- def write(self, data):
- if data == '47\n':
- # latin1 encoding is one %xx (3 bytes) shorter
- data = '44\n'
- elif data.startswith('%C3%A6 '):
- # translate to latin1 encoding
- data = '%%E6 %s' % data[7:]
- self._file.write(data)
-
- def __getattr__(self, name):
- return getattr(self._file, name)
-
-sys.stdout = StdoutWrapper(sys.stdout)
-sys.stderr = StdoutWrapper(sys.stderr)
-
-myui = ui.ui()
-repo = hg.repository(myui, 'a')
-commands.serve(myui, repo, stdio=True)
-EOF
-
-echo baz >> b/foo
-hg -R b ci -m baz
-hg push -R b -e 'python oldhg' ssh://dummy/ --encoding latin1
--- a/tests/test-branchmap.out Thu Jul 22 08:17:38 2010 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-marked working directory as branch æ
-adding foo
-listening at http://localhost/ (bound to 127.0.0.1)
-requesting all changes
-adding changesets
-adding manifests
-adding file changes
-added 1 changesets with 1 changes to 1 files
-updating to branch æ
-1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-changeset: 0:867c11ce77b8
-branch: æ
-tag: tip
-user: test
-date: Thu Jan 01 00:00:00 1970 +0000
-summary: foo
-
-pushing to http://localhost:PORT
-searching for changes
-remote: adding changesets
-remote: adding manifests
-remote: adding file changes
-remote: added 1 changesets with 1 changes to 1 files
-changeset: 1:58e7c90d67cb
-branch: æ
-tag: tip
-user: test
-date: Thu Jan 01 00:00:00 1970 +0000
-summary: bar
-
-changeset: 0:867c11ce77b8
-branch: æ
-user: test
-date: Thu Jan 01 00:00:00 1970 +0000
-summary: foo
-
-pushing to ssh://dummy/
-searching for changes
-remote: adding changesets
-remote: adding manifests
-remote: adding file changes
-remote: added 1 changesets with 1 changes to 1 files
--- a/tests/test-clone-cgi Thu Jul 22 08:17:38 2010 -0500
+++ b/tests/test-clone-cgi Thu Jul 22 08:24:56 2010 -0500
@@ -55,7 +55,7 @@
SERVER_SOFTWARE="Apache/2.0.53 (Fedora)"; export SERVER_SOFTWARE
echo % try hgweb request
-QUERY_STRING="cmd=changegroup"; export QUERY_STRING
+QUERY_STRING="cmd=changegroup&roots=0000000000000000000000000000000000000000"; export QUERY_STRING
python hgweb.cgi >page1 2>&1 ; echo $?
python "$TESTDIR/md5sum.py" page1
--- a/tests/test-diff-upgrade Thu Jul 22 08:17:38 2010 -0500
+++ b/tests/test-diff-upgrade Thu Jul 22 08:24:56 2010 -0500
@@ -35,7 +35,7 @@
python -c "file('binary', 'wb').write('\0\0')"
python -c "file('newbinary', 'wb').write('\0')"
rm rmbinary
-hg addremove
+hg addremove -s 0
echo '% git=no: regular diff for all files'
hg autodiff --git=no
--- a/tests/test-hgweb-commands Thu Jul 22 08:17:38 2010 -0500
+++ b/tests/test-hgweb-commands Thu Jul 22 08:24:56 2010 -0500
@@ -46,11 +46,11 @@
echo % heads
"$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT '?cmd=heads'
echo % lookup
-"$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT '?cmd=lookup&node=1'
+"$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT '?cmd=lookup&key=1'
echo % branches
-"$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT '?cmd=branches'
+"$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT '?cmd=branches&nodes=0000000000000000000000000000000000000000'
echo % changegroup
-"$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT '?cmd=changegroup' \
+"$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT '?cmd=changegroup&roots=0000000000000000000000000000000000000000' \
| $TESTDIR/printrepr.py
echo % stream_out
"$TESTDIR/get-with-headers.py" 127.0.0.1:$HGPORT '?cmd=stream_out'
--- a/tests/test-hgweb-commands.out Thu Jul 22 08:17:38 2010 -0500
+++ b/tests/test-hgweb-commands.out Thu Jul 22 08:24:56 2010 -0500
@@ -852,11 +852,11 @@
% lookup
200 Script output follows
-0 'key'
+1 a4f92ed23982be056b9852de5dfe873eaac7f0de
% branches
200 Script output follows
-1d22e65f027e5a0609357e7d8e7508cd2ba5d2fe 2ef0ac749a14e4f57a5a822464a0902c6f7f448f 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
+0000000000000000000000000000000000000000 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
% changegroup
200 Script output follows
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-hgweb-raw Thu Jul 22 08:24:56 2010 -0500
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+hg init test
+cd test
+mkdir sub
+cat >'sub/some "text".txt' <<ENDSOME
+This is just some random text
+that will go inside the file and take a few lines.
+It is very boring to read, but computers don't
+care about things like that.
+ENDSOME
+hg add 'sub/some "text".txt'
+hg commit -d "1 0" -m "Just some text"
+hg serve -p $HGPORT -A access.log -E error.log -d --pid-file=hg.pid
+cat hg.pid >> $DAEMON_PIDS
+("$TESTDIR/get-with-headers.py" localhost:$HGPORT '/?f=a23bf1310f6e;file=sub/some%20%22text%22.txt;style=raw' content-type content-length content-disposition) >getoutput.txt &
+
+sleep 5
+kill `cat hg.pid`
+sleep 1 # wait for server to scream and die
+cat getoutput.txt
+cat access.log error.log | \
+ sed 's/^[^ ]*\( [^[]*\[\)[^]]*\(\].*\)$/host\1date\2/'
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-hgweb-raw.out Thu Jul 22 08:24:56 2010 -0500
@@ -0,0 +1,10 @@
+200 Script output follows
+content-type: text/plain; charset="ascii"
+content-length: 157
+content-disposition: inline; filename="some \"text\".txt"
+
+This is just some random text
+that will go inside the file and take a few lines.
+It is very boring to read, but computers don't
+care about things like that.
+host - - [date] "GET /?f=a23bf1310f6e;file=sub/some%20%22text%22.txt;style=raw HTTP/1.1" 200 -
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-http-branchmap Thu Jul 22 08:24:56 2010 -0500
@@ -0,0 +1,58 @@
+#!/bin/sh
+
+hgserve()
+{
+ hg serve -a localhost -p $HGPORT1 -d --pid-file=hg.pid -E errors.log -v $@ \
+ | sed -e 's/:[0-9][0-9]*//g' -e 's/http:\/\/[^/]*\//http:\/\/localhost\//'
+ cat hg.pid >> "$DAEMON_PIDS"
+}
+
+hg init a
+hg --encoding utf-8 -R a branch æ
+echo foo > a/foo
+hg -R a ci -Am foo
+
+hgserve -R a --config web.push_ssl=False --config web.allow_push=* --encoding latin1
+hg --encoding utf-8 clone http://localhost:$HGPORT1 b
+hg --encoding utf-8 -R b log
+echo bar >> b/foo
+hg -R b ci -m bar
+hg --encoding utf-8 -R b push | sed "s/$HGPORT1/PORT/"
+hg -R a --encoding utf-8 log
+
+kill `cat hg.pid`
+
+
+# verify 7e7d56fe4833 (encoding fallback in branchmap to maintain compatibility with 1.3.x)
+
+cat <<EOF > oldhg
+import sys
+from mercurial import ui, hg, commands
+
+class StdoutWrapper(object):
+ def __init__(self, stdout):
+ self._file = stdout
+
+ def write(self, data):
+ if data == '47\n':
+ # latin1 encoding is one %xx (3 bytes) shorter
+ data = '44\n'
+ elif data.startswith('%C3%A6 '):
+ # translate to latin1 encoding
+ data = '%%E6 %s' % data[7:]
+ self._file.write(data)
+
+ def __getattr__(self, name):
+ return getattr(self._file, name)
+
+sys.stdout = StdoutWrapper(sys.stdout)
+sys.stderr = StdoutWrapper(sys.stderr)
+
+myui = ui.ui()
+repo = hg.repository(myui, 'a')
+commands.serve(myui, repo, stdio=True)
+EOF
+
+echo baz >> b/foo
+hg -R b ci -m baz
+hg push -R b -e 'python oldhg' ssh://dummy/ --encoding latin1
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-http-branchmap.out Thu Jul 22 08:24:56 2010 -0500
@@ -0,0 +1,42 @@
+marked working directory as branch æ
+adding foo
+listening at http://localhost/ (bound to 127.0.0.1)
+requesting all changes
+adding changesets
+adding manifests
+adding file changes
+added 1 changesets with 1 changes to 1 files
+updating to branch æ
+1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+changeset: 0:867c11ce77b8
+branch: æ
+tag: tip
+user: test
+date: Thu Jan 01 00:00:00 1970 +0000
+summary: foo
+
+pushing to http://localhost:PORT
+searching for changes
+remote: adding changesets
+remote: adding manifests
+remote: adding file changes
+remote: added 1 changesets with 1 changes to 1 files
+changeset: 1:58e7c90d67cb
+branch: æ
+tag: tip
+user: test
+date: Thu Jan 01 00:00:00 1970 +0000
+summary: bar
+
+changeset: 0:867c11ce77b8
+branch: æ
+user: test
+date: Thu Jan 01 00:00:00 1970 +0000
+summary: foo
+
+pushing to ssh://dummy/
+searching for changes
+remote: adding changesets
+remote: adding manifests
+remote: adding file changes
+remote: added 1 changesets with 1 changes to 1 files
--- a/tests/test-issue660 Thu Jul 22 08:17:38 2010 -0500
+++ b/tests/test-issue660 Thu Jul 22 08:24:56 2010 -0500
@@ -56,7 +56,7 @@
echo a > a/a
echo b > b
-hg addremove
+hg addremove -s 0
hg st
echo % commit
--- a/tests/test-log Thu Jul 22 08:17:38 2010 -0500
+++ b/tests/test-log Thu Jul 22 08:24:56 2010 -0500
@@ -62,6 +62,11 @@
echo '% log -p d'
hg log -pv d
+echo '% log --removed file'
+hg log --removed -v a
+echo '% log --removed revrange file'
+hg log --removed -v -r0:2 a
+
# log --follow tests
hg init ../follow
cd ../follow
--- a/tests/test-log.out Thu Jul 22 08:17:38 2010 -0500
+++ b/tests/test-log.out Thu Jul 22 08:24:56 2010 -0500
@@ -196,6 +196,32 @@
@@ -0,0 +1,1 @@
+a
+% log --removed file
+changeset: 3:7c6c671bb7cc
+user: test
+date: Thu Jan 01 00:00:04 1970 +0000
+files: a b d
+description:
+d
+
+
+changeset: 0:8580ff50825a
+user: test
+date: Thu Jan 01 00:00:01 1970 +0000
+files: a
+description:
+a
+
+
+% log --removed revrange file
+changeset: 0:8580ff50825a
+user: test
+date: Thu Jan 01 00:00:01 1970 +0000
+files: a
+description:
+a
+
+
adding base
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
adding b1
--- a/tests/test-mq-qnew Thu Jul 22 08:17:38 2010 -0500
+++ b/tests/test-mq-qnew Thu Jul 22 08:24:56 2010 -0500
@@ -70,6 +70,10 @@
HGUSER= hg qnew -u blue red
catpatch ../.hg/patches/red
+ echo '% qnew -e -u with no username configured'
+ HGUSER= hg qnew -e -u chartreuse fucsia
+ catpatch ../.hg/patches/fucsia
+
echo '% fail when trying to import a merge'
hg init merge
cd merge
--- a/tests/test-mq-qnew.out Thu Jul 22 08:17:38 2010 -0500
+++ b/tests/test-mq-qnew.out Thu Jul 22 08:24:56 2010 -0500
@@ -42,6 +42,9 @@
% qnew -u with no username configured
From: blue
+% qnew -e -u with no username configured
+From: chartreuse
+
% fail when trying to import a merge
adding a
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -101,6 +104,10 @@
# HG changeset patch
# Parent
# User blue
+% qnew -e -u with no username configured
+# HG changeset patch
+# Parent
+# User chartreuse
% fail when trying to import a merge
adding a
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-push-warn.out Thu Jul 22 08:17:38 2010 -0500
+++ b/tests/test-push-warn.out Thu Jul 22 08:24:56 2010 -0500
@@ -37,7 +37,7 @@
searching for changes
abort: push creates new remote heads on branch 'default'!
(did you forget to merge? use push -f to force)
-1
+255
pushing to ../c
searching for changes
no changes found
@@ -46,12 +46,12 @@
searching for changes
abort: push creates new remote heads on branch 'default'!
(did you forget to merge? use push -f to force)
-1
+255
pushing to ../c
searching for changes
abort: push creates new remote heads on branch 'default'!
(did you forget to merge? use push -f to force)
-1
+255
pushing to ../c
searching for changes
adding changesets
@@ -90,29 +90,29 @@
searching for changes
abort: push creates new remote branches: c!
(use 'hg push --new-branch' to create new remote branches)
-1
+255
pushing to ../f
searching for changes
abort: push creates new remote branches: c!
(use 'hg push --new-branch' to create new remote branches)
-1
+255
% multiple new branches
pushing to ../f
searching for changes
abort: push creates new remote branches: c, d!
(use 'hg push --new-branch' to create new remote branches)
-1
+255
pushing to ../f
searching for changes
abort: push creates new remote branches: c, d!
(use 'hg push --new-branch' to create new remote branches)
-1
+255
% fail on multiple head push
pushing to ../f
searching for changes
abort: push creates new remote heads on branch 'a'!
(did you forget to merge? use push -f to force)
-1
+255
% push replacement head on existing branches
pushing to ../f
searching for changes
@@ -149,7 +149,7 @@
searching for changes
abort: push creates new remote branches: e!
(use 'hg push --new-branch' to create new remote branches)
-1
+255
% using --new-branch to push new named branch
pushing to ../f
searching for changes
--- a/tests/test-rename Thu Jul 22 08:17:38 2010 -0500
+++ b/tests/test-rename Thu Jul 22 08:24:56 2010 -0500
@@ -26,7 +26,7 @@
echo '# rename --after a single file when src and tgt already tracked'
mv d1/d11/a1 d2/c
-hg addrem
+hg addrem -s 0
hg rename --after d1/d11/a1 d2/c
hg status -C
hg update -C
--- a/tests/test-webraw Thu Jul 22 08:17:38 2010 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,23 +0,0 @@
-#!/bin/sh
-
-hg init test
-cd test
-mkdir sub
-cat >'sub/some "text".txt' <<ENDSOME
-This is just some random text
-that will go inside the file and take a few lines.
-It is very boring to read, but computers don't
-care about things like that.
-ENDSOME
-hg add 'sub/some "text".txt'
-hg commit -d "1 0" -m "Just some text"
-hg serve -p $HGPORT -A access.log -E error.log -d --pid-file=hg.pid
-cat hg.pid >> $DAEMON_PIDS
-("$TESTDIR/get-with-headers.py" localhost:$HGPORT '/?f=a23bf1310f6e;file=sub/some%20%22text%22.txt;style=raw' content-type content-length content-disposition) >getoutput.txt &
-
-sleep 5
-kill `cat hg.pid`
-sleep 1 # wait for server to scream and die
-cat getoutput.txt
-cat access.log error.log | \
- sed 's/^[^ ]*\( [^[]*\[\)[^]]*\(\].*\)$/host\1date\2/'
--- a/tests/test-webraw.out Thu Jul 22 08:17:38 2010 -0500
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,10 +0,0 @@
-200 Script output follows
-content-type: text/plain; charset="ascii"
-content-length: 157
-content-disposition: inline; filename="some \"text\".txt"
-
-This is just some random text
-that will go inside the file and take a few lines.
-It is very boring to read, but computers don't
-care about things like that.
-host - - [date] "GET /?f=a23bf1310f6e;file=sub/some%20%22text%22.txt;style=raw HTTP/1.1" 200 -