view mercurial/mpatch.c @ 20182:04036798ebed

branches: avoid unnecessary changectx.branch() calls

This requires reading from the changelog, which can be costly over NFS. Note that this does not totally remove reading from the changelog; we still do that when calling changectx.closesbranch(). That call will be removed in a later patch.

Running hg branches on the PyPy repo (with 996) over a busy NFS server, before this change:

$ time hg --profile branches > /dev/null
CallCount Recursive Total(s) Inline(s) module:lineno(function)
2042 0 2.2827 2.2827 <open>
2036 0 0.9840 0.9840 <method 'close' of 'file' objects>
2036 0 0.0464 0.0464 <method 'read' of 'file' objects>
5233 0 0.1985 0.0453 mercurial.repoview:161(changelog)
10462 0 0.0791 0.0314 mercurial.changelog:133(tip)
5233 0 0.0388 0.0176 mercurial.localrepo:26(__get__)
10462 0 0.0250 0.0126 <len>
5233 0 0.0059 0.0039 mercurial.repoview:112(filterrevs)
10462 0 0.0029 0.0029 <hash>
2034 0 0.0444 0.0444 <method 'seek' of 'file' objects>
5340 0 0.0390 0.0390 mercurial.revlog:296(rev)
2582 0 0.0371 0.0371 <zlib.decompress>
3155 0 0.1963 0.0366 mercurial.context:202(__init__)
3155 0 0.1238 0.0306 mercurial.repoview:161(changelog)
3155 0 0.0261 0.0080 mercurial.changelog:183(rev)
9465 0 0.0061 0.0061 <isinstance>
1096 0 0.0023 0.0023 <binascii.unhexlify>
4251 0 0.0014 0.0014 <len>
2059 0 3.7341 0.0332 mercurial.changelog:270(read)
2059 0 3.6304 0.0307 mercurial.revlog:907(revision)
2057 0 0.0262 0.0137 mercurial.changelog:28(decodeextra)
4118 0 0.0094 0.0094 <method 'split' of 'str' objects>
4118 0 0.0270 0.0048 mercurial.encoding:61(tolocal)
2059 0 0.0040 0.0040 <method 'index' of 'str' objects>
10462 0 0.0791 0.0314 mercurial.changelog:133(tip)
10462 0 0.0289 0.0207 mercurial.changelog:190(node)
10462 0 0.0188 0.0091 <len>
52433 20932 0.0478 0.0310 <len>
20932 0 0.0221 0.0168 mercurial.revlog:262(__len__)
2059 0 3.6304 0.0307 mercurial.revlog:907(revision)

real 0m4.361s
user 0m0.986s
sys 0m0.237s

After this change:

$ time hg --profile branches > /dev/null
CallCount Recursive Total(s) Inline(s) module:lineno(function)
1069 0 1.1098 1.1098 <open>
1063 0 0.4865 0.4865 <method 'close' of 'file' objects>
4122 0 0.1811 0.0404 mercurial.repoview:161(changelog)
8240 0 0.0712 0.0272 mercurial.changelog:133(tip)
4122 0 0.0378 0.0177 mercurial.localrepo:26(__get__)
8240 0 0.0221 0.0115 <len>
4122 0 0.0057 0.0033 mercurial.repoview:112(filterrevs)
8240 0 0.0025 0.0025 <hash>
3029 0 0.1979 0.0371 mercurial.context:202(__init__)
3029 0 0.1278 0.0310 mercurial.repoview:161(changelog)
3029 0 0.0230 0.0081 mercurial.changelog:183(rev)
9087 0 0.0061 0.0061 <isinstance>
1096 0 0.0026 0.0026 <binascii.unhexlify>
4125 0 0.0014 0.0014 <len>
4229 0 0.0337 0.0337 mercurial.revlog:296(rev)
1061 0 0.0296 0.0296 <method 'seek' of 'file' objects>
1063 0 0.0292 0.0292 <method 'read' of 'file' objects>
8240 0 0.0712 0.0272 mercurial.changelog:133(tip)
8240 0 0.0271 0.0196 mercurial.changelog:190(node)
8240 0 0.0169 0.0083 <len>
40476 16488 0.0422 0.0271 <len>
16488 0 0.0193 0.0152 mercurial.revlog:262(__len__)
1342 0 0.0241 0.0241 <zlib.decompress>
9445 0 0.0336 0.0224 mercurial.changelog:190(node)
9445 0 0.0112 0.0112 mercurial.revlog:317(node)
1074 0 1.9102 0.0224 mercurial.changelog:270(read)
1074 0 1.8397 0.0202 mercurial.revlog:907(revision)
1073 0 0.0187 0.0099 mercurial.changelog:28(decodeextra)
2148 0 0.0061 0.0061 <method 'split' of 'str' objects>
2148 0 0.0184 0.0034 mercurial.encoding:61(tolocal)

real 0m2.402s
user 0m0.735s
sys 0m0.177s
author Brodie Rao <brodie@sf.io>
date Fri, 15 Nov 2013 23:18:08 -0500
parents 09e41ac6289d
children b6ed2505d6cf

/*
 mpatch.c - efficient binary patching for Mercurial

 This implements a patch algorithm that's O(m + n log n) where m is the
 size of the output and n is the number of patches.

 Given a list of binary patches, it unpacks each into a hunk list,
 then combines the hunk lists with a treewise recursion to form a
 single hunk list. This hunk list is then applied to the original
 text.

 The text (or binary) fragments are copied directly from their source
 Python objects into a preallocated output string to avoid the
 allocation of intermediate Python objects. Working memory is about 2x
 the total number of hunks.

 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>

 This software may be used and distributed according to the terms
 of the GNU General Public License, incorporated herein by reference.
*/
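/*
 Illustration (not part of the module): each hunk produced by decode()
 below means "replace bytes [start, end) of the source text with the
 len bytes at data".  The guarded-out sketch applies one such hunk by
 hand; the name apply_one_hunk is hypothetical and exists only for this
 example.
*/
#if 0
#include <assert.h>
#include <string.h>

static size_t apply_one_hunk(char *out, const char *src, size_t srclen,
                             size_t start, size_t end,
                             const char *data, size_t len)
{
	memcpy(out, src, start);                  /* unchanged prefix  */
	memcpy(out + start, data, len);           /* replacement bytes */
	memcpy(out + start + len, src + end, srclen - end); /* suffix  */
	return start + len + (srclen - end);      /* patched length    */
}

int main(void)
{
	char out[16];
	/* replace "cd" in "abcdef" with "XYZ" -> "abXYZef" */
	size_t n = apply_one_hunk(out, "abcdef", 6, 2, 4, "XYZ", 3);
	assert(n == 7 && memcmp(out, "abXYZef", 7) == 0);
	return 0;
}
#endif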

#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <stdlib.h>
#include <string.h>

#include "util.h"

static char mpatch_doc[] = "Efficient binary patching.";
static PyObject *mpatch_Error;

struct frag {
	int start, end, len;	/* replace source bytes [start, end) with len bytes */
	const char *data;	/* replacement bytes, borrowed from the patch buffer */
};

struct flist {
	struct frag *base, *head, *tail;	/* base: allocation; [head, tail): live hunks */
};

static struct flist *lalloc(Py_ssize_t size)
{
	struct flist *a = NULL;

	if (size < 1)
		size = 1;

	a = (struct flist *)malloc(sizeof(struct flist));
	if (a) {
		a->base = (struct frag *)malloc(sizeof(struct frag) * size);
		if (a->base) {
			a->head = a->tail = a->base;
			return a;
		}
		free(a);
		a = NULL;
	}
	if (!PyErr_Occurred())
		PyErr_NoMemory();
	return NULL;
}

static void lfree(struct flist *a)
{
	if (a) {
		free(a->base);
		free(a);
	}
}

/* number of hunks currently in the list */
static Py_ssize_t lsize(struct flist *a)
{
	return a->tail - a->head;
}

/* move hunks in source that start before the cut point to dest,
   compensating for changes in offset. the last hunk may be split if
   necessary.
*/
static int gather(struct flist *dest, struct flist *src, int cut, int offset)
{
	struct frag *d = dest->tail, *s = src->head;
	int postend, c, l;

	while (s != src->tail) {
		if (s->start + offset >= cut)
			break; /* we've gone far enough */

		postend = offset + s->start + s->len;
		if (postend <= cut) {
			/* save this hunk */
			offset += s->start + s->len - s->end;
			*d++ = *s++;
		}
		else {
			/* break up this hunk */
			c = cut - offset;
			if (s->end < c)
				c = s->end;
			l = cut - offset - s->start;
			if (s->len < l)
				l = s->len;

			offset += s->start + l - c;

			d->start = s->start;
			d->end = c;
			d->len = l;
			d->data = s->data;
			d++;
			s->start = c;
			s->len = s->len - l;
			s->data = s->data + l;

			break;
		}
	}

	dest->tail = d;
	src->head = s;
	return offset;
}
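/*
 Worked example (illustrative, not from the original comments): with
 offset 0 and cut 5, a source hunk {start=2, end=4, len=5} does not fit
 entirely before the cut (postend = 7 > 5), so it is split: dest
 receives {start=2, end=4, len=3}, the source hunk is rewritten to
 insert its remaining 2 bytes at position 4, and the returned offset is
 2 + 3 - 4 = 1, the length change contributed by the part already
 moved.
*/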

/* like gather, but with no output list */
static int discard(struct flist *src, int cut, int offset)
{
	struct frag *s = src->head;
	int postend, c, l;

	while (s != src->tail) {
		if (s->start + offset >= cut)
			break;

		postend = offset + s->start + s->len;
		if (postend <= cut) {
			offset += s->start + s->len - s->end;
			s++;
		}
		else {
			c = cut - offset;
			if (s->end < c)
				c = s->end;
			l = cut - offset - s->start;
			if (s->len < l)
				l = s->len;

			offset += s->start + l - c;
			s->start = c;
			s->len = s->len - l;
			s->data = s->data + l;

			break;
		}
	}

	src->head = s;
	return offset;
}

/* combine hunk lists a and b, while adjusting b for offset changes in a;
   this deletes a and b and returns the resultant list. */
static struct flist *combine(struct flist *a, struct flist *b)
{
	struct flist *c = NULL;
	struct frag *bh, *ct;
	int offset = 0, post;

	if (a && b)
		c = lalloc((lsize(a) + lsize(b)) * 2);

	if (c) {

		for (bh = b->head; bh != b->tail; bh++) {
			/* save old hunks */
			offset = gather(c, a, bh->start, offset);

			/* discard replaced hunks */
			post = discard(a, bh->end, offset);

			/* insert new hunk */
			ct = c->tail;
			ct->start = bh->start - offset;
			ct->end = bh->end - post;
			ct->len = bh->len;
			ct->data = bh->data;
			c->tail++;
			offset = post;
		}

		/* hold on to tail from a */
		memcpy(c->tail, a->head, sizeof(struct frag) * lsize(a));
		c->tail += lsize(a);
	}

	lfree(a);
	lfree(b);
	return c;
}
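/*
 Worked example (illustrative): let a hold one hunk {start=2, end=4,
 len=5}, so a's output is 3 bytes longer from position 4 onward, and
 let b hold one hunk {start=10, end=12, len=1} expressed in a's output
 coordinates.  gather() copies a's hunk (it ends before the cut at 10)
 and returns offset 3, discard() finds nothing left to drop, and the
 new hunk is stored as {start=10-3, end=12-3, len=1} = {7, 9, 1}, i.e.
 translated back into the original text's coordinates.  The combined
 list is {2,4,5}, {7,9,1}.
*/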

/* decode a binary patch into a hunk list */
static struct flist *decode(const char *bin, Py_ssize_t len)
{
	struct flist *l;
	struct frag *lt;
	int pos = 0;

	/* assume worst case size, we won't have many of these lists */
	l = lalloc(len / 12);
	if (!l)
		return NULL;

	lt = l->tail;

	while (pos >= 0 && pos < len) {
		lt->start = getbe32(bin + pos);
		lt->end = getbe32(bin + pos + 4);
		lt->len = getbe32(bin + pos + 8);
		if (lt->start > lt->end)
			break; /* sanity check */
		lt->data = bin + pos + 12;
		pos += 12 + lt->len;
		lt++;
	}

	if (pos != len) {
		if (!PyErr_Occurred())
			PyErr_SetString(mpatch_Error, "patch cannot be decoded");
		lfree(l);
		return NULL;
	}

	l->tail = lt;
	return l;
}
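/*
 Wire-format note (illustrative): decode() expects, for each hunk, a
 12-byte header of three big-endian 32-bit fields -- start, end, len --
 followed by len bytes of replacement data.  The guarded-out sketch
 below serializes one hunk in that layout; putbe32_example and
 write_hunk are hypothetical names used only here.
*/
#if 0
#include <stdint.h>
#include <string.h>

static void putbe32_example(uint32_t v, char *p)
{
	p[0] = (char)((v >> 24) & 0xff);
	p[1] = (char)((v >> 16) & 0xff);
	p[2] = (char)((v >> 8) & 0xff);
	p[3] = (char)(v & 0xff);
}

/* serialize one hunk into buf; returns the number of bytes written */
static size_t write_hunk(char *buf, uint32_t start, uint32_t end,
                         const char *data, uint32_t len)
{
	putbe32_example(start, buf);
	putbe32_example(end, buf + 4);
	putbe32_example(len, buf + 8);
	memcpy(buf + 12, data, len);
	return 12 + (size_t)len;
}
#endif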

/* calculate the size of the resultant text */
static Py_ssize_t calcsize(Py_ssize_t len, struct flist *l)
{
	Py_ssize_t outlen = 0, last = 0;
	struct frag *f = l->head;

	while (f != l->tail) {
		if (f->start < last || f->end > len) {
			if (!PyErr_Occurred())
				PyErr_SetString(mpatch_Error,
				                "invalid patch");
			return -1;
		}
		outlen += f->start - last;
		last = f->end;
		outlen += f->len;
		f++;
	}

	outlen += len - last;
	return outlen;
}
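/*
 Example (illustrative): for a 10-byte original and a single hunk
 {start=2, end=4, len=5}, the loop adds 2 (bytes before the hunk) and 5
 (replacement bytes) and the final line adds 10 - 4 = 6 trailing bytes,
 giving outlen = 13; equivalently, outlen is len plus the sum of
 f->len - (f->end - f->start) over all hunks.
*/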

static int apply(char *buf, const char *orig, Py_ssize_t len, struct flist *l)
{
	struct frag *f = l->head;
	int last = 0;
	char *p = buf;

	while (f != l->tail) {
		if (f->start < last || f->end > len) {
			if (!PyErr_Occurred())
				PyErr_SetString(mpatch_Error,
				                "invalid patch");
			return 0;
		}
		memcpy(p, orig + last, f->start - last);
		p += f->start - last;
		memcpy(p, f->data, f->len);
		last = f->end;
		p += f->len;
		f++;
	}
	memcpy(p, orig + last, len - last);
	return 1;
}

/* recursively fold the patches in bins[start:end) into a single hunk list */
static struct flist *fold(PyObject *bins, Py_ssize_t start, Py_ssize_t end)
{
	Py_ssize_t len, blen;
	const char *buffer;

	if (start + 1 == end) {
		/* trivial case, output a decoded list */
		PyObject *tmp = PyList_GetItem(bins, start);
		if (!tmp)
			return NULL;
		if (PyObject_AsCharBuffer(tmp, &buffer, &blen))
			return NULL;
		return decode(buffer, blen);
	}

	/* divide and conquer, memory management is elsewhere */
	len = (end - start) / 2;
	return combine(fold(bins, start, start + len),
		       fold(bins, start + len, end));
}
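/*
 Example (illustrative): for four patches the recursion evaluates
 combine(combine(decode(b0), decode(b1)),
         combine(decode(b2), decode(b3))),
 so each decoded hunk list participates in O(log n) combine passes.
*/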

static PyObject *
patches(PyObject *self, PyObject *args)
{
	PyObject *text, *bins, *result;
	struct flist *patch;
	const char *in;
	char *out;
	Py_ssize_t len, outlen, inlen;

	if (!PyArg_ParseTuple(args, "OO:mpatch", &text, &bins))
		return NULL;

	len = PyList_Size(bins);
	if (!len) {
		/* nothing to do */
		Py_INCREF(text);
		return text;
	}

	if (PyObject_AsCharBuffer(text, &in, &inlen))
		return NULL;

	patch = fold(bins, 0, len);
	if (!patch)
		return NULL;

	outlen = calcsize(inlen, patch);
	if (outlen < 0) {
		result = NULL;
		goto cleanup;
	}
	result = PyBytes_FromStringAndSize(NULL, outlen);
	if (!result)
		goto cleanup;
	out = PyBytes_AsString(result);
	if (!apply(out, in, inlen, patch)) {
		Py_DECREF(result);
		result = NULL;
	}
cleanup:
	lfree(patch);
	return result;
}
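/*
 Usage sketch (not part of the module, assuming a Python 2 interpreter
 with Mercurial importable): calling mpatch.patches() from an embedding
 C program.  The patch blob encodes a single hunk replacing bytes
 [2, 4) of "abcdef" with "XYZ", so the expected result is 'abXYZef'.
*/
#if 0
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <stdio.h>

int main(void)
{
	static char patch[] = {
		0, 0, 0, 2,    /* start */
		0, 0, 0, 4,    /* end   */
		0, 0, 0, 3,    /* len   */
		'X', 'Y', 'Z'  /* data  */
	};
	PyObject *mod, *res;

	Py_Initialize();
	mod = PyImport_ImportModule("mercurial.mpatch");
	if (!mod) {
		PyErr_Print();
		return 1;
	}
	res = PyObject_CallMethod(mod, "patches", "(s#[s#])",
	                          "abcdef", (Py_ssize_t)6,
	                          patch, (Py_ssize_t)sizeof(patch));
	if (!res) {
		PyErr_Print();
		return 1;
	}
	PyObject_Print(res, stdout, 0);  /* prints 'abXYZef' */
	putchar('\n');
	Py_DECREF(res);
	Py_DECREF(mod);
	Py_Finalize();
	return 0;
}
#endif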

/* calculate size of a patched file directly */
static PyObject *
patchedsize(PyObject *self, PyObject *args)
{
	long orig, start, end, len, outlen = 0, last = 0, pos = 0;
	Py_ssize_t patchlen;
	char *bin;

	if (!PyArg_ParseTuple(args, "ls#", &orig, &bin, &patchlen))
		return NULL;

	while (pos >= 0 && pos < patchlen) {
		start = getbe32(bin + pos);
		end = getbe32(bin + pos + 4);
		len = getbe32(bin + pos + 8);
		if (start > end)
			break; /* sanity check */
		pos += 12 + len;
		outlen += start - last;
		last = end;
		outlen += len;
	}

	if (pos != patchlen) {
		if (!PyErr_Occurred())
			PyErr_SetString(mpatch_Error, "patch cannot be decoded");
		return NULL;
	}

	outlen += orig - last;
	return Py_BuildValue("l", outlen);
}
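/*
 Example (illustrative): for orig = 6 and a patch holding the single
 hunk {start=2, end=4, len=3} used in the earlier sketches, the loop
 adds 2 + 3 and the final line adds 6 - 4, so patchedsize() returns 7
 without materializing the patched text.
*/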

static PyMethodDef methods[] = {
	{"patches", patches, METH_VARARGS, "apply a series of patches\n"},
	{"patchedsize", patchedsize, METH_VARARGS, "calculed patched size\n"},
	{NULL, NULL}
};

#ifdef IS_PY3K
static struct PyModuleDef mpatch_module = {
	PyModuleDef_HEAD_INIT,
	"mpatch",
	mpatch_doc,
	-1,
	methods
};

PyMODINIT_FUNC PyInit_mpatch(void)
{
	PyObject *m;

	m = PyModule_Create(&mpatch_module);
	if (m == NULL)
		return NULL;

	mpatch_Error = PyErr_NewException("mpatch.mpatchError", NULL, NULL);
	Py_INCREF(mpatch_Error);
	PyModule_AddObject(m, "mpatchError", mpatch_Error);

	return m;
}
#else
PyMODINIT_FUNC
initmpatch(void)
{
	Py_InitModule3("mpatch", methods, mpatch_doc);
	mpatch_Error = PyErr_NewException("mpatch.mpatchError", NULL, NULL);
}
#endif