copies: compute the exact set of revisions to walk
This change makes the code clearer by removing the revision queue. It
comes with no significant performance impact, but the simpler code will
be easier to update in later changesets.
revision: large amount; added files: large amount; rename: small amount;
c3b14617fbd7 9ba6ab77fd29
before: ! wall 1.430082 comb 1.430000 user 1.390000 sys 0.040000 (median of 10)
after: ! wall 1.405192 comb 1.410000 user 1.390000 sys 0.020000 (median of 10)
revision: large amount; added files: small amount; rename: small amount;
c3b14617fbd7 f650a9b140d2
before: ! wall 1.971366 comb 1.970000 user 1.950000 sys 0.020000 (median of 10)
after: ! wall 1.892541 comb 1.890000 user 1.870000 sys 0.020000 (median of 10)
revision: large amount; added files: large amount; rename: large amount;
08ea3258278e d9fa043f30c0
before: ! wall 0.252594 comb 0.250000 user 0.240000 sys 0.010000 (median of 38)
after: ! wall 0.240075 comb 0.240000 user 0.240000 sys 0.000000 (median of 40)
revision: small amount; added files: large amount; rename: large amount;
df6f7a526b60 a83dc6a2d56f
before: ! wall 0.013100 comb 0.010000 user 0.010000 sys 0.000000 (median of 226)
after: ! wall 0.013247 comb 0.010000 user 0.010000 sys 0.000000 (median of 223)
revision: small amount; added files: large amount; rename: small amount;
4aa4e1f8e19a 169138063d63
before: ! wall 0.001633 comb 0.000000 user 0.000000 sys 0.000000 (median of 1000)
after: ! wall 0.001670 comb 0.000000 user 0.000000 sys 0.000000 (median of 1000)
revision: small amount; added files: small amount; rename: small amount;
4bc173b045a6 964879152e2e
before: ! wall 0.000078 comb 0.000000 user 0.000000 sys 0.000000 (median of 11984)
after: ! wall 0.000119 comb 0.000000 user 0.000000 sys 0.000000 (median of 7982)
revision: medium amount; added files: large amount; rename: medium amount;
c95f1ced15f2 2c68e87c3efe
before: ! wall 0.207093 comb 0.210000 user 0.210000 sys 0.000000 (median of 47)
after: ! wall 0.201551 comb 0.200000 user 0.200000 sys 0.000000 (median of 48)
revision: medium amount; added files: medium amount; rename: small amount;
d343da0c55a8 d7746d32bf9d
before: ! wall 0.038462 comb 0.040000 user 0.040000 sys 0.000000 (median of 100)
after: ! wall 0.036578 comb 0.030000 user 0.030000 sys 0.000000 (median of 100)
Differential Revision: https://phab.mercurial-scm.org/D7076
/*
mpatch.c - efficient binary patching for Mercurial
This implements a patch algorithm that's O(m + n log n) where m is the
size of the output and n is the number of patches.
Given a list of binary patches, it unpacks each into a hunk list,
then combines the hunk lists with a treewise recursion to form a
single hunk list. This hunk list is then applied to the original
text.
The text (or binary) fragments are copied directly from their source
Python objects into a preallocated output string to avoid the
allocation of intermediate Python objects. Working memory is about 2x
the total number of hunks.
Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
*/
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include "bitmanipulation.h"
#include "compat.h"
#include "mpatch.h"
/* VC9 doesn't include bool and lacks stdbool.h (per cext/util.h) */
#if defined(_MSC_VER) || __STDC_VERSION__ < 199901L
#define true 1
#define false 0
typedef unsigned char bool;
#else
#include <stdbool.h>
#endif
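/* allocate a fragment list with room for size fragments (at least one) */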
static struct mpatch_flist *lalloc(ssize_t size)
{
struct mpatch_flist *a = NULL;
if (size < 1) {
size = 1;
}
a = (struct mpatch_flist *)malloc(sizeof(struct mpatch_flist));
if (a) {
a->base = (struct mpatch_frag *)malloc(
sizeof(struct mpatch_frag) * size);
if (a->base) {
a->head = a->tail = a->base;
return a;
}
free(a);
}
return NULL;
}
void mpatch_lfree(struct mpatch_flist *a)
{
if (a) {
free(a->base);
free(a);
}
}
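/* number of fragments currently in the list */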
static ssize_t lsize(struct mpatch_flist *a)
{
return a->tail - a->head;
}
/* helper to add src to *dest iff it won't overflow */
static inline bool safeadd(int src, int *dest)
{
if ((src > 0) == (*dest > 0)) {
if (*dest > 0) {
if (src > (INT_MAX - *dest)) {
return false;
}
} else {
if (src < (INT_MIN - *dest)) {
return false;
}
}
}
*dest += src;
return true;
}
/* helper to subtract src from *dest iff it won't overflow */
static inline bool safesub(int src, int *dest)
{
if (((src > 0) && (*dest < INT_MIN + src)) ||
((src < 0) && (*dest > INT_MAX + src))) {
return false;
}
*dest -= src;
return true;
}
/* move hunks in source that fall before cut to dest, compensating
for changes in offset. the last hunk may be split if necessary.
*/
static int gather(struct mpatch_flist *dest, struct mpatch_flist *src, int cut,
int offset)
{
struct mpatch_frag *d = dest->tail, *s = src->head;
int postend, c, l;
while (s != src->tail) {
int soffset = s->start;
if (!safeadd(offset, &soffset)) {
break; /* add would overflow, oh well */
}
if (soffset >= cut) {
break; /* we've gone far enough */
}
postend = offset;
if (!safeadd(s->start, &postend) ||
!safeadd(s->len, &postend)) {
break;
}
if (postend <= cut) {
/* save this hunk */
int tmp = s->start;
if (!safesub(s->end, &tmp)) {
break;
}
if (!safeadd(s->len, &tmp)) {
break;
}
if (!safeadd(tmp, &offset)) {
break; /* add would overflow, oh well */
}
*d++ = *s++;
} else {
/* break up this hunk */
c = cut;
if (!safesub(offset, &c)) {
break;
}
if (s->end < c) {
c = s->end;
}
l = cut - offset - s->start;
if (s->len < l) {
l = s->len;
}
offset += s->start + l - c;
d->start = s->start;
d->end = c;
d->len = l;
d->data = s->data;
d++;
s->start = c;
s->len = s->len - l;
s->data = s->data + l;
break;
}
}
dest->tail = d;
src->head = s;
return offset;
}
/* like gather, but with no output list */
static int discard(struct mpatch_flist *src, int cut, int offset)
{
struct mpatch_frag *s = src->head;
int postend, c, l;
while (s != src->tail) {
int cmpcut = s->start;
if (!safeadd(offset, &cmpcut)) {
break;
}
if (cmpcut >= cut) {
break;
}
postend = offset;
if (!safeadd(s->start, &postend)) {
break;
}
if (!safeadd(s->len, &postend)) {
break;
}
if (postend <= cut) {
/* do the subtraction first to avoid UB integer overflow
*/
int tmp = s->start;
if (!safesub(s->end, &tmp)) {
break;
}
if (!safeadd(s->len, &tmp)) {
break;
}
if (!safeadd(tmp, &offset)) {
break;
}
s++;
} else {
c = cut;
if (!safesub(offset, &c)) {
break;
}
if (s->end < c) {
c = s->end;
}
l = cut - offset - s->start;
if (s->len < l) {
l = s->len;
}
offset += s->start + l - c;
s->start = c;
s->len = s->len - l;
s->data = s->data + l;
break;
}
}
src->head = s;
return offset;
}
/* combine hunk lists a and b, while adjusting b for offset changes in a;
this deletes a and b and returns the resultant list. */
static struct mpatch_flist *combine(struct mpatch_flist *a,
struct mpatch_flist *b)
{
struct mpatch_flist *c = NULL;
struct mpatch_frag *bh, *ct;
int offset = 0, post;
if (a && b) {
c = lalloc((lsize(a) + lsize(b)) * 2);
}
if (c) {
for (bh = b->head; bh != b->tail; bh++) {
/* save old hunks */
offset = gather(c, a, bh->start, offset);
/* discard replaced hunks */
post = discard(a, bh->end, offset);
/* insert new hunk */
ct = c->tail;
ct->start = bh->start;
ct->end = bh->end;
if (!safesub(offset, &(ct->start)) ||
!safesub(post, &(ct->end))) {
/* It was already possible to exit
* this function with a return value
* of NULL before the safesub()s were
* added, so this should be fine. */
mpatch_lfree(c);
c = NULL;
goto done;
}
ct->len = bh->len;
ct->data = bh->data;
c->tail++;
offset = post;
}
/* hold on to tail from a */
memcpy(c->tail, a->head, sizeof(struct mpatch_frag) * lsize(a));
c->tail += lsize(a);
}
done:
mpatch_lfree(a);
mpatch_lfree(b);
return c;
}
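/*
 A binary patch is a sequence of hunks. As decoded below, each hunk is
 three big-endian 32-bit integers followed by the replacement data:

   +-------+-----+-----+----------------+
   | start | end | len | len data bytes |
   +-------+-----+-----+----------------+

 The hunk replaces bytes [start, end) of the source text with the len
 data bytes; for example, start=4, end=8, len=3 followed by "xyz"
 replaces bytes 4 through 7 with "xyz".
*/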
/* decode a binary patch into a hunk list */
int mpatch_decode(const char *bin, ssize_t len, struct mpatch_flist **res)
{
struct mpatch_flist *l;
struct mpatch_frag *lt;
int pos = 0;
/* assume worst case size, we won't have many of these lists */
l = lalloc(len / 12 + 1);
if (!l) {
return MPATCH_ERR_NO_MEM;
}
lt = l->tail;
/* We check against len-11 to ensure we have at least 12 bytes
left in the patch so we can read our three be32s out of it. */
while (pos >= 0 && pos < (len - 11)) {
lt->start = getbe32(bin + pos);
lt->end = getbe32(bin + pos + 4);
lt->len = getbe32(bin + pos + 8);
if (lt->start < 0 || lt->start > lt->end || lt->len < 0) {
break; /* sanity check */
}
if (!safeadd(12, &pos)) {
break;
}
lt->data = bin + pos;
if (!safeadd(lt->len, &pos)) {
break;
}
lt++;
}
if (pos != len) {
mpatch_lfree(l);
return MPATCH_ERR_CANNOT_BE_DECODED;
}
l->tail = lt;
*res = l;
return 0;
}
/* calculate the size of resultant text */
ssize_t mpatch_calcsize(ssize_t len, struct mpatch_flist *l)
{
ssize_t outlen = 0, last = 0;
struct mpatch_frag *f = l->head;
while (f != l->tail) {
if (f->start < last || f->end > len) {
return MPATCH_ERR_INVALID_PATCH;
}
outlen += f->start - last;
last = f->end;
outlen += f->len;
f++;
}
outlen += len - last;
return outlen;
}
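/* apply hunk list l to orig, writing the result into buf, which must
   hold at least mpatch_calcsize() bytes */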
int mpatch_apply(char *buf, const char *orig, ssize_t len,
struct mpatch_flist *l)
{
struct mpatch_frag *f = l->head;
int last = 0;
char *p = buf;
while (f != l->tail) {
if (f->start < last || f->start > len || f->end > len ||
last < 0) {
return MPATCH_ERR_INVALID_PATCH;
}
memcpy(p, orig + last, f->start - last);
p += f->start - last;
memcpy(p, f->data, f->len);
last = f->end;
p += f->len;
f++;
}
if (last < 0) {
return MPATCH_ERR_INVALID_PATCH;
}
memcpy(p, orig + last, len - last);
return 0;
}
/* recursively fold all patches between start and end into one hunk list */
struct mpatch_flist *
mpatch_fold(void *bins, struct mpatch_flist *(*get_next_item)(void *, ssize_t),
ssize_t start, ssize_t end)
{
ssize_t len;
if (start + 1 == end) {
/* trivial case, output a decoded list */
return get_next_item(bins, start);
}
/* divide and conquer, memory management is elsewhere */
len = (end - start) / 2;
return combine(mpatch_fold(bins, get_next_item, start, start + len),
mpatch_fold(bins, get_next_item, start + len, end));
}
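/*
 Usage sketch, illustrative only: decode, size, and apply a single
 binary patch. bin/binlen and orig/origlen are assumed to be supplied
 by the caller; the MPATCH_ERR_* codes returned on failure are
 negative (see mpatch.h).

	struct mpatch_flist *l;
	ssize_t outlen;
	char *out;

	if (mpatch_decode(bin, binlen, &l) != 0)
		return -1; // malformed patch
	outlen = mpatch_calcsize(origlen, l);
	if (outlen < 0) {
		mpatch_lfree(l);
		return -1; // hunk out of bounds
	}
	out = malloc(outlen ? outlen : 1);
	if (out != NULL && mpatch_apply(out, orig, origlen, l) == 0) {
		// use the outlen result bytes in out
	}
	mpatch_lfree(l);
	free(out);

 To apply a stack of patches at once, mpatch_fold() can combine them
 into a single hunk list first, given a callback that decodes the
 i-th patch.
*/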