/*
 * mercurial/mpatch.h
 *
 * Interface to Mercurial's binary patch (delta) routines.
 */

#ifndef _HG_MPATCH_H_
#define _HG_MPATCH_H_

#include <sys/types.h> /* for ssize_t (POSIX; Windows builds need a compat shim) */

/* Negative error codes returned by the functions below. */
#define MPATCH_ERR_NO_MEM -3
#define MPATCH_ERR_CANNOT_BE_DECODED -2
#define MPATCH_ERR_INVALID_PATCH -1
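
/*
 * Background (a note, not text from this file): a Mercurial binary
 * delta is a sequence of hunks, each encoded as three big-endian
 * 32-bit integers (start offset, end offset, data length) followed by
 * that many bytes of replacement data. mpatch_decode() below unpacks
 * those hunks into mpatch_frag entries.
 */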

/*
 * A single delta hunk: replace bytes [start, end) of the source text
 * with the len bytes pointed to by data.
 */
struct mpatch_frag {
	int start, end, len;
	const char *data;
};

/*
 * An ordered list of fragments: base points at the allocated array,
 * head at the first live entry, and tail one past the last.
 */
struct mpatch_flist {
	struct mpatch_frag *base, *head, *tail;
};

/* Parse a binary delta into a fragment list; returns 0 or an MPATCH_ERR_* code. */
int mpatch_decode(const char *bin, ssize_t len, struct mpatch_flist **res);
/* Size of the text produced by applying l to a source of length len. */
ssize_t mpatch_calcsize(ssize_t len, struct mpatch_flist *l);
/* Free a fragment list returned by mpatch_decode() or mpatch_fold(). */
void mpatch_lfree(struct mpatch_flist *a);
/* Apply fragment list l to orig (length len), writing the result into buf. */
int mpatch_apply(char *buf, const char *orig, ssize_t len,
	struct mpatch_flist *l);
/* Fold the deltas numbered [start, end) into a single fragment list,
   fetching each delta's list via get_next_item(bins, index). */
struct mpatch_flist *mpatch_fold(void *bins,
	struct mpatch_flist *(*get_next_item)(void *, ssize_t),
	ssize_t start, ssize_t end);
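
/*
 * Typical call sequence (a minimal sketch, not code taken from this
 * file; delta/delta_len and source/source_len stand for a caller's
 * binary delta and original text):
 *
 *	struct mpatch_flist *patch;
 *	ssize_t outlen;
 *	char *out;
 *
 *	if (mpatch_decode(delta, delta_len, &patch) < 0)
 *		abort();                 (got an MPATCH_ERR_* code)
 *	outlen = mpatch_calcsize(source_len, patch);
 *	out = outlen < 0 ? NULL : malloc(outlen);
 *	if (out == NULL || mpatch_apply(out, source, source_len, patch) < 0)
 *		abort();                 (invalid patch or out of memory)
 *	mpatch_lfree(patch);
 *
 * buf must be sized with mpatch_calcsize() before mpatch_apply() is
 * called, and the fragment list is freed exactly once, by
 * mpatch_lfree().
 */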

#endif /* _HG_MPATCH_H_ */