view mercurial/compat.h @ 30289:1f92056c4066

revlog: optimize _chunkraw when startrev==endrev

In many cases, _chunkraw() is called with startrev==endrev. When this is
true, we can avoid an extra index lookup and some other minor operations.

On the mozilla-unified repo, `hg perfrevlogchunks -c` says this has the
following impact:

! read w/ reused fd
! wall 0.371846 comb 0.370000 user 0.350000 sys 0.020000 (best of 27)
! wall 0.337930 comb 0.330000 user 0.300000 sys 0.030000 (best of 30)
! read batch w/ reused fd
! wall 0.014952 comb 0.020000 user 0.000000 sys 0.020000 (best of 197)
! wall 0.014866 comb 0.010000 user 0.000000 sys 0.010000 (best of 196)

So, we've gone from ~25x slower than batch to ~22.5x slower. At this point,
there's probably not much else we can do except implement an optimized
function in the index itself, including in C. A rough sketch of the shape of
the fast path appears below, after the changeset metadata.

author Gregory Szorc <gregory.szorc@gmail.com>
date Sun, 23 Oct 2016 10:40:33 -0700
parents 7b22599dcb85
children f4433f2713d0
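
The change described above lives in _chunkraw() in mercurial/revlog.py, not in
this header. The following is only a rough sketch of the idea, not Mercurial's
revlog code: SimpleRevlog, chunk_range() and the (offset, length) index layout
are invented for the example. It shows how a request for a single revision can
be answered from one index entry instead of two.

# Simplified model of the startrev == endrev fast path described in the
# commit message. This is an illustration, not Mercurial's revlog code.

class SimpleRevlog(object):
    def __init__(self, index, data):
        # index: list of (offset, length) pairs saying where each
        # revision's chunk lives inside the raw data blob
        self.index = index
        self.data = data

    def chunk_range(self, startrev, endrev):
        """Return (offset, raw bytes) covering revisions startrev..endrev."""
        if startrev == endrev:
            # fast path: a single revision needs only one index lookup,
            # which gives both the start offset and the length
            offset, length = self.index[startrev]
            return offset, self.data[offset:offset + length]

        # general path: the first entry gives the start offset, the last
        # entry gives the end offset (its own offset plus its length)
        start = self.index[startrev][0]
        endoffset, endlength = self.index[endrev]
        return start, self.data[start:endoffset + endlength]


if __name__ == '__main__':
    data = b'aaabbbbcc'
    index = [(0, 3), (3, 4), (7, 2)]
    rl = SimpleRevlog(index, data)
    print(rl.chunk_range(1, 1))   # fast path    -> (3, b'bbbb')
    print(rl.chunk_range(0, 2))   # general path -> (0, b'aaabbbbcc')

In the real revlog the index entries carry more fields than a plain
(offset, length) pair, and inline revlogs interleave index entries with the
chunk data, so the actual change is more involved; the shape of the fast path
is the same, though: when startrev == endrev, a single index entry already
yields both the start offset and the length of the segment to read.
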
line source

#ifndef _HG_COMPAT_H_
#define _HG_COMPAT_H_

#ifdef _WIN32
#ifdef _MSC_VER
/* msvc 6.0 has problems with the C99 "inline" keyword; use __inline */
#define inline __inline
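/* ssize_t is a POSIX type that MSVC does not provide; match pointer width */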
#if defined(_WIN64)
typedef __int64 ssize_t;
#else
typedef int ssize_t;
#endif
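/* older MSVC does not ship <stdint.h>, so define the exact-width types here */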
typedef signed char int8_t;
typedef short int16_t;
typedef long int32_t;
typedef __int64 int64_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned long uint32_t;
typedef unsigned __int64 uint64_t;
#else
#include <stdint.h>
#endif
#else
/* not windows */
#include <sys/types.h>
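/* pull in the byte-order conversion helpers (ntohl() and friends) */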
#if defined __BEOS__ && !defined __HAIKU__
#include <ByteOrder.h>
#else
#include <arpa/inet.h>
#endif
#include <inttypes.h>
#endif

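/* the system C compilers on these platforms may not accept "inline"; drop it */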
#if defined __hpux || defined __SUNPRO_C || defined _AIX
#define inline
#endif

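/* gcc and clang accept the __inline spelling regardless of the C standard */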
#ifdef __linux
#define inline __inline
#endif

#endif