view contrib/python-zstandard/make_cffi.py @ 40021:c537144fdbef

wireprotov2: support response caching

One of the things I've learned from managing VCS servers over the years is that they are hard to scale. It is well known that some companies have very beefy (read: very expensive) servers to power their VCS needs. It is also known that specialized server software exists for various VCSes in order to facilitate scaling. (Mercurial is in this boat.)

One of the aspects that makes a VCS server hard to scale is the high CPU load incurred by constant client clone/pull operations. To alleviate the scaling pain associated with data retrieval operations, I want to integrate caching into the Mercurial wire protocol server as robustly as possible, such that servers can aggressively cache responses and avoid as much server load as possible.

This commit represents the initial implementation of a general caching layer in wire protocol version 2. We define a new interface and behavior for a wire protocol cacher in repository.py. (This is probably where a reviewer should look first to understand what is going on.) The bulk of the added code is in wireprotov2server.py, where we define how a command can opt in to being cached and integrate caching into command dispatching.

At a very high level:

* A command can declare itself as cacheable by providing a callable that can be used to derive a cache key.
* At dispatch time, if a command is cacheable, we attempt to construct a cacher and use it to serve the request from cache and/or cache the response.
* The dispatch layer handles the bulk of the business logic for caching, making cachers mostly "dumb content stores."
* Cached entries are invalidated (one of the harder parts of caching in general) by varying the cache key when state changes. As such, cachers don't need to be concerned with cache invalidation.

Initially, we've hooked up support for caching the "manifestdata" and "filedata" commands. These are the simplest to cache, as their output should be immutable over time. Caching of commands related to changeset data is harder (because cache validity is impacted by changes to bookmarks, phases, etc.) and will be implemented later. (Strictly speaking, censoring a file should invalidate caches. I've added an inline TODO to track this edge case.)

To prove it works, this commit implements a test-only extension providing in-memory caching backed by an lrucachedict, along with a new test showing the extension behaving properly. FWIW, the cacher is ~50 lines of code, demonstrating the relative ease with which a cache can be added to a server.

While the test cacher is not suitable for production workloads, just for kicks I performed a clone of just the changeset and manifest data for the mozilla-unified repository. With a fully warmed cache (of just the manifest data, since changeset data is not cached), server-side CPU usage dropped from ~73s to ~28s. That's pretty significant and demonstrates the potential response caching has for server scalability!

Differential Revision: https://phab.mercurial-scm.org/D4773
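As a rough illustration of the "dumb content store" model described above, an in-memory cacher backed by an lrucachedict can be quite small. The sketch below is hypothetical (the lookup/store method names are invented for illustration; the real cacher interface is the one defined in repository.py), but it captures the key property that invalidation happens entirely through cache key derivation:

    from mercurial import util

    class memorycacher(object):
        # Cache command responses in memory, keyed by a caller-derived key.
        # Invalidation is implicit: when relevant repository state changes,
        # the dispatch layer derives a different key, so stale entries are
        # never looked up again and eventually age out of the LRU.

        def __init__(self, maxentries=64):
            self._cache = util.lrucachedict(maxentries)

        def lookup(self, cachekey):
            # Return previously cached response objects, or None on a miss.
            if cachekey in self._cache:
                return self._cache[cachekey]
            return None

        def store(self, cachekey, objects):
            # Record the response objects emitted for this cache key.
            self._cache[cachekey] = objects

The test-only extension in this commit follows the same shape, with the dispatch layer responsible for deriving cache keys and for deciding when to consult or populate the cacher.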
author Gregory Szorc <gregory.szorc@gmail.com>
date Wed, 26 Sep 2018 17:16:56 -0700
parents b1fb341d8a61
children 73fef626dae3

# Copyright (c) 2016-present, Gregory Szorc
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.

from __future__ import absolute_import

import cffi
import distutils.ccompiler
import os
import re
import subprocess
import tempfile


HERE = os.path.abspath(os.path.dirname(__file__))

SOURCES = ['zstd/%s' % p for p in (
    'common/entropy_common.c',
    'common/error_private.c',
    'common/fse_decompress.c',
    'common/pool.c',
    'common/threading.c',
    'common/xxhash.c',
    'common/zstd_common.c',
    'compress/fse_compress.c',
    'compress/huf_compress.c',
    'compress/zstd_compress.c',
    'compress/zstd_double_fast.c',
    'compress/zstd_fast.c',
    'compress/zstd_lazy.c',
    'compress/zstd_ldm.c',
    'compress/zstd_opt.c',
    'compress/zstdmt_compress.c',
    'decompress/huf_decompress.c',
    'decompress/zstd_decompress.c',
    'dictBuilder/cover.c',
    'dictBuilder/divsufsort.c',
    'dictBuilder/zdict.c',
)]

# Headers whose preprocessed output will be fed into cdef().
HEADERS = [os.path.join(HERE, 'zstd', *p) for p in (
    ('zstd.h',),
    ('dictBuilder', 'zdict.h'),
)]

INCLUDE_DIRS = [os.path.join(HERE, d) for d in (
    'zstd',
    'zstd/common',
    'zstd/compress',
    'zstd/decompress',
    'zstd/dictBuilder',
)]

# cffi can't parse some of the primitives in zstd.h. So we invoke the
# preprocessor and feed its output into cffi.
compiler = distutils.ccompiler.new_compiler()

# Needed for MSVC.
if hasattr(compiler, 'initialize'):
    compiler.initialize()

# Distutils doesn't set compiler.preprocessor, so invoke the preprocessor
# manually.
if compiler.compiler_type == 'unix':
    args = list(compiler.executables['compiler'])
    args.extend([
        '-E',
        '-DZSTD_STATIC_LINKING_ONLY',
        '-DZDICT_STATIC_LINKING_ONLY',
    ])
elif compiler.compiler_type == 'msvc':
    args = [compiler.cc]
    args.extend([
        '/EP',
        '/DZSTD_STATIC_LINKING_ONLY',
        '/DZDICT_STATIC_LINKING_ONLY',
    ])
else:
    raise Exception('unsupported compiler type: %s' % compiler.compiler_type)

def preprocess(path):
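    """Run a header file through the C preprocessor and return its output.

    Some lines are stripped from the input first so they don't produce
    duplicate or conflicting declarations in the preprocessed output.
    """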
    with open(path, 'rb') as fh:
        lines = []
        for l in fh:
            # zstd.h includes <stddef.h>, which is also included by cffi's
            # boilerplate. This can lead to duplicate declarations. So we strip
            # this include from the preprocessor invocation.
            #
            # The same thing happens when zstd.h is included, so give it the
            # same treatment.
            #
            # We define ZSTD_STATIC_LINKING_ONLY, which is redundant with the
            # inline #define in zstdmt_compress.h and results in a compiler
            # warning. So drop the inline #define.
            if l.startswith((b'#include <stddef.h>',
                             b'#include "zstd.h"',
                             b'#define ZSTD_STATIC_LINKING_ONLY')):
                continue

            # ZSTDLIB_API may not be defined if we dropped zstd.h. It isn't
            # important so just filter it out.
            if l.startswith(b'ZSTDLIB_API'):
                l = l[len(b'ZSTDLIB_API '):]

            lines.append(l)

    fd, input_file = tempfile.mkstemp(suffix='.h')
    os.write(fd, b''.join(lines))
    os.close(fd)

    try:
        process = subprocess.Popen(args + [input_file], stdout=subprocess.PIPE)
        output = process.communicate()[0]
        ret = process.poll()
        if ret:
            raise Exception('preprocessor exited with error')

        return output
    finally:
        os.unlink(input_file)


def normalize_output(output):
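    """Strip compiler-specific annotations that cffi's parser can't handle."""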
    lines = []
    for line in output.splitlines():
        # CFFI's parser doesn't like __attribute__ on UNIX compilers.
        if line.startswith(b'__attribute__ ((visibility ("default"))) '):
            line = line[len(b'__attribute__ ((visibility ("default"))) '):]

        if line.startswith(b'__attribute__((deprecated('):
            continue
        elif b'__declspec(deprecated(' in line:
            continue

        lines.append(line)

    return b'\n'.join(lines)


ffi = cffi.FFI()
# zstd.h uses a possibly undefined MIN(). Define it until
# https://github.com/facebook/zstd/issues/976 is fixed.
# *_DISABLE_DEPRECATE_WARNINGS prevents the compiler from emitting a warning
# when cffi uses the function. Since we statically link against zstd, even
# if we use the deprecated functions it shouldn't be a huge problem.
ffi.set_source('_zstd_cffi', '''
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#define ZDICT_STATIC_LINKING_ONLY
#define ZDICT_DISABLE_DEPRECATE_WARNINGS
#include <zdict.h>
''', sources=SOURCES,
     include_dirs=INCLUDE_DIRS,
     extra_compile_args=['-DZSTD_MULTITHREAD'])

# Matches simple `#define NAME value` lines in the original headers.
DEFINE = re.compile(b'^\\#define ([a-zA-Z0-9_]+) ')

sources = []

# Feed normalized preprocessor output for headers into the cdef parser.
for header in HEADERS:
    preprocessed = preprocess(header)
    sources.append(normalize_output(preprocessed))

    # #define's are effectively erased as part of going through the
    # preprocessor. So perform a manual pass to re-add those to the cdef
    # source.
    with open(header, 'rb') as fh:
        for line in fh:
            line = line.strip()
            m = DEFINE.match(line)
            if not m:
                continue

            if m.group(1) == b'ZSTD_STATIC_LINKING_ONLY':
                continue

            # The parser doesn't like some constants with complex values.
            if m.group(1) in (b'ZSTD_LIB_VERSION', b'ZSTD_VERSION_STRING'):
                continue

            # The ... is magic syntax used by the cdef parser to resolve the
            # value at compile time.
            sources.append(m.group(0) + b' ...')

cdeflines = b'\n'.join(sources).splitlines()
cdeflines = [l for l in cdeflines if l.strip()]
ffi.cdef(b'\n'.join(cdeflines).decode('latin1'))

if __name__ == '__main__':
    ffi.compile()
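
    # Illustrative only: once the extension is built, the generated module
    # can be sanity checked with something like:
    #
    #   from _zstd_cffi import ffi, lib
    #   print(lib.ZSTD_versionNumber())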