# test-batching.py - tests for transparent command batching
#
# Copyright 2011 Peter Arrenbrecht <peter@arrenbrecht.ch>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from mercurial.peer import localbatch, batchable, future
from mercurial.wireproto import remotebatch

# equivalent of repo.repository
class thing(object):
    def hello(self):
        return "Ready."

# equivalent of localrepo.localrepository
class localthing(thing):
    def foo(self, one, two=None):
        if one:
            return "%s and %s" % (one, two,)
        return "Nope"
    def bar(self, b, a):
        return "%s und %s" % (b, a,)
    def greet(self, name=None):
        return "Hello, %s" % name
    def batch(self):
        '''Support for local batching.'''
        return localbatch(self)

# usage of "thing" interface
def use(it):

    # Direct call to base method shared between client and server.
    print it.hello()

    # Direct calls to proxied methods. They cause individual roundtrips.
    print it.foo("Un", two="Deux")
    print it.bar("Eins", "Zwei")

    # Batched call to a couple of (possibly proxied) methods.
    batch = it.batch()
    # The calls return futures to eventually hold results.
    foo = batch.foo(one="One", two="Two")
    foo2 = batch.foo(None)
    bar = batch.bar("Eins", "Zwei")
    # We can call non-batchable proxy methods, but they break the current
    # batch request and cause additional roundtrips.
    greet = batch.greet(name="John Smith")
    # We can also add local methods into the mix, but they break the batch too.
    hello = batch.hello()
    bar2 = batch.bar(b="Uno", a="Due")
    # Only now are all the calls executed in sequence, with as few roundtrips
    # as possible.
    batch.submit()
    # After the call to submit, the futures actually contain values.
    print foo.value
    print foo2.value
    print bar.value
    print greet.value
    print hello.value
    print bar2.value

# local usage
mylocal = localthing()
print
print "== Local"
use(mylocal)

# demo remoting; mimics what wireproto and HTTP/SSH do

# shared

def escapearg(plain):
    return (plain
            .replace(':', '::')
            .replace(',', ':,')
            .replace(';', ':;')
            .replace('=', ':='))
def unescapearg(escaped):
    return (escaped
            .replace(':=', '=')
            .replace(':;', ';')
            .replace(':,', ',')
            .replace('::', ':'))
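
# Illustrative sanity check (not part of the original test): escapearg()
# must round-trip any string containing the wire separators ';', ',', '='
# and the escape character ':' itself.
assert unescapearg(escapearg('a=b,c;d:e')) == 'a=b,c;d:e'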

# server side

# equivalent of wireproto's global functions
class server(object):
    def __init__(self, local):
        self.local = local
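    # Dispatch a command whose arguments arrive as pre-split 'name=value'
    # strings; mirrors the query parsing done in perform() below.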
    def _call(self, name, args):
        args = dict(arg.split('=', 1) for arg in args)
        return getattr(self, name)(**args)
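    # Handle one wire request of the form 'name?arg1=val1&arg2=val2', this
    # demo's stand-in for a real transport such as HTTP or SSH.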
    def perform(self, req):
        print "REQ:", req
        name, args = req.split('?', 1)
        args = args.split('&')
        vals = dict(arg.split('=', 1) for arg in args)
        res = getattr(self, name)(**vals)
        print "  ->", res
        return res
    def batch(self, cmds):
        res = []
        for pair in cmds.split(';'):
            name, args = pair.split(':', 1)
            vals = {}
            for a in args.split(','):
                if a:
                    n, v = a.split('=')
                    vals[n] = unescapearg(v)
            res.append(escapearg(getattr(self, name)(**vals)))
        return ';'.join(res)
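    # For illustration, a batched foo() plus bar() arrives as one request
    # along the lines of
    #   batch?cmds=foo:one=Pof,two=Uxp;bar:b=Fjot,a=[xfj
    # with each value mangled by the client and escaped via escapearg()
    # before being joined with ',' and ';'.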
    def foo(self, one, two):
        return mangle(self.local.foo(unmangle(one), unmangle(two)))
    def bar(self, b, a):
        return mangle(self.local.bar(unmangle(b), unmangle(a)))
    def greet(self, name):
        return mangle(self.local.greet(unmangle(name)))
myserver = server(mylocal)

# local side

# equivalent of wireproto.encode/decodelist, that is, type-specific marshalling
# here we just transform the strings a bit to check we're properly en-/decoding
def mangle(s):
    return ''.join(chr(ord(c) + 1) for c in s)
def unmangle(s):
    return ''.join(chr(ord(c) - 1) for c in s)
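
# Illustrative sanity check (not part of the original test): mangling just
# shifts every character code by one, so a round trip is the identity.
assert unmangle(mangle('Ready.')) == 'Ready.'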

# equivalent of wireproto.wirerepository and something like HTTP's wire format
class remotething(thing):
    def __init__(self, server):
        self.server = server
    def _submitone(self, name, args):
        req = name + '?' + '&'.join(['%s=%s' % (n, v) for n, v in args])
        return self.server.perform(req)
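    # Encode a list of (name, args) commands as 'name:n1=v1,n2=v2;...' and
    # send them in a single 'batch' request; the server replies with one
    # escaped result per command, joined by ';'.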
    def _submitbatch(self, cmds):
        req = []
        for name, args in cmds:
            args = ','.join(n + '=' + escapearg(v) for n, v in args)
            req.append(name + ':' + args)
        req = ';'.join(req)
        res = self._submitone('batch', [('cmds', req,)])
        # The server escapes each result, so undo that after splitting.
        return [unescapearg(r) for r in res.split(';')]

    def batch(self):
        return remotebatch(self)

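    # Batchable methods are written as generators: the first yield hands the
    # encoded arguments and a future to the batcher (or a (value, None) pair
    # when no remote call is needed, as in foo() below), and the final yield
    # produces the decoded result once the future has been filled in.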
    @batchable
    def foo(self, one, two=None):
        if not one:
            yield "Nope", None
        encargs = [('one', mangle(one),), ('two', mangle(two),)]
        encresref = future()
        yield encargs, encresref
        yield unmangle(encresref.value)

    @batchable
    def bar(self, b, a):
        encresref = future()
        yield [('b', mangle(b),), ('a', mangle(a),)], encresref
        yield unmangle(encresref.value)

    # greet is coded directly. It therefore does not support batching. If it
    # does appear in a batch, the batch is split around greet, and the call to
    # greet is done in its own roundtrip.
    def greet(self, name=None):
        return unmangle(self._submitone('greet', [('name', mangle(name),)]))

# demo remote usage

myproxy = remotething(myserver)
print
print "== Remote"
use(myproxy)