# test-batching.py - tests for transparent command batching
#
# Copyright 2011 Peter Arrenbrecht <peter@arrenbrecht.ch>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from mercurial.wireproto import localbatch, remotebatch, batchable, future

# equivalent of repo.repository
class thing(object):
    def hello(self):
        return "Ready."

# equivalent of localrepo.localrepository
class localthing(thing):
    def foo(self, one, two=None):
        if one:
            return "%s and %s" % (one, two,)
        return "Nope"
    def bar(self, b, a):
        return "%s und %s" % (b, a,)
    def greet(self, name=None):
        return "Hello, %s" % name
    def batch(self):
        '''Support for local batching.'''
        return localbatch(self)

# usage of "thing" interface
def use(it):

    # Direct call to base method shared between client and server.
    print it.hello()

    # Direct calls to proxied methods. They cause individual roundtrips.
    print it.foo("Un", two="Deux")
    print it.bar("Eins", "Zwei")

    # Batched call to a couple of (possibly proxied) methods.
    batch = it.batch()
    # The calls return futures to eventually hold results.
    foo = batch.foo(one="One", two="Two")
    foo2 = batch.foo(None)
    bar = batch.bar("Eins", "Zwei")
    # We can call non-batchable proxy methods, but they break the current batch
    # request and cause additional roundtrips.
    greet = batch.greet(name="John Smith")
    # We can also add local methods into the mix, but they break the batch too.
    hello = batch.hello()
    bar2 = batch.bar(b="Uno", a="Due")
    # Only now are all the calls executed in sequence, with as few roundtrips
    # as possible.
    batch.submit()
    # After the call to submit, the futures actually contain values.
    print foo.value
    print foo2.value
    print bar.value
    print greet.value
    print hello.value
    print bar2.value

# local usage
mylocal = localthing()
print
print "== Local"
use(mylocal)

# demo remoting; mimics what wireproto and HTTP/SSH do

# shared

def escapearg(plain):
    return (plain
            .replace(':', '::')
            .replace(',', ':,')
            .replace(';', ':;')
            .replace('=', ':='))
def unescapearg(escaped):
    return (escaped
            .replace(':=', '=')
            .replace(':;', ';')
            .replace(':,', ',')
            .replace('::', ':'))
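
# A silent sanity check of the escaping scheme above (it produces no output):
# every framing character must survive a round-trip through escape/unescape.
assert unescapearg(escapearg('name=x:y,z;q')) == 'name=x:y,z;q'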

# server side

# equivalent of wireproto's global functions
class server(object):
    def __init__(self, local):
        self.local = local
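    # _call mirrors wireproto's generic command dispatch (a command name plus
    # 'key=value' argument strings); this test drives the server through
    # perform() and batch() below instead.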
    def _call(self, name, args):
        args = dict(arg.split('=', 1) for arg in args)
        return getattr(self, name)(**args)
    def perform(self, req):
        print "REQ:", req
        name, args = req.split('?', 1)
        args = args.split('&')
        vals = dict(arg.split('=', 1) for arg in args)
        res = getattr(self, name)(**vals)
        print "  ->", res
        return res
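    # A batched request arrives as a single 'cmds' string of the form
    # "name:key=val,key=val;name:key=val".  Values are escaped with escapearg
    # so that ':', ',', ';' and '=' inside a value do not clash with the
    # framing; each result is escaped again before the results are joined
    # with ';'.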
    def batch(self, cmds):
        res = []
        for pair in cmds.split(';'):
            name, args = pair.split(':', 1)
            vals = {}
            for a in args.split(','):
                if a:
                    n, v = a.split('=')
                    vals[n] = unescapearg(v)
            res.append(escapearg(getattr(self, name)(**vals)))
        return ';'.join(res)
    def foo(self, one, two):
        return mangle(self.local.foo(unmangle(one), unmangle(two)))
    def bar(self, b, a):
        return mangle(self.local.bar(unmangle(b), unmangle(a)))
    def greet(self, name):
        return mangle(self.local.greet(unmangle(name)))
myserver = server(mylocal)

# local side

# equivalent of wireproto.encode/decodelist, that is, type-specific marshalling
# here we just transform the strings a bit to check we're properly en-/decoding
def mangle(s):
    return ''.join(chr(ord(c) + 1) for c in s)
def unmangle(s):
    return ''.join(chr(ord(c) - 1) for c in s)
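
# A silent sanity check of the toy marshalling above (it produces no output):
# mangling shifts every byte up by one and unmangling shifts it back down.
assert mangle("HAL") == "IBM"
assert unmangle(mangle("Hello, World")) == "Hello, World"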

# equivalent of wireproto.wirerepository and something like http's wire format
class remotething(thing):
    def __init__(self, server):
        self.server = server
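    # _submitone encodes one call as "name?key=val&key=val" (a toy
    # query-string format); _submitbatch packs several calls into a single
    # such request for the server's 'batch' command and splits the combined
    # reply back into individual results.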
    def _submitone(self, name, args):
        req = name + '?' + '&'.join(['%s=%s' % (n, v) for n, v in args])
        return self.server.perform(req)
    def _submitbatch(self, cmds):
        req = []
        for name, args in cmds:
            args = ','.join(n + '=' + escapearg(v) for n, v in args)
            req.append(name + ':' + args)
        req = ';'.join(req)
        res = self._submitone('batch', [('cmds', req,)])
        return res.split(';')

    def batch(self):
        return remotebatch(self)
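    # The @batchable generators below follow wireproto's coroutine protocol
    # (a brief sketch): the first yield hands back (encoded args, future).
    # If the future is None, the first element already is the final result
    # and nothing is sent over the wire.  Otherwise the encoded args are sent
    # to the server, the future is filled with the encoded reply, and the
    # generator is resumed; its next yield produces the decoded result.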

    @batchable
    def foo(self, one, two=None):
        if not one:
            yield "Nope", None
        encargs = [('one', mangle(one),), ('two', mangle(two),)]
        encresref = future()
        yield encargs, encresref
        yield unmangle(encresref.value)

    @batchable
    def bar(self, b, a):
        encresref = future()
        yield [('b', mangle(b),), ('a', mangle(a),)], encresref
        yield unmangle(encresref.value)

    # greet is coded directly. It therefore does not support batching. If it
    # does appear in a batch, the batch is split around greet, and the call to
    # greet is done in its own roundtrip.
    def greet(self, name=None):
        return unmangle(self._submitone('greet', [('name', mangle(name),)]))

# demo remote usage

myproxy = remotething(myserver)
print
print "== Remote"
use(myproxy)