util: teach lrucachedict to enforce a max total cost
Now that lrucachedict entries can have a numeric cost associated
with them and we can easily pop the oldest item in the cache, it
becomes relatively trivial to enforce a high water mark on the
total cost of items in the cache.
This commit teaches lrucachedict instances to have a max cost
associated with them. When items are inserted, we pop old items
until enough "cost" frees up to make room for the new item.
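For illustration, a minimal usage sketch (this assumes the
insert(key, value, cost=...) API from the previous commits plus the
new maxcost constructor argument; it sketches the intended behavior,
not the exact implementation):

  from mercurial import util

  d = util.lrucachedict(4, maxcost=100)
  d.insert('a', 'va', cost=60)
  d.insert('b', 'vb', cost=60)
  # Inserting 'b' raises the total cost to 120, above the limit of
  # 100, so the oldest entry ('a') is popped to get back under it.
  assert 'a' not in d
  assert 'b' in d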
This feature is close to zero cost when not used (modulo the insertion
regression introduced by the previous commit):
$ ./hg perflrucachedict --size 4 --gets 1000000 --sets 1000000 --mixed 1000000
! gets
! wall 0.607444 comb 0.610000 user 0.610000 sys 0.000000 (best of 17)
! wall 0.601653 comb 0.600000 user 0.600000 sys 0.000000 (best of 17)
! inserts
! wall 0.678261 comb 0.680000 user 0.680000 sys 0.000000 (best of 14)
! wall 0.685042 comb 0.680000 user 0.680000 sys 0.000000 (best of 15)
! sets
! wall 0.808770 comb 0.800000 user 0.800000 sys 0.000000 (best of 13)
! wall 0.834241 comb 0.830000 user 0.830000 sys 0.000000 (best of 12)
! mixed
! wall 0.782441 comb 0.780000 user 0.780000 sys 0.000000 (best of 13)
! wall 0.803804 comb 0.800000 user 0.800000 sys 0.000000 (best of 13)
$ hg perflrucachedict --size 1000 --gets 1000000 --sets 1000000 --mixed 1000000
! init
! wall 0.006952 comb 0.010000 user 0.010000 sys 0.000000 (best of 418)
! gets
! wall 0.613350 comb 0.610000 user 0.610000 sys 0.000000 (best of 17)
! wall 0.617415 comb 0.620000 user 0.620000 sys 0.000000 (best of 17)
! inserts
! wall 0.701270 comb 0.700000 user 0.700000 sys 0.000000 (best of 15)
! wall 0.700516 comb 0.700000 user 0.700000 sys 0.000000 (best of 15)
! sets
! wall 0.825720 comb 0.830000 user 0.830000 sys 0.000000 (best of 13)
! wall 0.837946 comb 0.840000 user 0.830000 sys 0.010000 (best of 12)
! mixed
! wall 0.821644 comb 0.820000 user 0.820000 sys 0.000000 (best of 13)
! wall 0.850559 comb 0.850000 user 0.850000 sys 0.000000 (best of 12)
I reckon the slight slowdown on inserts is due to the added "if" checks.
For caches with total cost limiting enabled:
$ hg perflrucachedict --size 4 --gets 1000000 --sets 1000000 --mixed 1000000 --costlimit 100
! gets w/ cost limit
! wall 0.598737 comb 0.590000 user 0.590000 sys 0.000000 (best of 17)
! inserts w/ cost limit
! wall 1.694282 comb 1.700000 user 1.700000 sys 0.000000 (best of 6)
! mixed w/ cost limit
! wall 1.157655 comb 1.150000 user 1.150000 sys 0.000000 (best of 9)
$ hg perflrucachedict --size 1000 --gets 1000000 --sets 1000000 --mixed 1000000 --costlimit 10000
! gets w/ cost limit
! wall 0.598526 comb 0.600000 user 0.600000 sys 0.000000 (best of 17)
! inserts w/ cost limit
! wall 37.838315 comb 37.840000 user 37.840000 sys 0.000000 (best of 3)
! mixed w/ cost limit
! wall 18.060198 comb 18.060000 user 18.060000 sys 0.000000 (best of 3)
$ hg perflrucachedict --size 1000 --gets 1000000 --sets 1000000 --mixed 1000000 --costlimit 10000 --mixedgetfreq 90
! gets w/ cost limit
! wall 0.600024 comb 0.600000 user 0.600000 sys 0.000000 (best of 17)
! inserts w/ cost limit
! wall 37.154547 comb 37.120000 user 37.120000 sys 0.000000 (best of 3)
! mixed w/ cost limit
! wall 4.381602 comb 4.380000 user 4.370000 sys 0.010000 (best of 3)
The functions we're benchmarking are slightly different, which could
move the numbers by a few milliseconds. But the slowdown on inserts is
too large to be explained by that. The slowness comes from insert-heavy
operations needing to call popoldest() repeatedly once the cache is at
capacity. The next commit will address this.
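For reference, the shape of that hot path is roughly the following (a
sketch; the real insert() does more bookkeeping):

  # with a cost limit set, insert() evicts oldest entries until the
  # total cost is back under the high water mark
  while d.totalcost > d.maxcost and len(d) > 1:
      d.popoldest()

With many small-cost entries, a single insert can therefore trigger a
long run of popoldest() calls.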
Differential Revision: https://phab.mercurial-scm.org/D4503
# test-batching.py - tests for transparent command batching
#
# Copyright 2011 Peter Arrenbrecht <peter@arrenbrecht.ch>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import, print_function
import contextlib
from mercurial import (
    localrepo,
    wireprotov1peer,
)
# equivalent of repo.repository
class thing(object):
    def hello(self):
        return "Ready."

# equivalent of localrepo.localrepository
class localthing(thing):
    def foo(self, one, two=None):
        if one:
            return "%s and %s" % (one, two,)
        return "Nope"
    def bar(self, b, a):
        return "%s und %s" % (b, a,)
    def greet(self, name=None):
        return "Hello, %s" % name
    @contextlib.contextmanager
    def commandexecutor(self):
        e = localrepo.localcommandexecutor(self)
        try:
            yield e
        finally:
            e.close()
# usage of "thing" interface
def use(it):
    # Direct call to base method shared between client and server.
    print(it.hello())

    # Direct calls to proxied methods. They cause individual roundtrips.
    print(it.foo("Un", two="Deux"))
    print(it.bar("Eins", "Zwei"))

    # Batched call to a couple of proxied methods.
    with it.commandexecutor() as e:
        ffoo = e.callcommand('foo', {'one': 'One', 'two': 'Two'})
        fbar = e.callcommand('bar', {'b': 'Eins', 'a': 'Zwei'})
        fbar2 = e.callcommand('bar', {'b': 'Uno', 'a': 'Due'})

    print(ffoo.result())
    print(fbar.result())
    print(fbar2.result())
# local usage
mylocal = localthing()
print()
print("== Local")
use(mylocal)
# demo remoting; mimics what wireproto and HTTP/SSH do
# shared
def escapearg(plain):
    return (plain
            .replace(':', '::')
            .replace(',', ':,')
            .replace(';', ':;')
            .replace('=', ':='))

def unescapearg(escaped):
    return (escaped
            .replace(':=', '=')
            .replace(':;', ';')
            .replace(':,', ',')
            .replace('::', ':'))
# server side
# equivalent of wireproto's global functions
class server(object):
    def __init__(self, local):
        self.local = local
    def _call(self, name, args):
        args = dict(arg.split('=', 1) for arg in args)
        return getattr(self, name)(**args)
    def perform(self, req):
        print("REQ:", req)
        name, args = req.split('?', 1)
        args = args.split('&')
        vals = dict(arg.split('=', 1) for arg in args)
        res = getattr(self, name)(**vals)
        print(" ->", res)
        return res
    def batch(self, cmds):
        res = []
        for pair in cmds.split(';'):
            name, args = pair.split(':', 1)
            vals = {}
            for a in args.split(','):
                if a:
                    n, v = a.split('=')
                    vals[n] = unescapearg(v)
            res.append(escapearg(getattr(self, name)(**vals)))
        return ';'.join(res)
    def foo(self, one, two):
        return mangle(self.local.foo(unmangle(one), unmangle(two)))
    def bar(self, b, a):
        return mangle(self.local.bar(unmangle(b), unmangle(a)))
    def greet(self, name):
        return mangle(self.local.greet(unmangle(name)))
myserver = server(mylocal)
# local side
# equivalent of wireproto.encode/decodelist, that is, type-specific marshalling
# here we just transform the strings a bit to check we're properly en-/decoding
def mangle(s):
    return ''.join(chr(ord(c) + 1) for c in s)

def unmangle(s):
    return ''.join(chr(ord(c) - 1) for c in s)
# equivalent of wireproto.wirerepository and something like http's wire format
class remotething(thing):
    def __init__(self, server):
        self.server = server
    def _submitone(self, name, args):
        req = name + '?' + '&'.join(['%s=%s' % (n, v) for n, v in args])
        return self.server.perform(req)
    def _submitbatch(self, cmds):
        req = []
        for name, args in cmds:
            args = ','.join(n + '=' + escapearg(v) for n, v in args)
            req.append(name + ':' + args)
        req = ';'.join(req)
        res = self._submitone('batch', [('cmds', req,)])
        for r in res.split(';'):
            yield r
    @contextlib.contextmanager
    def commandexecutor(self):
        e = wireprotov1peer.peerexecutor(self)
        try:
            yield e
        finally:
            e.close()
    @wireprotov1peer.batchable
    def foo(self, one, two=None):
        encargs = [('one', mangle(one),), ('two', mangle(two),)]
        encresref = wireprotov1peer.future()
        yield encargs, encresref
        yield unmangle(encresref.value)
    @wireprotov1peer.batchable
    def bar(self, b, a):
        encresref = wireprotov1peer.future()
        yield [('b', mangle(b),), ('a', mangle(a),)], encresref
        yield unmangle(encresref.value)

    # greet is coded directly. It therefore does not support batching. If it
    # does appear in a batch, the batch is split around greet, and the call to
    # greet is done in its own roundtrip.
    def greet(self, name=None):
        return unmangle(self._submitone('greet', [('name', mangle(name),)]))
# demo remote usage
myproxy = remotething(myserver)
print()
print("== Remote")
use(myproxy)