tests/test-filecache.py

import sys, os, subprocess

if subprocess.call(['python', '%s/hghave' % os.environ['TESTDIR'],
                    'cacheable']):
    sys.exit(80)

from mercurial import util, scmutil, extensions, hg, ui

filecache = scmutil.filecache

class fakerepo(object):
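    # Minimal stand-in for a localrepo: it supplies the join()/sjoin() path
    # helpers and the _filecache dict that the filecache machinery relies on.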
    def __init__(self):
        self._filecache = {}

    def join(self, p):
        return p

    def sjoin(self, p):
        return p

    @filecache('x', 'y')
    def cached(self):
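        # 'creating' is printed only when this function body actually runs,
        # which makes cache misses visible in the test output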
        print 'creating'
        return 'string from function'

    def invalidate(self):
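        # drop every cached property so the next access goes through the
        # filecache staleness check again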
        for k in self._filecache:
            try:
                delattr(self, k)
            except AttributeError:
                pass

def basic(repo):
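    # Exercise the cache through a series of file creations, size changes and
    # atomic (inode-changing) replacements, invalidating between each step.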
    print "* neither file exists"
    # calls function
    repo.cached

    repo.invalidate()
    print "* neither file still exists"
    # uses cache
    repo.cached

    # create empty file
    f = open('x', 'w')
    f.close()
    repo.invalidate()
    print "* empty file x created"
    # should recreate the object
    repo.cached

    f = open('x', 'w')
    f.write('a')
    f.close()
    repo.invalidate()
    print "* file x changed size"
    # should recreate the object
    repo.cached

    repo.invalidate()
    print "* nothing changed with either file"
    # stats file again, reuses object
    repo.cached

    # atomically replace the file; the size stays the same.  With luck
    # st_mtime does not change either, so the only thing that can keep the
    # cache from being used here is the inode change.
    f = scmutil.opener('.')('x', 'w', atomictemp=True)
    f.write('b')
    f.close()

    repo.invalidate()
    print "* file x changed inode"
    repo.cached

    # create empty file y
    f = open('y', 'w')
    f.close()
    repo.invalidate()
    print "* empty file y created"
    # should recreate the object
    repo.cached

    f = open('y', 'w')
    f.write('A')
    f.close()
    repo.invalidate()
    print "* file y changed size"
    # should recreate the object
    repo.cached

    f = scmutil.opener('.')('y', 'w', atomictemp=True)
    f.write('B')
    f.close()

    repo.invalidate()
    print "* file y changed inode"
    repo.cached

    f = scmutil.opener('.')('x', 'w', atomictemp=True)
    f.write('c')
    f.close()
    f = scmutil.opener('.')('y', 'w', atomictemp=True)
    f.write('C')
    f.close()

    repo.invalidate()
    print "* both files changed inode"
    repo.cached

def fakeuncacheable():
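    # Wrap util.cachestat so every stat result is reported as uncacheable;
    # the cached value should then be recreated on each check.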
    def wrapcacheable(orig, *args, **kwargs):
        return False

    def wrapinit(orig, *args, **kwargs):
        pass

    originit = extensions.wrapfunction(util.cachestat, '__init__', wrapinit)
    origcacheable = extensions.wrapfunction(util.cachestat, 'cacheable',
                                            wrapcacheable)

    for fn in ['x', 'y']:
        try:
            os.remove(fn)
        except OSError:
            pass

    basic(fakerepo())

    util.cachestat.cacheable = origcacheable
    util.cachestat.__init__ = originit

def test_filecache_synced():
    # test old behaviour that caused filecached properties to go out of sync
    os.system('hg init && echo a >> a && hg ci -qAm.')
    repo = hg.repository(ui.ui())
    # the first rollback clears the filecache, but the changelog stays in
    # __dict__
    repo.rollback()
    repo.commit('.')
    # second rollback comes along and touches the changelog externally
    # (file is moved)
    repo.rollback()
    # but since the changelog is no longer under filecache control, we don't
    # notice that it changed, and return the old changelog without
    # reconstructing it
    repo.commit('.')

def setbeforeget(repo):
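    # Assigning to the property before it has ever been read should stick
    # until one of the tracked files actually changes on disk.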
    os.remove('x')
    os.remove('y')
    repo.cached = 'string set externally'
    repo.invalidate()
    print "* neither file exists"
    print repo.cached
    repo.invalidate()
    f = open('x', 'w')
    f.write('a')
    f.close()
    print "* file x created"
    print repo.cached

    repo.cached = 'string 2 set externally'
    repo.invalidate()
    print "* string set externally again"
    print repo.cached

    repo.invalidate()
    f = open('y', 'w')
    f.write('b')
    f.close()
    print "* file y created"
    print repo.cached

print 'basic:'
print
basic(fakerepo())
print
print 'fakeuncacheable:'
print
fakeuncacheable()
test_filecache_synced()
print
print 'setbeforeget:'
print
setbeforeget(fakerepo())