mercurial/repocache.py
author Pierre-Yves David <pierre-yves.david@octobus.net>
Wed, 16 Oct 2019 17:49:30 +0200
changeset 43254 181d28ba05da
parent 43117 8ff1ecfadcd1
child 48875 6000f5b25c9b
permissions -rw-r--r--
copies: avoid instancing more changectx to access parent revisions

We just need to know the revision numbers of the parents; creating full
contexts is needlessly expensive. This provides a small but noticeable
performance boost.

revision: large amount; added files: large amount; rename small amount; c3b14617fbd7 9ba6ab77fd29
before: ! wall 2.885636 comb 2.900000 user 2.870000 sys 0.030000 (median of 10)
after:  ! wall 2.702270 comb 2.710000 user 2.690000 sys 0.020000 (median of 10)

revision: large amount; added files: small amount; rename small amount; c3b14617fbd7 f650a9b140d2
before: ! wall 4.298271 comb 4.290000 user 4.240000 sys 0.050000 (median of 10)
after:  ! wall 3.976610 comb 3.970000 user 3.920000 sys 0.050000 (median of 10)

revision: large amount; added files: large amount; rename large amount; 08ea3258278e d9fa043f30c0
before: ! wall 0.773397 comb 0.770000 user 0.770000 sys 0.000000 (median of 11)
after:  ! wall 0.701634 comb 0.700000 user 0.700000 sys 0.000000 (median of 13)

revision: small amount; added files: large amount; rename large amount; df6f7a526b60 a83dc6a2d56f
before: ! wall 0.013585 comb 0.010000 user 0.010000 sys 0.000000 (median of 217)
after:  ! wall 0.013550 comb 0.010000 user 0.010000 sys 0.000000 (median of 218)

revision: small amount; added files: large amount; rename small amount; 4aa4e1f8e19a 169138063d63
before: ! wall 0.003202 comb 0.000000 user 0.000000 sys 0.000000 (median of 929)
after:  ! wall 0.002993 comb 0.010000 user 0.010000 sys 0.000000 (median of 992)

revision: small amount; added files: small amount; rename small amount; 4bc173b045a6 964879152e2e
before: ! wall 0.000077 comb 0.000000 user 0.000000 sys 0.000000 (median of 12060)
after:  ! wall 0.000072 comb 0.000000 user 0.000000 sys 0.000000 (median of 12804)

revision: medium amount; added files: large amount; rename medium amount; c95f1ced15f2 2c68e87c3efe
before: ! wall 0.510614 comb 0.500000 user 0.500000 sys 0.000000 (median of 18)
after:  ! wall 0.473681 comb 0.470000 user 0.470000 sys 0.000000 (median of 20)

revision: medium amount; added files: medium amount; rename small amount; d343da0c55a8 d7746d32bf9d
before: ! wall 0.126552 comb 0.130000 user 0.130000 sys 0.000000 (median of 77)
after:  ! wall 0.115240 comb 0.110000 user 0.110000 sys 0.000000 (median of 85)

Differential Revision: https://phab.mercurial-scm.org/D7122

# repocache.py - in-memory repository cache for long-running services
#
# Copyright 2018 Yuya Nishihara <yuya@tcha.org>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import gc
import threading

from . import (
    error,
    hg,
    obsolete,
    scmutil,
    util,
)


class repoloader(object):
    """Load repositories in background thread

    This is designed for a forking server. A cached repo cannot be obtained
    until the server fork()s a worker and the loader thread stops.
    """

    def __init__(self, ui, maxlen):
        self._ui = ui.copy()
        self._cache = util.lrucachedict(max=maxlen)
        # use a deque and an Event instead of a Queue since a bounded deque
        # discards old entries to keep at most maxlen items.
        self._inqueue = collections.deque(maxlen=maxlen)
        self._accepting = False
        self._newentry = threading.Event()
        self._thread = None

    def start(self):
        assert not self._thread
        if self._inqueue.maxlen == 0:
            # no need to spawn loader thread as the cache is disabled
            return
        self._accepting = True
        self._thread = threading.Thread(target=self._mainloop)
        self._thread.start()

    def stop(self):
        if not self._thread:
            return
        self._accepting = False
        self._newentry.set()
        self._thread.join()
        self._thread = None
        self._cache.clear()
        self._inqueue.clear()

    def load(self, path):
        """Request to load the specified repository in background"""
        self._inqueue.append(path)
        self._newentry.set()

    def get(self, path):
        """Return a cached repo if available

        This function must be called after fork(), where the loader thread
        is stopped. Otherwise, the returned repo might be updated by the
        loader thread.
        """
        if self._thread and self._thread.is_alive():
            raise error.ProgrammingError(
                b'cannot obtain cached repo while loader is active'
            )
        return self._cache.peek(path, None)

    def _mainloop(self):
        while self._accepting:
            # Avoid heavy GC after fork(), which would cancel the benefit of
            # COW. We assume that GIL is acquired while GC is underway in the
            # loader thread. If that isn't true, we might have to move
            # gc.collect() to the main thread so that fork() would never stop
            # the thread where GC is in progress.
            gc.collect()

            self._newentry.wait()
            while self._accepting:
                self._newentry.clear()
                try:
                    path = self._inqueue.popleft()
                except IndexError:
                    break
                scmutil.callcatch(self._ui, lambda: self._load(path))

    def _load(self, path):
        start = util.timer()
        # TODO: repo should be recreated if storage configuration changed
        try:
            # pop before loading so an inconsistent state isn't exposed
            repo = self._cache.pop(path)
        except KeyError:
            repo = hg.repository(self._ui, path).unfiltered()
        _warmupcache(repo)
        repo.ui.log(
            b'repocache',
            b'loaded repo into cache: %s (in %.3fs)\n',
            path,
            util.timer() - start,
        )
        self._cache.insert(path, repo)


# TODO: think about proper API of preloading cache
def _warmupcache(repo):
    repo.invalidateall()
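    # merely accessing the attributes below forces the lazily-computed caches
    # (changelog, obsstore markers, obsolescence sets, phase data) to be
    # populated now, while the repo is still being loaded in the background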
    repo.changelog
    repo.obsstore._all
    repo.obsstore.successors
    repo.obsstore.predecessors
    repo.obsstore.children
    for name in obsolete.cachefuncs:
        obsolete.getrevs(repo, name)
    repo._phasecache.loadphaserevs(repo)


# TODO: think about proper API of attaching preloaded attributes
def copycache(srcrepo, destrepo):
    """Copy cached attributes from srcrepo to destrepo"""
    destfilecache = destrepo._filecache
    srcfilecache = srcrepo._filecache
    if b'changelog' in srcfilecache:
        destfilecache[b'changelog'] = ce = srcfilecache[b'changelog']
        ce.obj.opener = ce.obj._realopener = destrepo.svfs
    if b'obsstore' in srcfilecache:
        destfilecache[b'obsstore'] = ce = srcfilecache[b'obsstore']
        ce.obj.svfs = destrepo.svfs
    if b'_phasecache' in srcfilecache:
        destfilecache[b'_phasecache'] = ce = srcfilecache[b'_phasecache']
        ce.obj.opener = destrepo.svfs
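

# Illustrative example of how a forking server might combine repoloader and
# copycache (a sketch, assuming hg.repository()'s presetupfuncs hook;
# ``loader`` and ``path`` come from the surrounding server code and are
# assumptions, not part of this module):
#
#   cached = loader.get(path)
#
#   def _attachcaches(ui, repo):
#       if cached is not None:
#           copycache(cached, repo)
#
#   repo = hg.repository(ui, path, presetupfuncs=[_attachcaches])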