mercurial/repocache.py @ 43257:675c776fbcd1

sidedatacopies: directly fetch copies information from sidedata

When using the sidedata mode, we don't need a complicated and expensive
`context` object. Instead we directly fetch copies information from the
sidedata (through a changelogrevision object).

More optimisations coming.

revision: large amount; added files: large amount; rename small amount; c3b14617fbd7 9ba6ab77fd29
filelog: ! wall 3.679613 comb 3.680000 user 3.580000 sys 0.100000 (median of 3)
base:    ! wall 8.884369 comb 8.880000 user 8.850000 sys 0.030000 (median of 3)
before:  ! wall 4.681985 comb 4.680000 user 4.640000 sys 0.040000 (median of 3)
after:   ! wall 3.955894 comb 3.950000 user 3.940000 sys 0.010000 (median of 3)

revision: large amount; added files: small amount; rename small amount; c3b14617fbd7 f650a9b140d2
filelog: ! wall 0.003357 comb 0.010000 user 0.010000 sys 0.000000 (median of 781)
base:    ! wall 12.398524 comb 12.400000 user 12.330000 sys 0.070000 (median of 3)
before:  ! wall 6.459592 comb 6.470000 user 6.390000 sys 0.080000 (median of 3)
after:   ! wall 5.505774 comb 5.500000 user 5.410000 sys 0.090000 (median of 3)

revision: large amount; added files: large amount; rename large amount; 08ea3258278e d9fa043f30c0
filelog: ! wall 2.754687 comb 2.760000 user 2.650000 sys 0.110000 (median of 4)
base:    ! wall 1.423166 comb 1.420000 user 1.400000 sys 0.020000 (median of 8)
before:  ! wall 0.961048 comb 0.960000 user 0.940000 sys 0.020000 (median of 11)
after:   ! wall 0.882950 comb 0.880000 user 0.880000 sys 0.000000 (median of 11)

revision: small amount; added files: large amount; rename large amount; df6f7a526b60 a83dc6a2d56f
filelog: ! wall 1.552293 comb 1.550000 user 1.510000 sys 0.040000 (median of 6)
base:    ! wall 0.022662 comb 0.020000 user 0.020000 sys 0.000000 (median of 128)
before:  ! wall 0.021649 comb 0.020000 user 0.020000 sys 0.000000 (median of 135)
after:   ! wall 0.020951 comb 0.020000 user 0.020000 sys 0.000000 (median of 141)

revision: small amount; added files: large amount; rename small amount; 4aa4e1f8e19a 169138063d63
filelog: ! wall 1.500983 comb 1.500000 user 1.420000 sys 0.080000 (median of 7)
base:    ! wall 0.006956 comb 0.010000 user 0.010000 sys 0.000000 (median of 392)
before:  ! wall 0.004022 comb 0.000000 user 0.000000 sys 0.000000 (median of 735)
after:   ! wall 0.003988 comb 0.000000 user 0.000000 sys 0.000000 (median of 736)

revision: small amount; added files: small amount; rename small amount; 4bc173b045a6 964879152e2e
filelog: ! wall 0.011745 comb 0.020000 user 0.020000 sys 0.000000 (median of 250)
base:    ! wall 0.000156 comb 0.000000 user 0.000000 sys 0.000000 (median of 17180)
before:  ! wall 0.000118 comb 0.000000 user 0.000000 sys 0.000000 (median of 19170)
after:   ! wall 0.000097 comb 0.000000 user 0.000000 sys 0.000000 (median of 27276)

revision: medium amount; added files: large amount; rename medium amount; c95f1ced15f2 2c68e87c3efe
filelog: ! wall 3.228230 comb 3.230000 user 3.110000 sys 0.120000 (median of 4)
base:    ! wall 0.997640 comb 1.000000 user 0.980000 sys 0.020000 (median of 10)
before:  ! wall 0.679500 comb 0.680000 user 0.680000 sys 0.000000 (median of 15)
after:   ! wall 0.596779 comb 0.600000 user 0.600000 sys 0.000000 (median of 17)

revision: medium amount; added files: medium amount; rename small amount; d343da0c55a8 d7746d32bf9d
filelog: ! wall 1.052501 comb 1.060000 user 1.040000 sys 0.020000 (median of 10)
base:    ! wall 0.214519 comb 0.220000 user 0.220000 sys 0.000000 (median of 45)
before:  ! wall 0.149675 comb 0.150000 user 0.150000 sys 0.000000 (median of 66)
after:   ! wall 0.130786 comb 0.130000 user 0.130000 sys 0.000000 (median of 75)

Differential Revision: https://phab.mercurial-scm.org/D7072
author Pierre-Yves David <pierre-yves.david@octobus.net>
date Wed, 02 Oct 2019 17:53:47 -0400
parents 8ff1ecfadcd1
children 6000f5b25c9b
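
The description above boils down to reading copy metadata straight from the
changelog entry instead of building full `context` objects. A minimal sketch
of the idea (a sketch only: `repo` and `rev` are placeholders, and the real
change lives in mercurial/copies.py, not in the file shown below):

    cl = repo.changelog
    entry = cl.changelogrevision(rev)
    # copy dicts map destination file to source file; with sidedata enabled
    # they come from the revision's sidedata rather than from the filelogs
    p1copies = entry.p1copies
    p2copies = entry.p2copies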

# repocache.py - in-memory repository cache for long-running services
#
# Copyright 2018 Yuya Nishihara <yuya@tcha.org>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import gc
import threading

from . import (
    error,
    hg,
    obsolete,
    scmutil,
    util,
)


class repoloader(object):
    """Load repositories in background thread

    This is designed for a forking server. A cached repo cannot be obtained
    until the server fork()s a worker and the loader thread stops; see the
    usage sketch after this class.
    """

    def __init__(self, ui, maxlen):
        self._ui = ui.copy()
        self._cache = util.lrucachedict(max=maxlen)
        # use deque and Event instead of Queue since deque can discard
        # old items to keep at most maxlen items.
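        # (e.g. a deque with maxlen=3 holding [1, 2, 3] silently drops 1
        # when 4 is appended, whereas a bounded queue.Queue would block or
        # raise queue.Full)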
        self._inqueue = collections.deque(maxlen=maxlen)
        self._accepting = False
        self._newentry = threading.Event()
        self._thread = None

    def start(self):
        assert not self._thread
        if self._inqueue.maxlen == 0:
            # no need to spawn loader thread as the cache is disabled
            return
        self._accepting = True
        self._thread = threading.Thread(target=self._mainloop)
        self._thread.start()

    def stop(self):
        if not self._thread:
            return
        self._accepting = False
        self._newentry.set()
        self._thread.join()
        self._thread = None
        self._cache.clear()
        self._inqueue.clear()

    def load(self, path):
        """Request to load the specified repository in background"""
        self._inqueue.append(path)
        self._newentry.set()

    def get(self, path):
        """Return a cached repo if available

        This function must be called after fork(), where the loader thread
        is stopped. Otherwise, the returned repo might be updated by the
        loader thread.
        """
        if self._thread and self._thread.is_alive():
            raise error.ProgrammingError(
                b'cannot obtain cached repo while loader is active'
            )
        return self._cache.peek(path, None)

    def _mainloop(self):
        while self._accepting:
            # Avoid heavy GC after fork(), which would cancel the benefit of
            # COW. We assume that GIL is acquired while GC is underway in the
            # loader thread. If that isn't true, we might have to move
            # gc.collect() to the main thread so that fork() would never stop
            # the thread where GC is in progress.
            gc.collect()

            self._newentry.wait()
            while self._accepting:
                self._newentry.clear()
                try:
                    path = self._inqueue.popleft()
                except IndexError:
                    break
                scmutil.callcatch(self._ui, lambda: self._load(path))

    def _load(self, path):
        start = util.timer()
        # TODO: repo should be recreated if storage configuration changed
        try:
            # pop before loading so inconsistent state wouldn't be exposed
            repo = self._cache.pop(path)
        except KeyError:
            repo = hg.repository(self._ui, path).unfiltered()
        _warmupcache(repo)
        repo.ui.log(
            b'repocache',
            b'loaded repo into cache: %s (in %.3fs)\n',
            path,
            util.timer() - start,
        )
        self._cache.insert(path, repo)


# TODO: think about proper API of preloading cache
def _warmupcache(repo):
    repo.invalidateall()
    repo.changelog
    repo.obsstore._all
    repo.obsstore.successors
    repo.obsstore.predecessors
    repo.obsstore.children
    for name in obsolete.cachefuncs:
        obsolete.getrevs(repo, name)
    repo._phasecache.loadphaserevs(repo)


# TODO: think about proper API of attaching preloaded attributes
def copycache(srcrepo, destrepo):
    """Copy cached attributes from srcrepo to destrepo"""
    destfilecache = destrepo._filecache
    srcfilecache = srcrepo._filecache
    if b'changelog' in srcfilecache:
        destfilecache[b'changelog'] = ce = srcfilecache[b'changelog']
        ce.obj.opener = ce.obj._realopener = destrepo.svfs
    if b'obsstore' in srcfilecache:
        destfilecache[b'obsstore'] = ce = srcfilecache[b'obsstore']
        ce.obj.svfs = destrepo.svfs
    if b'_phasecache' in srcfilecache:
        destfilecache[b'_phasecache'] = ce = srcfilecache[b'_phasecache']
        ce.obj.opener = destrepo.svfs
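

# A rough sketch of how a long-running service might combine repoloader and
# copycache (hypothetical; `loader`, `ui` and `path` are illustrative). The
# cached repo is unfiltered, so a fresh repo object is opened per request and
# only the expensive cached attributes are carried over:
#
#   cached = loader.get(path)        # only valid after fork(), see get()
#   repo = hg.repository(ui, path)
#   if cached is not None:
#       copycache(cached, repo)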