# repocache.py - in-memory repository cache for long-running services
#
# Copyright 2018 Yuya Nishihara <yuya@tcha.org>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import annotations

import collections
import gc
import threading

from . import (
    error,
    hg,
    obsolete,
    scmutil,
    util,
)


class repoloader:
    """Load repositories in background thread

    This is designed for a forking server. A cached repo cannot be obtained
    until the server fork()s a worker and the loader thread stops.
    """

    def __init__(self, ui, maxlen):
        self._ui = ui.copy()
        self._cache = util.lrucachedict(max=maxlen)
        # use a deque and an Event instead of a Queue, since a deque can
        # discard old items to keep at most maxlen of them
        self._inqueue = collections.deque(maxlen=maxlen)
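        # e.g. deque([b'a', b'b'], maxlen=2).append(b'c') silently drops
        # b'a', whereas queue.Queue(maxsize=2) would block (or raise
        # queue.Full) rather than discard the oldest item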
        self._accepting = False
        self._newentry = threading.Event()
        self._thread = None

    def start(self):
        assert not self._thread
        if self._inqueue.maxlen == 0:
            # no need to spawn a loader thread as the cache is disabled
            return
        self._accepting = True
        self._thread = threading.Thread(target=self._mainloop)
        self._thread.start()

    def stop(self):
        if not self._thread:
            return
        self._accepting = False
        self._newentry.set()
        self._thread.join()
        self._thread = None
        self._cache.clear()
        self._inqueue.clear()

    def load(self, path):
        """Request to load the specified repository in background"""
        self._inqueue.append(path)
        self._newentry.set()

    def get(self, path):
        """Return a cached repo if available

        This function must be called after fork(), at which point the loader
        thread has stopped. Otherwise, the returned repo might still be
        updated by the loader thread.
        """
        if self._thread and self._thread.is_alive():
            raise error.ProgrammingError(
                b'cannot obtain cached repo while loader is active'
            )
        return self._cache.peek(path, None)

    def _mainloop(self):
        while self._accepting:
            # Collect garbage now to avoid heavy GC after fork(), which would
            # defeat the benefit of copy-on-write (COW). We assume the GIL is
            # held while GC is underway in the loader thread. If that isn't
            # true, we might have to move gc.collect() to the main thread so
            # that fork() would never stop a thread mid-GC.
            gc.collect()

            self._newentry.wait()
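            # woken by load() or stop(); drain queued paths until empty,
            # re-checking _accepting so stop() can interrupt promptly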
            while self._accepting:
                self._newentry.clear()
                try:
                    path = self._inqueue.popleft()
                except IndexError:
                    break
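                # callcatch() invokes the lambda immediately, so `path` is
                # bound to this iteration's value (no late-binding surprise)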
                scmutil.callcatch(self._ui, lambda: self._load(path))

    def _load(self, path):
        start = util.timer()
        # TODO: repo should be recreated if storage configuration changed
        try:
            # pop before loading so an inconsistent state isn't exposed
            repo = self._cache.pop(path)
        except KeyError:
            repo = hg.repository(self._ui, path).unfiltered()
        _warmupcache(repo)
        repo.ui.log(
            b'repocache',
            b'loaded repo into cache: %s (in %.3fs)\n',
            path,
            util.timer() - start,
        )
        self._cache.insert(path, repo)


# TODO: think about proper API of preloading cache
def _warmupcache(repo):
    repo.invalidateall()
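    # the bare attribute accesses below force lazily-computed caches to be
    # populated now, in the loader thread, so a forked worker inherits the
    # warmed data via copy-on-write instead of recomputing it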
    repo.changelog
    repo.obsstore._all
    repo.obsstore.successors
    repo.obsstore.predecessors
    repo.obsstore.children
    for name in obsolete.cachefuncs:
        obsolete.getrevs(repo, name)
    # ensure the phase cache is fully initialized
    repo._phasecache.phase(repo, repo.changelog.tiprev())


# TODO: think about proper API of attaching preloaded attributes
def copycache(srcrepo, destrepo):
    """Copy cached attributes from srcrepo to destrepo"""
    destfilecache = destrepo._filecache
    srcfilecache = srcrepo._filecache
    if b'changelog' in srcfilecache:
        destfilecache[b'changelog'] = ce = srcfilecache[b'changelog']
        ce.obj.opener = ce.obj._inner.opener = destrepo.svfs
    if b'obsstore' in srcfilecache:
        destfilecache[b'obsstore'] = ce = srcfilecache[b'obsstore']
        ce.obj.svfs = destrepo.svfs
    if b'_phasecache' in srcfilecache:
        destfilecache[b'_phasecache'] = ce = srcfilecache[b'_phasecache']
        ce.obj.opener = destrepo.svfs
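

# A hedged usage sketch for copycache() (hypothetical caller; the fresh repo
# creation here is an illustrative assumption, not this module's API):
#
#   cached = loader.get(path)
#   repo = hg.repository(ui, path)
#   if cached is not None:
#       copycache(cached, repo)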