view mercurial/repocache.py @ 51871:cfd30df0f8e4
bundlerepo: fix mismatches with repository and revlog classes
Both pytype and PyCharm complained that `write()` and `_write()` in the
`bundlephasecache` class aren't proper overrides; indeed, they appear to be
missing an argument that the base class has.
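As a purely illustrative sketch of the kind of mismatch the checkers flag (generic names, not the real phasecache API), an override that drops a parameter of the base method is reported as incompatible:

  class base:
      def write(self, repo):
          """Base method takes an extra argument."""

  class broken(base):
      def write(self):  # checkers flag this: signature incompatible with base.write
          raise NotImplementedError

  class fixed(base):
      def write(self, repo):  # matching the base signature satisfies pytype and PyCharm
          raise NotImplementedError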
PyCharm and pytype also complained that the `revlog.revlog` class doesn't have a
`_chunk()` method. That method looks like it was moved from `revlog` to
`_InnerRevlog` back in e8ad6d8de8b8, and the breakage wasn't caught because this
module wasn't type checked. However, I couldn't find a working syntax for
`revlog.revlog._inner._chunk(self, rev)`, as the checker complained about passing
too many args. `bundlerevlog._rawtext()` uses the `super(...)` style to call the
superclass, so hopefully that works here too, even with the wonky dynamic
subclassing. The `revlog` class also needed its `_inner` field annotated as
`_InnerRevlog`, because that attribute isn't set in the constructor.
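That last point is a standard pattern: a bare annotation in the class body (hypothetical names below, not the real revlog internals) tells pytype and PyCharm the attribute's type even though no value is assigned until later:

  class innerstorage:
      def _chunk(self, rev):
          return b''

  class outer:
      # declare the type for checkers; the attribute itself is only
      # attached later, outside of __init__
      _inner: innerstorage

      def _setup(self, inner):
          self._inner = inner

      def read(self, rev):
          # checkers now know self._inner has a _chunk() method
          return self._inner._chunk(rev)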
Finally, the vfs type hints look broken. This initially failed with:
    File "/mnt/c/Users/Matt/hg/mercurial/bundlerepo.py", line 65, in __init__: Function readonlyvfs.__init__ was called with the wrong arguments [wrong-arg-types]
             Expected: (self, vfs: mercurial.vfs.vfs)
      Actually passed: (self, vfs: Callable)
    Called from (traceback):
      line 232, in dirlog
      line 214, in __init__
I don't see a raw `Callable` anywhere, but I did try typing some of the vfs args
as `vfsmod.abstractvfs`; that class doesn't have `options`, though, so it failed
elsewhere. `readonlyvfs` isn't a subclass of `vfs` (it's a subclass of
`abstractvfs`), so I'm not sure how to handle that. It would be a shame to have
to make a union of vfs subclasses (and not all of them have `options` either).
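To make the shape of the problem concrete, here is a toy illustration with stand-in classes (these are not the real vfs classes, just the relationships described above: the readonly wrapper and the concrete class are siblings under an abstract base that lacks `options`):

  class abstractbase:
      """Stand-in for the abstract base; note there is no 'options' here."""

  class concrete(abstractbase):
      def __init__(self):
          self.options = {}  # only this subclass carries 'options'

  class readonlywrapper(abstractbase):
      def __init__(self, inner):
          self._inner = inner

  def use(v: concrete):
      # annotating the parameter with the concrete class rejects
      # readonlywrapper; annotating with abstractbase loses 'options'
      return v.options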
author    Matt Harbison <matt_harbison@yahoo.com>
date      Sat, 03 Aug 2024 01:33:13 -0400
parents   f4733654f144
children
line source
# repocache.py - in-memory repository cache for long-running services
#
# Copyright 2018 Yuya Nishihara <yuya@tcha.org>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import annotations

import collections
import gc
import threading

from . import (
    error,
    hg,
    obsolete,
    scmutil,
    util,
)


class repoloader:
    """Load repositories in background thread

    This is designed for a forking server. A cached repo cannot be obtained
    until the server fork()s a worker and the loader thread stops.
    """

    def __init__(self, ui, maxlen):
        self._ui = ui.copy()
        self._cache = util.lrucachedict(max=maxlen)
        # use deque and Event instead of Queue since deque can discard
        # old items to keep at most maxlen items.
        self._inqueue = collections.deque(maxlen=maxlen)
        self._accepting = False
        self._newentry = threading.Event()
        self._thread = None

    def start(self):
        assert not self._thread
        if self._inqueue.maxlen == 0:
            # no need to spawn loader thread as the cache is disabled
            return
        self._accepting = True
        self._thread = threading.Thread(target=self._mainloop)
        self._thread.start()

    def stop(self):
        if not self._thread:
            return
        self._accepting = False
        self._newentry.set()
        self._thread.join()
        self._thread = None
        self._cache.clear()
        self._inqueue.clear()

    def load(self, path):
        """Request to load the specified repository in background"""
        self._inqueue.append(path)
        self._newentry.set()

    def get(self, path):
        """Return a cached repo if available

        This function must be called after fork(), where the loader thread
        is stopped. Otherwise, the returned repo might be updated by the
        loader thread.
        """
        if self._thread and self._thread.is_alive():
            raise error.ProgrammingError(
                b'cannot obtain cached repo while loader is active'
            )
        return self._cache.peek(path, None)

    def _mainloop(self):
        while self._accepting:
            # Avoid heavy GC after fork(), which would cancel the benefit of
            # COW. We assume that GIL is acquired while GC is underway in the
            # loader thread. If that isn't true, we might have to move
            # gc.collect() to the main thread so that fork() would never stop
            # the thread where GC is in progress.
            gc.collect()
            self._newentry.wait()
            while self._accepting:
                self._newentry.clear()
                try:
                    path = self._inqueue.popleft()
                except IndexError:
                    break
                scmutil.callcatch(self._ui, lambda: self._load(path))

    def _load(self, path):
        start = util.timer()
        # TODO: repo should be recreated if storage configuration changed
        try:
            # pop before loading so inconsistent state wouldn't be exposed
            repo = self._cache.pop(path)
        except KeyError:
            repo = hg.repository(self._ui, path).unfiltered()
        _warmupcache(repo)
        repo.ui.log(
            b'repocache',
            b'loaded repo into cache: %s (in %.3fs)\n',
            path,
            util.timer() - start,
        )
        self._cache.insert(path, repo)


# TODO: think about proper API of preloading cache
def _warmupcache(repo):
    repo.invalidateall()
    repo.changelog
    repo.obsstore._all
    repo.obsstore.successors
    repo.obsstore.predecessors
    repo.obsstore.children
    for name in obsolete.cachefuncs:
        obsolete.getrevs(repo, name)
    # ensure the phase cache is fully initialized
    repo._phasecache.phase(repo, repo.changelog.tiprev())


# TODO: think about proper API of attaching preloaded attributes
def copycache(srcrepo, destrepo):
    """Copy cached attributes from srcrepo to destrepo"""
    destfilecache = destrepo._filecache
    srcfilecache = srcrepo._filecache
    if b'changelog' in srcfilecache:
        destfilecache[b'changelog'] = ce = srcfilecache[b'changelog']
        ce.obj.opener = ce.obj._inner.opener = destrepo.svfs
    if b'obsstore' in srcfilecache:
        destfilecache[b'obsstore'] = ce = srcfilecache[b'obsstore']
        ce.obj.svfs = destrepo.svfs
    if b'_phasecache' in srcfilecache:
        destfilecache[b'_phasecache'] = ce = srcfilecache[b'_phasecache']
        ce.obj.opener = destrepo.svfs
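A hedged usage sketch of `repoloader` for a forking server, based on the docstrings above (the repository path, cache size, and fork scaffolding are hypothetical; only the `repoloader` methods are taken from this module):

  import os

  from mercurial import ui as uimod
  from mercurial.repocache import repoloader

  srvui = uimod.ui.load()
  loader = repoloader(srvui, maxlen=4)
  loader.start()
  loader.load(b'/srv/repos/example')  # hypothetical repository path

  # When a request arrives, the server fork()s a worker. The loader thread
  # does not survive fork(), so get() is allowed in the child.
  pid = os.fork()
  if pid == 0:
      repo = loader.get(b'/srv/repos/example')  # a preloaded repo, or None
      # ... serve the request with `repo` (or open the repo normally if None)
      os._exit(0)

  # The parent keeps loading in the background; on shutdown, stop() joins
  # the loader thread and clears the cache.
  loader.stop()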