view tests/test-filecache.py @ 46607:e9901d01d135
revlog: add a mechanism to verify expected file position before appending
If someone uses `hg debuglocks`, or some non-hg process writes to the .hg
directory without respecting the locks, or if the repo's on a networked
filesystem, it's possible for the revlog code to write out corrupted data.
The form of this corruption can vary depending on what data was written and how
that happened. We are in the "networked filesystem" case (though I've had users
also do this to themselves with the "`hg debuglocks`" scenario), and most often
see this with the changelog. What ends up happening is we produce two items
(let's call them rev1 and rev2) in the .i file that have the same linkrev,
baserev, and offset into the .d file, while the data in the .d file is appended
properly. rev2's compressed_size is accurate for rev2, but when we go to
decompress the data in the .d file, we use the offset that's recorded in the
index file, which is the same as rev1, and attempt to decompress
rev2.compressed_size bytes of rev1's data. This usually does not succeed. :)
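
To make the failure concrete, here is a toy sketch of that decompression step. This is not Mercurial's revlog code: the flat `index` list and `data_file` bytes below stand in for the .i and .d files, and zlib is assumed as the chunk compression. The second index entry reuses the first entry's offset but records its own compressed size, so the reader is handed a byte range that starts at rev1's data and is cut to rev2's length, which is not a valid stream.

import zlib

data_file = b''   # stands in for the .d file
index = []        # stands in for the .i file: (offset, compressed_size)


def append(fulltext):
    """Append one revision the way a healthy writer would."""
    global data_file
    comp = zlib.compress(fulltext)
    index.append((len(data_file), len(comp)))  # offset = current end of .d
    data_file += comp


append(b'rev1: a reasonably long full text, so its compressed form is large')

# A racy second writer appends rev2's data correctly but records rev1's
# offset in the new index entry, as described above.
comp2 = zlib.compress(b'rev2')
index.append((index[0][0], len(comp2)))  # stale offset, rev2's own size
data_file += comp2

offset, size = index[1]
try:
    zlib.decompress(data_file[offset : offset + size])
except zlib.error as exc:
    print('rev2 is unreadable:', exc)  # the truncated stream fails to inflate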
When using inline data, this also fails, though I haven't investigated why too
closely. This shows up as a "patch decode" error. I believe what's happening
there is that we're basically ignoring the offset field, getting the data
properly, but since baserev != rev, it thinks this is a delta based on rev
(instead of a full text) and can't actually apply it as such.
For now, I'm going to make this an optional component and default it to entirely
off. I may increase the default severity of this in the future, once I've
enabled it for my users and we gain more experience with it. Luckily, most of my
users have a versioned filesystem and can roll back to before the corruption was
written; it's just a hassle to do so, and not everyone knows how (so it's a
support burden). Users on other filesystems will not have that luxury: this can
leave them with a corrupted repository that they are unlikely to know how to
repair, and they will see it as a data-loss event. Refusing to create the
corruption in the first place is a much better user experience.
This mechanism is not perfect. There may be false negatives (racy writes that go
undetected). There should not be any false positives (non-racy writes flagged as
racy). This does not make putting a repo on a networked filesystem "safe" or
"supported"; it just makes corruption *less* likely.
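
The check itself boils down to comparing where we are about to append with where the file actually ends. Below is a minimal sketch of that idea only; it is not the code added by this change, and `append_checked` / `CorruptionDetectedError` are invented names for illustration.

import os


class CorruptionDetectedError(Exception):
    """Raised instead of appending data at an unexpected file position."""


def append_checked(path, expected_offset, data):
    # expected_offset is where the in-memory index believes the file ends,
    # i.e. where the next chunk must start.
    with open(path, 'ab') as fh:
        fh.seek(0, os.SEEK_END)
        actual = fh.tell()
        if actual != expected_offset:
            # Another writer (`hg debuglocks`, a non-hg process, or a racy
            # networked filesystem) moved the end of the file behind our
            # back; refuse to write rather than create a corrupt revlog.
            raise CorruptionDetectedError(
                '%s: expected to append at byte %d, but file ends at %d'
                % (path, expected_offset, actual)
            )
        fh.write(data)

Note that a stat-then-write check like this is itself racy: another writer can still slip in between the tell() and the write(), which is why the paragraph above only promises fewer corruptions (false negatives remain possible), not none.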
Differential Revision: https://phab.mercurial-scm.org/D9952
| author   | Kyle Lippincott <spectral@google.com> |
| -------- | ------------------------------------- |
| date     | Wed, 03 Feb 2021 16:33:10 -0800       |
| parents  | e01ea8325859                          |
| children | 6000f5b25c9b                          |
from __future__ import absolute_import, print_function
import os
import stat
import subprocess
import sys

if subprocess.call(
    [sys.executable, '%s/hghave' % os.environ['TESTDIR'], 'cacheable']
):
    sys.exit(80)

print_ = print


def print(*args, **kwargs):
    """print() wrapper that flushes stdout buffers to avoid py3 buffer issues

    We could also just write directly to sys.stdout.buffer the way the ui
    object will, but this was easier for porting the test.
    """
    print_(*args, **kwargs)
    sys.stdout.flush()


from mercurial import (
    extensions,
    hg,
    localrepo,
    pycompat,
    ui as uimod,
    util,
    vfs as vfsmod,
)

if pycompat.ispy3:
    xrange = range


class fakerepo(object):
    def __init__(self):
        self._filecache = {}

    class fakevfs(object):
        def join(self, p):
            return p

    vfs = fakevfs()

    def unfiltered(self):
        return self

    def sjoin(self, p):
        return p

    @localrepo.repofilecache('x', 'y')
    def cached(self):
        print('creating')
        return 'string from function'

    def invalidate(self):
        for k in self._filecache:
            try:
                delattr(self, pycompat.sysstr(k))
            except AttributeError:
                pass


def basic(repo):
    print("* neither file exists")
    # calls function
    repo.cached

    repo.invalidate()
    print("* neither file still exists")
    # uses cache
    repo.cached

    # create empty file
    f = open('x', 'w')
    f.close()
    repo.invalidate()
    print("* empty file x created")
    # should recreate the object
    repo.cached

    f = open('x', 'w')
    f.write('a')
    f.close()
    repo.invalidate()
    print("* file x changed size")
    # should recreate the object
    repo.cached

    repo.invalidate()
    print("* nothing changed with either file")
    # stats file again, reuses object
    repo.cached

    # atomic replace file, size doesn't change
    # hopefully st_mtime doesn't change as well so this doesn't use the cache
    # because of inode change
    f = vfsmod.vfs(b'.')(b'x', b'w', atomictemp=True)
    f.write(b'b')
    f.close()

    repo.invalidate()
    print("* file x changed inode")
    repo.cached

    # create empty file y
    f = open('y', 'w')
    f.close()
    repo.invalidate()
    print("* empty file y created")
    # should recreate the object
    repo.cached

    f = open('y', 'w')
    f.write('A')
    f.close()
    repo.invalidate()
    print("* file y changed size")
    # should recreate the object
    repo.cached

    f = vfsmod.vfs(b'.')(b'y', b'w', atomictemp=True)
    f.write(b'B')
    f.close()

    repo.invalidate()
    print("* file y changed inode")
    repo.cached

    f = vfsmod.vfs(b'.')(b'x', b'w', atomictemp=True)
    f.write(b'c')
    f.close()
    f = vfsmod.vfs(b'.')(b'y', b'w', atomictemp=True)
    f.write(b'C')
    f.close()

    repo.invalidate()
    print("* both files changed inode")
    repo.cached


def fakeuncacheable():
    def wrapcacheable(orig, *args, **kwargs):
        return False

    def wrapinit(orig, *args, **kwargs):
        pass

    originit = extensions.wrapfunction(util.cachestat, '__init__', wrapinit)
    origcacheable = extensions.wrapfunction(
        util.cachestat, 'cacheable', wrapcacheable
    )

    for fn in ['x', 'y']:
        try:
            os.remove(fn)
        except OSError:
            pass

    basic(fakerepo())

    util.cachestat.cacheable = origcacheable
    util.cachestat.__init__ = originit


def test_filecache_synced():
    # test old behavior that caused filecached properties to go out of sync
    os.system('hg init && echo a >> a && hg ci -qAm.')
    repo = hg.repository(uimod.ui.load())
    # first rollback clears the filecache, but changelog still stays in __dict__
    repo.rollback()
    repo.commit(b'.')
    # second rollback comes along and touches the changelog externally
    # (file is moved)
    repo.rollback()
    # but since changelog isn't under the filecache control anymore, we don't
    # see that it changed, and return the old changelog without reconstructing
    # it
    repo.commit(b'.')


def setbeforeget(repo):
    os.remove('x')
    os.remove('y')
    repo.__class__.cached.set(repo, 'string set externally')
    repo.invalidate()
    print("* neither file exists")
    print(repo.cached)
    repo.invalidate()
    f = open('x', 'w')
    f.write('a')
    f.close()
    print("* file x created")
    print(repo.cached)
    repo.__class__.cached.set(repo, 'string 2 set externally')
    repo.invalidate()
    print("* string set externally again")
    print(repo.cached)
    repo.invalidate()
    f = open('y', 'w')
    f.write('b')
    f.close()
    print("* file y created")
    print(repo.cached)


def antiambiguity():
    filename = 'ambigcheck'

    # try some times, because reproduction of ambiguity depends on
    # "filesystem time"
    for i in xrange(5):
        fp = open(filename, 'w')
        fp.write('FOO')
        fp.close()

        oldstat = os.stat(filename)
        if oldstat[stat.ST_CTIME] != oldstat[stat.ST_MTIME]:
            # subsequent changing never causes ambiguity
            continue

        repetition = 3

        # repeat changing via checkambigatclosing, to examine whether
        # st_mtime is advanced multiple times as expected
        for i in xrange(repetition):
            # explicit closing
            fp = vfsmod.checkambigatclosing(open(filename, 'a'))
            fp.write('FOO')
            fp.close()

            # implicit closing by "with" statement
            with vfsmod.checkambigatclosing(open(filename, 'a')) as fp:
                fp.write('BAR')

        newstat = os.stat(filename)
        if oldstat[stat.ST_CTIME] != newstat[stat.ST_CTIME]:
            # timestamp ambiguity was naturally avoided while repetition
            continue

        # st_mtime should be advanced "repetition * 2" times, because
        # all changes occurred at same time (in sec)
        expected = (oldstat[stat.ST_MTIME] + repetition * 2) & 0x7FFFFFFF
        if newstat[stat.ST_MTIME] != expected:
            print(
                "'newstat[stat.ST_MTIME] %s is not %s (as %s + %s * 2)"
                % (
                    newstat[stat.ST_MTIME],
                    expected,
                    oldstat[stat.ST_MTIME],
                    repetition,
                )
            )

        # no more examination is needed regardless of result
        break
    else:
        # This platform seems too slow to examine anti-ambiguity
        # of file timestamp (or test happened to be executed at
        # bad timing). Exit silently in this case, because running
        # on other faster platforms can detect problems
        pass


print('basic:')
print()
basic(fakerepo())
print()

print('fakeuncacheable:')
print()
fakeuncacheable()
test_filecache_synced()
print()

print('setbeforeget:')
print()
setbeforeget(fakerepo())
print()

print('antiambiguity:')
print()
antiambiguity()