streamclone: disable the volatile file open handle optimization on Windows
Leaving files open caused new failures like this since
a47f09da8bd1:
diff --git a/tests/test-persistent-nodemap-stream-clone.t b/tests/test-persistent-nodemap-stream-clone.t
--- a/tests/test-persistent-nodemap-stream-clone.t
+++ b/tests/test-persistent-nodemap-stream-clone.t
@@ -115,7 +115,12 @@ Do a mix of clone and commit at the same
$ (hg clone -U --stream ssh://user@dummy/test-repo stream-clone-race-1 --debug 2>> clone-output | grep -E '00(changelog|manifest)' >> clone-output; touch $HG_TEST_STREAM_WALKED_FILE_3) &
$ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
$ hg -R test-repo/ commit -m foo
- created new head
+ transaction abort!
+ failed to recover 00changelog.n ([WinError 32] The process cannot access the file because it is being used by another process: b'$STR_REPR_TESTTMP\\test-repo/.hg/store/00changelog.n' -> b'$STR_REPR_TESTTMP\\test-repo/.hg/store/00changelog.n-f418dcd6')
+ rollback failed - please run hg recover
+ (failure reason: [WinError 32] The process cannot access the file because it is being used by another process: b'$STR_REPR_TESTTMP\\test-repo/.hg/store/00changelog.n' -> b'$STR_REPR_TESTTMP\\test-repo/.hg/store/00changelog.n-f418dcd6')
+ abort: The process cannot access the file because it is being used by another process: '$TESTTMP\test-repo\.hg\store\00changelog.n'
+ [255]
$ touch $HG_TEST_STREAM_WALKED_FILE_2
$ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
$ cat clone-output
Since the `VolatileManager` falls back to the old copy method when the open
file threshold is exceeded, this change simply drops the threshold on Windows
so that only 1 file is kept open. The actual value used (2) is unexpected,
and is explained inline. I'd like to have a config option for this so that we
can test both ways (in theory, it could resort to copies on non-Windows
systems too), but I don't see a `uimod.ui` handy.
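The rough shape of that change is sketched below; the class name comes from
the description above, but the attribute name and surrounding structure are
assumptions for illustration, not the committed code:

  from mercurial import pycompat

  class VolatileManager:
      # Sketch only: cap the number of concurrently open volatile files low
      # enough on Windows that the copy fallback is always taken, since an
      # open handle blocks the rename/unlink a concurrent transaction needs.
      # The real change explains inline why the Windows value ends up as 2.
      MAX_OPEN = 2 if pycompat.iswindows else 100  # non-Windows value is illustrative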
Alternatively, I tried replacing the 3 `open()` calls in the `VolatileManager`
with `util.posixfile()`, but that simply hung the test on Windows for some
reason, I think on the same line that's indicated as failing above. (There was
a `grep` command hanging around, as well as `hg -R test-repo serve --stdio`.)
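For reference, that abandoned experiment amounted to the substitution sketched
below (illustrative only, not what was committed; on Windows `util.posixfile()`
opens the file with FILE_SHARE_DELETE so other processes can rename or unlink
it while the handle is held):

  from mercurial import util

  volatile_path = b'.hg/store/00changelog.n'  # example path only
  # instead of:  fd = open(volatile_path, 'rb')
  fd = util.posixfile(volatile_path, b'rb')
  data = fd.read()
  fd.close()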
import os
import stat
import subprocess
import sys
if subprocess.call(
    [sys.executable, '%s/hghave' % os.environ['TESTDIR'], 'cacheable']
):
    sys.exit(80)
print_ = print


def print(*args, **kwargs):
    """print() wrapper that flushes stdout buffers to avoid py3 buffer issues

    We could also just write directly to sys.stdout.buffer the way the
    ui object will, but this was easier for porting the test.
    """
    print_(*args, **kwargs)
    sys.stdout.flush()


from mercurial import (
    extensions,
    hg,
    localrepo,
    pycompat,
    ui as uimod,
    util,
    vfs as vfsmod,
)


class fakerepo:
    def __init__(self):
        self._filecache = {}

    class fakevfs:
        def join(self, p):
            return p

    vfs = fakevfs()

    def unfiltered(self):
        return self

    def sjoin(self, p):
        return p

    @localrepo.repofilecache(b'x', b'y')
    def cached(self):
        print('creating')
        return 'string from function'

    def invalidate(self):
        for k in self._filecache:
            try:
                delattr(self, pycompat.sysstr(k))
            except AttributeError:
                pass


def basic(repo):
    print("* neither file exists")
    # calls function
    repo.cached
    repo.invalidate()
    print("* neither file still exists")
    # uses cache
    repo.cached

    # create empty file
    f = open('x', 'wb')
    f.close()
    repo.invalidate()
    print("* empty file x created")
    # should recreate the object
    repo.cached

    f = open('x', 'wb')
    f.write(b'a')
    f.close()
    repo.invalidate()
    print("* file x changed size")
    # should recreate the object
    repo.cached

    repo.invalidate()
    print("* nothing changed with either file")
    # stats file again, reuses object
    repo.cached

    # atomic replace file, size doesn't change
    # hopefully st_mtime doesn't change as well so this doesn't use the cache
    # because of inode change
    f = vfsmod.vfs(b'.')(b'x', b'w', atomictemp=True)
    f.write(b'b')
    f.close()

    repo.invalidate()
    print("* file x changed inode")
    repo.cached

    # create empty file y
    f = open('y', 'wb')
    f.close()
    repo.invalidate()
    print("* empty file y created")
    # should recreate the object
    repo.cached

    f = open('y', 'wb')
    f.write(b'A')
    f.close()
    repo.invalidate()
    print("* file y changed size")
    # should recreate the object
    repo.cached

    f = vfsmod.vfs(b'.')(b'y', b'w', atomictemp=True)
    f.write(b'B')
    f.close()
    repo.invalidate()
    print("* file y changed inode")
    repo.cached

    f = vfsmod.vfs(b'.')(b'x', b'w', atomictemp=True)
    f.write(b'c')
    f.close()
    f = vfsmod.vfs(b'.')(b'y', b'w', atomictemp=True)
    f.write(b'C')
    f.close()
    repo.invalidate()
    print("* both files changed inode")
    repo.cached


def fakeuncacheable():
    def wrapcacheable(orig, *args, **kwargs):
        return False

    def wrapinit(orig, *args, **kwargs):
        pass

    originit = extensions.wrapfunction(util.cachestat, '__init__', wrapinit)
    origcacheable = extensions.wrapfunction(
        util.cachestat, 'cacheable', wrapcacheable
    )

    for fn in [b'x', b'y']:
        try:
            os.remove(fn)
        except OSError:
            pass

    basic(fakerepo())

    util.cachestat.cacheable = origcacheable
    util.cachestat.__init__ = originit


def test_filecache_synced():
    # test old behavior that caused filecached properties to go out of sync
    os.system('hg init && echo a >> a && hg add a && hg ci -qm.')
    repo = hg.repository(uimod.ui.load())
    # first rollback clears the filecache, but the changelog stays in __dict__
    repo.rollback()
    repo.commit(b'.')
    # second rollback comes along and touches the changelog externally
    # (file is moved)
    repo.rollback()
    # but since the changelog isn't under filecache control anymore, we don't
    # see that it changed, and return the old changelog without reconstructing
    # it
    repo.commit(b'.')


def setbeforeget(repo):
    os.remove(b'x')
    os.remove(b'y')
    repo.__class__.cached.set(repo, 'string set externally')
    repo.invalidate()
    print("* neither file exists")
    print(repo.cached)

    repo.invalidate()
    f = open('x', 'wb')
    f.write(b'a')
    f.close()
    print("* file x created")
    print(repo.cached)

    repo.__class__.cached.set(repo, 'string 2 set externally')
    repo.invalidate()
    print("* string set externally again")
    print(repo.cached)

    repo.invalidate()
    f = open('y', 'wb')
    f.write(b'b')
    f.close()
    print("* file y created")
    print(repo.cached)


def antiambiguity():
    filename = 'ambigcheck'

    # try several times, because reproducing the ambiguity depends on
    # "filesystem time"
    for i in range(5):
        fp = open(filename, 'wb')
        fp.write(b'FOO')
        fp.close()

        oldstat = os.stat(filename)
        if oldstat[stat.ST_CTIME] != oldstat[stat.ST_MTIME]:
            # subsequent changes never cause ambiguity
            continue

        repetition = 3

        # repeat changing via checkambigatclosing, to examine whether
        # st_mtime is advanced multiple times as expected
        for i in range(repetition):
            # explicit closing
            fp = vfsmod.checkambigatclosing(open(filename, 'ab'))
            fp.write(b'FOO')
            fp.close()

            # implicit closing by "with" statement
            with vfsmod.checkambigatclosing(open(filename, 'ab')) as fp:
                fp.write(b'BAR')

        newstat = os.stat(filename)
        if oldstat[stat.ST_CTIME] != newstat[stat.ST_CTIME]:
            # timestamp ambiguity was naturally avoided during the repetition
            continue

        # st_mtime should be advanced "repetition * 2" times, because
        # all changes occurred at the same time (in seconds)
        expected = (oldstat[stat.ST_MTIME] + repetition * 2) & 0x7FFFFFFF
        if newstat[stat.ST_MTIME] != expected:
            print(
                "newstat[stat.ST_MTIME] %s is not %s (as %s + %s * 2)"
                % (
                    newstat[stat.ST_MTIME],
                    expected,
                    oldstat[stat.ST_MTIME],
                    repetition,
                )
            )

        # no more examination is needed regardless of the result
        break
    else:
        # This platform seems too slow to examine anti-ambiguity of file
        # timestamps (or the test happened to run at a bad time). Exit
        # silently in this case, because running on other, faster platforms
        # can detect the problem.
        pass


print('basic:')
print()
basic(fakerepo())
print()
print('fakeuncacheable:')
print()
fakeuncacheable()
test_filecache_synced()
print()
print('setbeforeget:')
print()
setbeforeget(fakerepo())
print()
print('antiambiguity:')
print()
antiambiguity()