--- a/mercurial/bundlerepo.py Wed Jun 19 10:19:32 2019 -0700
+++ b/mercurial/bundlerepo.py Fri Jun 21 23:35:04 2019 -0700
@@ -365,6 +365,11 @@
self.manstart = self._cgunpacker.tell()
return c
+ def _refreshchangelog(self):
+ # The changelog of a bundle repo is not managed by filecache, so this
+ # method does not apply here.
+ pass
+
@localrepo.unfilteredpropertycache
def manifestlog(self):
self._cgunpacker.seek(self.manstart)
--- a/mercurial/localrepo.py Wed Jun 19 10:19:32 2019 -0700
+++ b/mercurial/localrepo.py Fri Jun 21 23:35:04 2019 -0700
@@ -1227,8 +1227,62 @@
@mixedrepostorecache(('bookmarks', 'plain'), ('bookmarks.current', 'plain'),
('bookmarks', ''), ('00changelog.i', ''))
def _bookmarks(self):
+ # Since the multiple files involved in the transaction cannot be
+ # written atomically (with current repository format), there is a race
+ # condition here.
+ #
+ # 1) changelog content A is read
+ # 2) outside transaction update changelog to content B
+ # 3) outside transaction update bookmark file referring to content B
+ # 4) bookmarks file content is read and filtered against changelog-A
+ #
+ # When this happens, bookmarks against nodes missing from A are dropped.
+ #
+ # Having this happen during read is not great, but it becomes worse
+ # when it happens during write, because the bookmarks to the "unknown"
+ # nodes will be dropped for good. However, writes happen within locks.
+ # This locking makes it possible to have a race free consistent read.
+ # For this purpose, data read from disk before locking is
+ # "invalidated" right after the locks are taken. These invalidations are
+ # "light": the `filecache` mechanism keeps the data in memory and will
+ # reuse it if the underlying files did not change. Not parsing the
+ # same data multiple times helps performance.
+ #
+ # Unfortunately, in the case described above, the files tracked by the
+ # bookmarks file cache might not have changed, but the in-memory
+ # content is still "wrong" because we used an older changelog content
+ # to process the on-disk data. So after locking, the changelog would be
+ # refreshed but `_bookmarks` would be preserved.
+ # Adding `00changelog.i` to the list of tracked files is not
+ # enough, because by the time we build the content for `_bookmarks` in
+ # (4), the changelog file has already diverged from the content used
+ # for loading `changelog` in (1).
+ #
+ # To prevent the issue, we force the changelog to be explicitly
+ # reloaded while computing `_bookmarks`. The data race can still happen
+ # without the lock (with a narrower window), but it would no longer go
+ # undetected during the lock time refresh.
+ #
+ # The new schedule is as follows:
+ #
+ # 1) filecache logic detect that `_bookmarks` needs to be computed
+ # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
+ # 3) We force `changelog` filecache to be tested
+ # 4) cachestat for `changelog` are captured (for changelog)
+ # 5) `_bookmarks` is computed and cached
+ #
+ # The step in (3) ensures we have a changelog at least as recent as the
+ # cache stat computed in (1). As a result, at locking time:
+ # * if the changelog did not change since (1) -> we can reuse the data
+ # * otherwise -> the bookmarks get refreshed.
+ self._refreshchangelog()
return bookmarks.bmstore(self)
+ def _refreshchangelog(self):
+ """make sure the in-memory changelog matches the on-disk one"""
+ if ('changelog' in vars(self) and self.currenttransaction() is None):
+ del self.changelog
+
@property
def _activebookmark(self):
return self._bookmarks.active
--- a/tests/run-tests.py Wed Jun 19 10:19:32 2019 -0700
+++ b/tests/run-tests.py Fri Jun 21 23:35:04 2019 -0700
@@ -1748,7 +1748,8 @@
el = m.group(1) + b"\n"
if not self._iftest(conditions):
- retry = "retry" # Not required by listed features
+ # listed feature missing, should not match
+ return "retry", False
if el.endswith(b" (esc)\n"):
if PYTHON3:
--- a/tests/test-bookmarks-corner-case.t Wed Jun 19 10:19:32 2019 -0700
+++ b/tests/test-bookmarks-corner-case.t Fri Jun 21 23:35:04 2019 -0700
@@ -119,9 +119,13 @@
> import atexit
> import os
> import time
- > from mercurial import bookmarks, error, extensions
- > def wrapinit(orig, self, repo):
+ >
+ > from mercurial import error, extensions, bookmarks
+ >
+ > def wait(repo):
> if not os.path.exists('push-A-started'):
+ > assert repo._currentlock(repo._lockref) is None
+ > assert repo._currentlock(repo._wlockref) is None
> repo.ui.status(b'setting raced push up\n')
> with open('push-A-started', 'w'):
> pass
@@ -131,11 +135,15 @@
> if clock <= 0:
> raise error.Abort("race scenario timed out")
> time.sleep(0.1)
- > return orig(self, repo)
>
+ > def reposetup(ui, repo):
+ > class racedrepo(repo.__class__):
+ > @property
+ > def _bookmarks(self):
+ > wait(self)
+ > return super(racedrepo, self)._bookmarks
> repo.__class__ = racedrepo
- > def uisetup(ui):
- > extensions.wrapfunction(bookmarks.bmstore, '__init__', wrapinit)
+ >
> def e():
> with open('push-A-done', 'w'):
> pass
@@ -193,6 +201,7 @@
$ cat push-output.txt
pushing to ssh://user@dummy/bookrace-server
searching for changes
+ remote has heads on branch 'default' that are not known locally: f26c3b5167d1
remote: setting raced push up
remote: adding changesets
remote: adding manifests
--- a/tests/test-copies.t Wed Jun 19 10:19:32 2019 -0700
+++ b/tests/test-copies.t Fri Jun 21 23:35:04 2019 -0700
@@ -501,6 +501,7 @@
$ hg debugpathcopies 0 4
x -> z (filelog !)
y -> z (compatibility !)
+ y -> z (changeset !)
$ hg debugpathcopies 1 5
y -> z (no-filelog !)
$ hg debugpathcopies 2 5
--- a/tests/test-extension.t Wed Jun 19 10:19:32 2019 -0700
+++ b/tests/test-extension.t Fri Jun 21 23:35:04 2019 -0700
@@ -84,8 +84,8 @@
uipopulate called (1 times)
uipopulate called (1 times) (chg !)
uipopulate called (1 times) (chg !)
- uipopulate called (1 times) (chg !)
- reposetup called for a (chg !)
+ uipopulate called (1 times)
+ reposetup called for a
ui == repo.ui
Foo
$ hg foo --debug
@@ -96,8 +96,8 @@
uipopulate called (1 times)
uipopulate called (1 times) (chg !)
uipopulate called (1 times) (chg !)
- uipopulate called (1 times) (chg !)
- reposetup called for a (chg !)
+ uipopulate called (1 times)
+ reposetup called for a
ui == repo.ui
Foo
@@ -107,7 +107,7 @@
uisetup called [status] (no-chg !)
uipopulate called (1 times)
uipopulate called (1 times) (chg !)
- uipopulate called (1 times) (chg !)
+ uipopulate called (1 times)
reposetup called for a
ui == repo.ui
uipopulate called (1 times)
--- a/tests/test-narrow-clone-stream.t Wed Jun 19 10:19:32 2019 -0700
+++ b/tests/test-narrow-clone-stream.t Fri Jun 21 23:35:04 2019 -0700
@@ -61,8 +61,10 @@
Making sure we have the correct set of requirements
$ cat .hg/requires
- dotencode (tree flat-fncache !)
- fncache (tree flat-fncache !)
+ dotencode (tree !)
+ dotencode (flat-fncache !)
+ fncache (tree !)
+ fncache (flat-fncache !)
generaldelta
narrowhg-experimental
revlogv1
@@ -75,8 +77,9 @@
$ ls .hg/store/
00changelog.i
00manifest.i
- data (tree flat-fncache !)
- fncache (tree flat-fncache !)
+ data
+ fncache (tree !)
+ fncache (flat-fncache !)
meta (tree !)
narrowspec
undo
--- a/tests/test-narrow-widen-no-ellipsis.t Wed Jun 19 10:19:32 2019 -0700
+++ b/tests/test-narrow-widen-no-ellipsis.t Fri Jun 21 23:35:04 2019 -0700
@@ -124,7 +124,7 @@
adding manifests
adding widest/ revisions (tree !)
adding file changes
- adding widest/f revisions (tree !)
+ adding widest/f revisions
added 0 changesets with 1 changes to 1 files
bundle2-input-part: total payload size * (glob)
bundle2-input-bundle: 0 parts total
--- a/tests/test-run-tests.t Wed Jun 19 10:19:32 2019 -0700
+++ b/tests/test-run-tests.t Fri Jun 21 23:35:04 2019 -0700
@@ -1936,3 +1936,70 @@
running 1 tests using 1 parallel processes
.
# Ran 1 tests, 0 skipped, 0 failed.
+
+Test conditional output matching
+================================
+
+ $ cat << EOF >> test-conditional-matching.t
+ > #testcases foo bar
+ > $ echo richtig
+ > richtig (true !)
+ > $ echo falsch
+ > falsch (false !)
+ > #if foo
+ > $ echo arthur
+ > arthur (bar !)
+ > #endif
+ > $ echo celeste
+ > celeste (foo !)
+ > $ echo zephir
+ > zephir (bar !)
+ > EOF
+
+ $ rt test-conditional-matching.t
+ running 2 tests using 1 parallel processes
+
+ --- $TESTTMP/anothertests/cases/test-conditional-matching.t
+ +++ $TESTTMP/anothertests/cases/test-conditional-matching.t#bar.err
+ @@ -3,11 +3,13 @@
+ richtig (true !)
+ $ echo falsch
+ falsch (false !)
+ + falsch
+ #if foo
+ $ echo arthur
+ arthur \(bar !\) (re)
+ #endif
+ $ echo celeste
+ celeste \(foo !\) (re)
+ + celeste
+ $ echo zephir
+ zephir \(bar !\) (re)
+
+ ERROR: test-conditional-matching.t#bar output changed
+ !
+ --- $TESTTMP/anothertests/cases/test-conditional-matching.t
+ +++ $TESTTMP/anothertests/cases/test-conditional-matching.t#foo.err
+ @@ -3,11 +3,14 @@
+ richtig (true !)
+ $ echo falsch
+ falsch (false !)
+ + falsch
+ #if foo
+ $ echo arthur
+ arthur \(bar !\) (re)
+ + arthur
+ #endif
+ $ echo celeste
+ celeste \(foo !\) (re)
+ $ echo zephir
+ zephir \(bar !\) (re)
+ + zephir
+
+ ERROR: test-conditional-matching.t#foo output changed
+ !
+ Failed test-conditional-matching.t#bar: output changed
+ Failed test-conditional-matching.t#foo: output changed
+ # Ran 2 tests, 0 skipped, 2 failed.
+ python hash seed: * (glob)
+ [1]