nodemap: double check the source docket when doing incremental update
In theory, the index will have the information we expect it to have. However, as
a safety measure, it seems safer to double-check that the incremental data were
generated from the data currently on disk.
Differential Revision: https://phab.mercurial-scm.org/D7890
--- a/mercurial/pure/parsers.py Wed Jan 15 15:50:24 2020 +0100
+++ b/mercurial/pure/parsers.py Wed Jan 15 15:50:33 2020 +0100
@@ -164,11 +164,13 @@
"""
if self._nm_root is None:
return None
+ docket = self._nm_docket
changed, data = nodemaputil.update_persistent_data(
- self, self._nm_root, self._nm_max_idx, self._nm_rev
+ self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
)
- self._nm_root = self._nm_max_idx = self._nm_rev = None
- return changed, data
+
+ self._nm_root = self._nm_max_idx = self._nm_docket = None
+ return docket, changed, data
def update_nodemap_data(self, docket, nm_data):
"""provide full block of persisted binary data for a nodemap
@@ -178,9 +180,9 @@
if nm_data is not None:
self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
if self._nm_root:
- self._nm_rev = docket.tip_rev
+ self._nm_docket = docket
else:
- self._nm_root = self._nm_max_idx = self._nm_rev = None
+ self._nm_root = self._nm_max_idx = self._nm_docket = None
class InlinedIndexObject(BaseIndexObject):
--- a/mercurial/revlogutils/nodemap.py Wed Jan 15 15:50:24 2020 +0100
+++ b/mercurial/revlogutils/nodemap.py Wed Jan 15 15:50:33 2020 +0100
@@ -77,18 +77,27 @@
can_incremental = util.safehasattr(revlog.index, "nodemap_data_incremental")
ondisk_docket = revlog._nodemap_docket
+ data = None
# first attemp an incremental update of the data
if can_incremental and ondisk_docket is not None:
target_docket = revlog._nodemap_docket.copy()
- data_changed_count, data = revlog.index.nodemap_data_incremental()
- datafile = _rawdata_filepath(revlog, target_docket)
- # EXP-TODO: if this is a cache, this should use a cache vfs, not a
- # store vfs
- with revlog.opener(datafile, b'a') as fd:
- fd.write(data)
- target_docket.data_length += len(data)
- target_docket.data_unused += data_changed_count
- else:
+ (
+ src_docket,
+ data_changed_count,
+ data,
+ ) = revlog.index.nodemap_data_incremental()
+ if src_docket != target_docket:
+ data = None
+ else:
+ datafile = _rawdata_filepath(revlog, target_docket)
+ # EXP-TODO: if this is a cache, this should use a cache vfs, not a
+ # store vfs
+ with revlog.opener(datafile, b'a') as fd:
+ fd.write(data)
+ target_docket.data_length += len(data)
+ target_docket.data_unused += data_changed_count
+
+ if data is None:
# otherwise fallback to a full new export
target_docket = NodeMapDocket()
datafile = _rawdata_filepath(revlog, target_docket)
@@ -182,6 +191,20 @@
new.data_unused = self.data_unused
return new
+ def __cmp__(self, other):
+ if self.uid < other.uid:
+ return -1
+ if self.uid > other.uid:
+ return 1
+ elif self.data_length < other.data_length:
+ return -1
+ elif self.data_length > other.data_length:
+ return 1
+ return 0
+
+ def __eq__(self, other):
+ return self.uid == other.uid and self.data_length == other.data_length
+
def serialize(self):
"""return serialized bytes for a docket using the passed uid"""
data = []