comparison mercurial/branchmap.py @ 51488:94f821490645

branchcache: change the _delayed flag to an explicit `_dirty` flag

This is more consistent with the logic we use for other objects and opens the way to clearer management of the cache state. Now caches are created clean, cache updates mark them dirty, and writing them to disk marks them clean again.
author Pierre-Yves David <pierre-yves.david@octobus.net>
date Fri, 08 Mar 2024 16:47:32 +0100
parents 1a9bdd0e1c44
children 659f766629c8
comparing 51487:1a9bdd0e1c44 with 51488:94f821490645
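
The commit description boils down to a three-step lifecycle for the flag: a cache is created clean, updating it marks it dirty, and writing it to disk marks it clean again. Below is a minimal standalone sketch of that dirty-tracking pattern; `SimpleCache` and its file format are illustrative stand-ins, not Mercurial code.

    class SimpleCache:
        """Toy model of the clean/dirty lifecycle used by branchcache."""

        def __init__(self):
            self._entries = {}
            self._dirty = False  # created clean

        def update(self, key, value):
            self._entries[key] = value
            self._dirty = True  # any update marks the cache dirty

        def write(self, path):
            if not self._dirty:
                return  # nothing changed since the last write
            with open(path, 'w') as f:
                for k, v in sorted(self._entries.items()):
                    f.write('%s %s\n' % (k, v))
            self._dirty = False  # a successful write marks it clean again
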
@@ -72,11 +72,11 @@
 
         The cache for this repository view is updated if needed and written on
         disk.
 
         If a transaction is in progress, the writing is schedule to transaction
-        close. See the `BranchMapCache.write_delayed` method.
+        close. See the `BranchMapCache.write_dirty` method.
 
         This method exist independently of __getitem__ as it is sometime useful
         to signal that we have no intend to use the data in memory yet.
         """
         self.updatecache(repo)
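
The docstring in the hunk above separates warming the cache from using it: the data is refreshed in memory right away, but when a transaction is open the on-disk write is left for the transaction-close flush (`BranchMapCache.write_dirty`). A rough sketch of that warm-now/flush-later split, reusing the `SimpleCache` model above; `warm_cache` and `in_transaction` are made-up names:

    def warm_cache(cache, in_transaction):
        cache.update('tip', 'deadbeef')  # refresh the in-memory data now
        if not in_transaction:
            cache.write('branch-cache.txt')  # safe to persist immediately
        # otherwise the cache is simply left dirty; whatever runs at
        # transaction close is expected to flush the dirty caches
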
@@ -162,17 +162,17 @@
         return
 
     def clear(self):
         self._per_filter.clear()
 
-    def write_delayed(self, repo):
+    def write_dirty(self, repo):
         unfi = repo.unfiltered()
         for filtername in repoviewutil.get_ordered_subset():
             cache = self._per_filter.get(filtername)
             if cache is None:
                 continue
-            if cache._delayed:
+            if cache._dirty:
                 if filtername is None:
                     repo = unfi
                 else:
                     repo = unfi.filtered(filtername)
                 cache.write(repo)
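
`write_dirty` walks the repository's filtered views in a stable order and writes only the caches whose `_dirty` flag is set. A simplified standalone version of that loop over a plain dict of the `SimpleCache` objects sketched earlier; the filter names and file names here are illustrative, not the exact set or order Mercurial uses:

    def flush_dirty(per_filter):
        # per_filter maps a filter name (or None for unfiltered) to a cache
        for name in (None, 'visible', 'served', 'immutable', 'base'):
            cache = per_filter.get(name)
            if cache is None or not cache._dirty:
                continue
            # Mercurial re-filters the repo here so each view writes its own
            # cache file; the sketch just derives a per-view file name.
            filename = 'branch-cache' if name is None else 'branch-cache-%s' % name
            cache.write(filename)
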
@@ -431,17 +431,17 @@
     ) -> None:
         """hasnode is a function which can be used to verify whether changelog
         has a given node or not. If it's not provided, we assume that every node
         we have exists in changelog"""
         self._filtername = repo.filtername
-        self._delayed = False
         if tipnode is None:
             self.tipnode = repo.nullid
         else:
             self.tipnode = tipnode
         self.tiprev = tiprev
         self.filteredhash = filteredhash
+        self._dirty = False
 
         super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
         # closednodes is a set of nodes that close their branch. If the branch
         # cache has been updated, it may contain nodes that are no longer
         # heads.
@@ -566,10 +566,10 @@
             closednodes=set(self._closednodes),
             verify_node=self._verify_node,
         )
         # we copy will likely schedule a write anyway, but that does not seems
         # to hurt to overschedule
-        other._delayed = self._delayed
+        other._dirty = self._dirty
         # also copy information about the current verification state
         other._verifiedbranches = set(self._verifiedbranches)
         return other
 
@@ -581,11 +581,10 @@ def write(self, repo):
         tr = repo.currenttransaction()
         if not getattr(tr, 'finalized', True):
             # Avoid premature writing.
             #
             # (The cache warming setup by localrepo will update the file later.)
-            self._delayed = True
             return
         try:
             filename = self._filename(repo)
             with repo.cachevfs(filename, b"w", atomictemp=True) as f:
                 self._write_header(f)
@@ -595,11 +594,11 @@ def write(self, repo):
                 b'wrote %s with %d labels and %d nodes\n',
                 _branchcachedesc(repo),
                 len(self._entries),
                 nodecount,
             )
-            self._delayed = False
+            self._dirty = False
         except (IOError, OSError, error.Abort) as inst:
             # Abort may be raised by read only opener, so log and continue
             repo.ui.debug(
                 b"couldn't write branch cache: %s\n"
                 % stringutil.forcebytestr(inst)
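
In `write()` itself the flag now has two jobs, visible in the hunks above: when an unfinalized transaction is open the method returns early and the cache simply stays dirty (the cache-warming hook writes the file later), and `_dirty` is cleared only once the file has actually been written, so an I/O error is logged and leaves the flag set. A rough standalone rendering of that control flow; `in_open_transaction` and `log` stand in for `repo.currenttransaction()` / `tr.finalized` and `repo.ui.debug`:

    def write_if_possible(cache, path, in_open_transaction, log):
        if in_open_transaction:
            # premature write: leave the cache dirty, a later flush
            # (after the transaction closes) will pick it up
            return
        try:
            cache.write(path)  # SimpleCache.write clears _dirty on success
        except OSError as err:
            # log and continue: the cache stays dirty
            log("couldn't write branch cache: %s" % err)
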
@@ -705,11 +704,11 @@
         self.tipnode = cl.node(tiprev)
         self.tiprev = tiprev
         self.filteredhash = scmutil.filteredhash(
             repo, self.tiprev, needobsolete=True
         )
-
+        self._dirty = True
         self.write(repo)
 
 
 class remotebranchcache(_BaseBranchCache):
     """Branchmap info for a remote connection, should not write locally"""