changeset 4096:1630756a6a46 stable

obshashrange: force reset if many changes

If there are many new markers (or the markers affect many changesets), resetting the affected changesets can get slow. A common case for that is when changesets and markers are added in different transactions (e.g. when using a clone bundle). When we detect such a case, we reset the obshashrange cache entirely; rewarming it should be faster. Here "many" is arbitrarily set to 10 000. The number might get adjusted.
author Pierre-Yves David <pierre-yves.david@octobus.net>
date Thu, 13 Sep 2018 02:49:58 +0200
parents 2c60ad0d54a9
children 4ea2a813b82c
files CHANGELOG hgext3rd/evolve/obsdiscovery.py
diffstat 2 files changed, 33 insertions(+), 9 deletions(-)
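
In essence, the patch below adds a threshold check in front of the incremental cache invalidation. A minimal Python sketch of that decision follows; RESET_ABOVE and obsmarkers come from the diff itself, while the should_reset() helper and its affected argument are hypothetical framing used here for illustration only, not part of the actual module:

# Sketch only: condenses the decision logic added to
# hgext3rd/evolve/obsdiscovery.py in the diff below.

# if there are that many new obsmarkers, reset without analysing them
RESET_ABOVE = 10000

def should_reset(obsmarkers, affected):
    """Return True when wiping the obshashrange cache and rewarming it
    from scratch is likely cheaper than incremental invalidation.

    (Hypothetical helper, not present in the real module.)
    """
    if RESET_ABOVE < len(obsmarkers):
        # too many new markers to be worth analysing one by one
        return True
    if RESET_ABOVE < len(affected):
        # the markers touch too many cached changesets
        return True
    # (the real code also resets when the underlying stablerange
    # cache is unavailable)
    return False
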
--- a/CHANGELOG	Fri Sep 07 11:08:47 2018 -0700
+++ b/CHANGELOG	Thu Sep 13 02:49:58 2018 +0200
@@ -5,6 +5,7 @@
 -------------------
 
   * obshashrange: issue the "long stable cache" update message only once
+  * obshashrange: reduce impact of cache invalidation from many new obsmarkers
 
 8.2.0 -- 2018-09-03
 -------------------
--- a/hgext3rd/evolve/obsdiscovery.py	Fri Sep 07 11:08:47 2018 -0700
+++ b/hgext3rd/evolve/obsdiscovery.py	Thu Sep 13 02:49:58 2018 +0200
@@ -397,6 +397,9 @@
 
     return affected_nodes
 
+# if there are that many new obsmarkers, reset without analysing them
+RESET_ABOVE = 10000
+
 class _obshashcache(obscache.dualsourcecache):
 
     _schemaversion = 3
@@ -470,23 +473,43 @@
 
         con = self._con
         if con is not None:
-            max_stored = con.execute(_query_max_stored).fetchall()[0][0]
-            affected_nodes = _affectedby(repo, obsmarkers)
+            reset = False
+            affected = []
+            if RESET_ABOVE < len(obsmarkers):
+                # lots of new obsmarkers, probably smarter to reset the cache
+                repo.ui.log('evoext-cache', 'obshashcache reset - '
+                            'many new markers (%d)\n'
+                            % len(obsmarkers))
+                reset = True
+            elif obsmarkers:
+                max_stored = con.execute(_query_max_stored).fetchall()[0][0]
+                affected_nodes = _affectedby(repo, obsmarkers)
 
-            rev = repo.changelog.nodemap.get
-            affected = [rev(n) for n in affected_nodes]
-            affected = [r for r in affected
-                        if r is not None and r <= max_stored]
+                rev = repo.changelog.nodemap.get
+                affected = [rev(n) for n in affected_nodes]
+                affected = [r for r in affected
+                            if r is not None and r <= max_stored]
 
-            if affected:
-                repo.ui.log('evoext-cache', 'obshashcache clean - '
-                            'new markers affect %d changeset and cached ranges\n'
+            if RESET_ABOVE < len(affected):
+                repo.ui.log('evoext-cache', 'obshashcache reset - '
+                            'new markers affect many changesets (%d)\n'
                             % len(affected))
+                reset = True
+
+            if affected or reset:
+                if not reset:
+                    repo.ui.log('evoext-cache', 'obshashcache clean - '
+                                'new markers affect %d changesets and cached ranges\n'
+                                % len(affected))
                 if con is not None:
                     # always reset for now, the code detecting affected changesets is buggy
                     # so we need to reset more broadly than we would like.
                     try:
                         if repo.stablerange._con is None:
+                            repo.ui.log('evoext-cache', 'obshashcache reset - '
+                                        'underlying stablerange cache unavailable\n')
+                            reset = True
+                        if reset:
                             con.execute(_reset)
                             self._data.clear()
                         else: