comparison mercurial/branchmap.py @ 41615:328ca3b9e545
branchmap: encapsulate cache updating in the map itself
Rather than have a repository update the cache, move handling of cache updates
into the branchmap module, in the form of a custom mapping class.
This makes later performance improvements easier to handle too.
Differential Revision: https://phab.mercurial-scm.org/D5638
author    Martijn Pieters <mj@octobus.net>
date      Mon, 21 Jan 2019 17:37:33 +0000
parents   c795c462b1d6
children  bfc49f1df615
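
The "custom mapping class" the commit message refers to is a mapping whose `__getitem__` refreshes the cached entry before handing it out, so callers no longer orchestrate the update themselves. Below is a minimal, self-contained sketch of that pattern; the names `FilterCache` and `compute` are illustrative only and are not part of Mercurial's API.

```python
class FilterCache(object):
    """Per-key cache that (re)computes missing entries on access."""

    def __init__(self, compute):
        self._compute = compute      # callable: key -> freshly computed value
        self._per_key = {}

    def __getitem__(self, key):
        self.updatecache(key)
        return self._per_key[key]

    def updatecache(self, key):
        # A real implementation would also check staleness, the way
        # branchcache.validfor() does below; here we only fill missing entries.
        if key not in self._per_key:
            self._per_key[key] = self._compute(key)

    def clear(self):
        self._per_key.clear()


caches = FilterCache(lambda filtername: {'default': []})
print(caches['served'])   # computed on first access, served from the cache afterwards
```

The actual change, shown in the comparison below, replaces two module-level helpers with a `BranchMapCache` class built around this idea.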
comparison of mercurial/branchmap.py between 41612:fbd4ce55bcbd (before) and
41615:328ca3b9e545 (after); deleted lines are prefixed with "-", inserted
lines with "+":

                'visible-hidden': 'visible',
                'visible': 'served',
                'served': 'immutable',
                'immutable': 'base'}

-def updatecache(repo):
-    """Update the cache for the given filtered view on a repository"""
-    # This can trigger updates for the caches for subsets of the filtered
-    # view, e.g. when there is no cache for this filtered view or the cache
-    # is stale.
-
-    cl = repo.changelog
-    filtername = repo.filtername
-    bcache = repo._branchcaches.get(filtername)
-    if bcache is None or not bcache.validfor(repo):
-        # cache object missing or cache object stale? Read from disk
-        bcache = branchcache.fromfile(repo)
-
-    revs = []
-    if bcache is None:
-        # no (fresh) cache available anymore, perhaps we can re-use
-        # the cache for a subset, then extend that to add info on missing
-        # revisions.
-        subsetname = subsettable.get(filtername)
-        if subsetname is not None:
-            subset = repo.filtered(subsetname)
-            bcache = subset.branchmap().copy()
-            extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
-            revs.extend(r for r in extrarevs if r <= bcache.tiprev)
-        else:
-            # nothing to fall back on, start empty.
-            bcache = branchcache()
-
-    revs.extend(cl.revs(start=bcache.tiprev + 1))
-    if revs:
-        bcache.update(repo, revs)
-
-    assert bcache.validfor(repo), filtername
-    repo._branchcaches[repo.filtername] = bcache
-
-def replacecache(repo, bm):
-    """Replace the branchmap cache for a repo with a branch mapping.
-
-    This is likely only called during clone with a branch map from a remote.
-    """
-    cl = repo.changelog
-    clrev = cl.rev
-    clbranchinfo = cl.branchinfo
-    rbheads = []
-    closed = []
-    for bheads in bm.itervalues():
-        rbheads.extend(bheads)
-        for h in bheads:
-            r = clrev(h)
-            b, c = clbranchinfo(r)
-            if c:
-                closed.append(h)
-
-    if rbheads:
-        rtiprev = max((int(clrev(node))
-                       for node in rbheads))
-        cache = branchcache(bm,
-                            repo[rtiprev].node(),
-                            rtiprev,
-                            closednodes=closed)
-
-        # Try to stick it as low as possible
-        # filter above served are unlikely to be fetch from a clone
-        for candidate in ('base', 'immutable', 'served'):
-            rview = repo.filtered(candidate)
-            if cache.validfor(rview):
-                repo._branchcaches[candidate] = cache
-                cache.write(rview)
-                break
+
+class BranchMapCache(object):
+    """Cache mapping"""
+    def __init__(self):
+        self._per_filter = {}
+
+    def __getitem__(self, repo):
+        self.updatecache(repo)
+        return self._per_filter[repo.filtername]
+
+    def updatecache(self, repo):
+        """Update the cache for the given filtered view on a repository"""
+        # This can trigger updates for the caches for subsets of the filtered
+        # view, e.g. when there is no cache for this filtered view or the cache
+        # is stale.
+
+        cl = repo.changelog
+        filtername = repo.filtername
+        bcache = self._per_filter.get(filtername)
+        if bcache is None or not bcache.validfor(repo):
+            # cache object missing or cache object stale? Read from disk
+            bcache = branchcache.fromfile(repo)
+
+        revs = []
+        if bcache is None:
+            # no (fresh) cache available anymore, perhaps we can re-use
+            # the cache for a subset, then extend that to add info on missing
+            # revisions.
+            subsetname = subsettable.get(filtername)
+            if subsetname is not None:
+                subset = repo.filtered(subsetname)
+                bcache = self[subset].copy()
+                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
+                revs.extend(r for r in extrarevs if r <= bcache.tiprev)
+            else:
+                # nothing to fall back on, start empty.
+                bcache = branchcache()
+
+        revs.extend(cl.revs(start=bcache.tiprev + 1))
+        if revs:
+            bcache.update(repo, revs)
+
+        assert bcache.validfor(repo), filtername
+        self._per_filter[repo.filtername] = bcache
+
+    def replace(self, repo, remotebranchmap):
+        """Replace the branchmap cache for a repo with a branch mapping.
+
+        This is likely only called during clone with a branch map from a
+        remote.
+
+        """
+        cl = repo.changelog
+        clrev = cl.rev
+        clbranchinfo = cl.branchinfo
+        rbheads = []
+        closed = []
+        for bheads in remotebranchmap.itervalues():
+            rbheads += bheads
+            for h in bheads:
+                r = clrev(h)
+                b, c = clbranchinfo(r)
+                if c:
+                    closed.append(h)
+
+        if rbheads:
+            rtiprev = max((int(clrev(node)) for node in rbheads))
+            cache = branchcache(
+                remotebranchmap, repo[rtiprev].node(), rtiprev,
+                closednodes=closed)
+
+            # Try to stick it as low as possible
+            # filter above served are unlikely to be fetch from a clone
+            for candidate in ('base', 'immutable', 'served'):
+                rview = repo.filtered(candidate)
+                if cache.validfor(rview):
+                    self._per_filter[candidate] = cache
+                    cache.write(rview)
+                    return
+
+    def clear(self):
+        self._per_filter.clear()
+

 class branchcache(dict):
     """A dict like object that hold branches heads cache.

     This cache is used to avoid costly computations to determine all the
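
For orientation, a hedged sketch of how a repository object might drive the new class after this change. The `_branchcaches` attribute, the `repo`/`remotebranchmap` objects, and the clone-time call site are assumptions based on the removed helpers above, not something shown in this hunk; only the `BranchMapCache` methods themselves come from the diff.

```python
from mercurial import branchmap

def seed_from_clone(repo, remotebranchmap):
    # The repository is assumed to own a single BranchMapCache instance.
    repo._branchcaches = branchmap.BranchMapCache()

    # Seed the per-filter caches from the remote's branch map instead of
    # recomputing them locally (BranchMapCache.replace).
    repo._branchcaches.replace(repo, remotebranchmap)

    # Indexing with the (filtered) repo refreshes the entry for that filter
    # level and returns it (BranchMapCache.__getitem__ -> updatecache).
    return repo._branchcaches[repo]
```

Compared with the removed module-level `updatecache()` and `replacecache()` helpers, the per-filter state now lives in `BranchMapCache._per_filter` rather than in a plain dict hanging off the repository, keeping the update logic and the storage it mutates in one place.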