comparison mercurial/copies.py @ 45638:4f876e6b30fa

copies: use dedicated `_revinfo_getter` function and call. We want to return data in a different form, so we need a different revinfo function. At that point it makes sense to have a different getter. Differential Revision: https://phab.mercurial-scm.org/D9115
author Pierre-Yves David <pierre-yves.david@octobus.net>
date Fri, 25 Sep 2020 14:52:34 +0200
parents ad6ebb6f0dfe
children 7a757e893532
comparison
equal deleted inserted replaced
45637:ad6ebb6f0dfe 45638:4f876e6b30fa
199 nb_parents += 1 199 nb_parents += 1
200 return nb_parents >= 2 200 return nb_parents >= 2
201 201
202 return ismerged 202 return ismerged
203 203
204 if repo.filecopiesmode == b'changeset-sidedata': 204 changelogrevision = cl.changelogrevision
205 changelogrevision = cl.changelogrevision 205 flags = cl.flags
206 flags = cl.flags 206
207 207 # A small cache to avoid doing the work twice for merges
208 # A small cache to avoid doing the work twice for merges 208 #
209 # 209 # In the vast majority of cases, if we ask information for a revision
210 # In the vast majority of cases, if we ask information for a revision 210 # about 1 parent, we'll later ask it for the other. So it make sense to
211 # about 1 parent, we'll later ask it for the other. So it make sense to 211 # keep the information around when reaching the first parent of a merge
212 # keep the information around when reaching the first parent of a merge 212 # and dropping it after it was provided for the second parents.
213 # and dropping it after it was provided for the second parents. 213 #
214 # 214 # It exists cases were only one parent of the merge will be walked. It
215 # It exists cases were only one parent of the merge will be walked. It 215 # happens when the "destination" the copy tracing is descendant from a
216 # happens when the "destination" the copy tracing is descendant from a 216 # new root, not common with the "source". In that case, we will only walk
217 # new root, not common with the "source". In that case, we will only walk 217 # through merge parents that are descendant of changesets common
218 # through merge parents that are descendant of changesets common 218 # between "source" and "destination".
219 # between "source" and "destination". 219 #
220 # 220 # With the current case implementation if such changesets have a copy
221 # With the current case implementation if such changesets have a copy 221 # information, we'll keep them in memory until the end of
222 # information, we'll keep them in memory until the end of 222 # _changesetforwardcopies. We don't expect the case to be frequent
223 # _changesetforwardcopies. We don't expect the case to be frequent 223 # enough to matters.
224 # enough to matters. 224 #
225 # 225 # In addition, it would be possible to reach pathological case, were
226 # In addition, it would be possible to reach pathological case, were 226 # many first parent are met before any second parent is reached. In
227 # many first parent are met before any second parent is reached. In 227 # that case the cache could grow. If this even become an issue one can
228 # that case the cache could grow. If this even become an issue one can 228 # safely introduce a maximum cache size. This would trade extra CPU/IO
229 # safely introduce a maximum cache size. This would trade extra CPU/IO 229 # time to save memory.
230 # time to save memory. 230 merge_caches = {}
231 merge_caches = {} 231
232 232 def revinfo(rev):
233 def revinfo(rev): 233 p1, p2 = parents(rev)
234 p1, p2 = parents(rev) 234 value = None
235 value = None 235 if flags(rev) & REVIDX_SIDEDATA:
236 if flags(rev) & REVIDX_SIDEDATA: 236 e = merge_caches.pop(rev, None)
237 e = merge_caches.pop(rev, None) 237 if e is not None:
238 if e is not None: 238 return e
239 return e 239 c = changelogrevision(rev)
240 c = changelogrevision(rev) 240 p1copies = c.p1copies
241 p1copies = c.p1copies 241 p2copies = c.p2copies
242 p2copies = c.p2copies 242 removed = c.filesremoved
243 removed = c.filesremoved 243 if p1 != node.nullrev and p2 != node.nullrev:
244 if p1 != node.nullrev and p2 != node.nullrev: 244 # XXX some case we over cache, IGNORE
245 # XXX some case we over cache, IGNORE 245 value = merge_caches[rev] = (
246 value = merge_caches[rev] = ( 246 p1,
247 p1, 247 p2,
248 p2, 248 p1copies,
249 p1copies, 249 p2copies,
250 p2copies, 250 removed,
251 removed, 251 get_ismerged(rev),
252 get_ismerged(rev), 252 )
253 ) 253 else:
254 else: 254 p1copies = {}
255 p1copies = {} 255 p2copies = {}
256 p2copies = {} 256 removed = []
257 removed = [] 257
258 258 if value is None:
259 if value is None: 259 value = (p1, p2, p1copies, p2copies, removed, get_ismerged(rev))
260 value = (p1, p2, p1copies, p2copies, removed, get_ismerged(rev)) 260 return value
261 return value
262
263 else:
264
265 def revinfo(rev):
266 p1, p2 = parents(rev)
267 ctx = repo[rev]
268 p1copies, p2copies = ctx._copies
269 removed = ctx.filesremoved()
270 return p1, p2, p1copies, p2copies, removed, get_ismerged(rev)
271 261
272 return revinfo 262 return revinfo
273 263
274 264
275 def _changesetforwardcopies(a, b, match): 265 def _changesetforwardcopies(a, b, match):
276 if a.rev() in (node.nullrev, b.rev()): 266 if a.rev() in (node.nullrev, b.rev()):
277 return {} 267 return {}
278 268
279 repo = a.repo().unfiltered() 269 repo = a.repo().unfiltered()
280 children = {} 270 children = {}
281 revinfo = _revinfo_getter(repo)
282 271
283 cl = repo.changelog 272 cl = repo.changelog
284 isancestor = cl.isancestorrev # XXX we should had chaching to this. 273 isancestor = cl.isancestorrev # XXX we should had chaching to this.
285 missingrevs = cl.findmissingrevs(common=[a.rev()], heads=[b.rev()]) 274 missingrevs = cl.findmissingrevs(common=[a.rev()], heads=[b.rev()])
286 mrset = set(missingrevs) 275 mrset = set(missingrevs)
309 iterrevs.update(roots) 298 iterrevs.update(roots)
310 iterrevs.remove(b.rev()) 299 iterrevs.remove(b.rev())
311 revs = sorted(iterrevs) 300 revs = sorted(iterrevs)
312 301
313 if repo.filecopiesmode == b'changeset-sidedata': 302 if repo.filecopiesmode == b'changeset-sidedata':
303 revinfo = _revinfo_getter(repo)
314 return _combine_changeset_copies( 304 return _combine_changeset_copies(
315 revs, children, b.rev(), revinfo, match, isancestor 305 revs, children, b.rev(), revinfo, match, isancestor
316 ) 306 )
317 else: 307 else:
308 revinfo = _revinfo_getter_extra(repo)
318 return _combine_changeset_copies_extra( 309 return _combine_changeset_copies_extra(
319 revs, children, b.rev(), revinfo, match, isancestor 310 revs, children, b.rev(), revinfo, match, isancestor
320 ) 311 )
321 312
322 313
424 new_tt == other_tt 415 new_tt == other_tt
425 or not isancestor(new_tt, other_tt) 416 or not isancestor(new_tt, other_tt)
426 or ismerged(dest) 417 or ismerged(dest)
427 ): 418 ):
428 minor[dest] = value 419 minor[dest] = value
420
421
def _revinfo_getter_extra(repo):
    """return a function that returns multiple data given a <rev>

    * p1: revision number of first parent
    * p2: revision number of second parent
    * p1copies: mapping of copies from p1
    * p2copies: mapping of copies from p2
    * removed: a list of removed files
    * ismerged: a callback to know if file was merged in that revision
    """
    cl = repo.changelog
    parents = cl.parentrevs

    def get_ismerged(rev):
        # Build the per-revision "was this file merged here?" callback.
        ctx = repo[rev]

        def ismerged(path):
            # A file absent from the changeset's file list cannot have
            # been merged in this revision.
            if path not in ctx.files():
                return False
            fctx = ctx[path]
            parents = fctx._filelog.parents(fctx._filenode)
            # A filelog revision is a merge iff it has two non-null parents.
            nb_parents = 0
            for n in parents:
                if n != node.nullid:
                    nb_parents += 1
            return nb_parents >= 2

        return ismerged

    def revinfo(rev):
        # Gather (p1, p2, p1copies, p2copies, removed, ismerged) from the
        # changectx; this is the non-sidedata ("extra"-based) code path.
        p1, p2 = parents(rev)
        ctx = repo[rev]
        p1copies, p2copies = ctx._copies
        removed = ctx.filesremoved()
        return p1, p2, p1copies, p2copies, removed, get_ismerged(rev)

    return revinfo
429 459
430 460
431 def _combine_changeset_copies_extra( 461 def _combine_changeset_copies_extra(
432 revs, children, targetrev, revinfo, match, isancestor 462 revs, children, targetrev, revinfo, match, isancestor
433 ): 463 ):