comparison mercurial/copies.py @ 46113:59fa3890d40a

node: import symbols explicitly

There is no point in lazy importing mercurial.node, it is used all over
the place anyway. So consistently import the used symbols directly. Fix
one file using symbols indirectly via mercurial.revlog.

Differential Revision: https://phab.mercurial-scm.org/D9480
author Joerg Sonnenberger <joerg@bec.de>
date Tue, 01 Dec 2020 21:54:46 +0100
parents 2f357d053df2
children 70a9eb899637
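The commit message describes replacing lazy, attribute-style access through mercurial.node with direct symbol imports. As a rough analogy only (os and os.path stand in for mercurial and mercurial.node; this is not Mercurial code), the two import styles compare like this:

# Analogy only: os/os.path stand in for mercurial/mercurial.node.

# Before: the symbol is reached as an attribute of the imported module.
import os
print(os.path.sep)

# After: the symbol is imported directly, as this change does for
# nullid and nullrev from mercurial.node.
from os.path import sep
print(sep)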
comparison
46112:d6afa9c149c3 vs 46113:59fa3890d40a
@@ -9,15 +9,17 @@
 
 import collections
 import os
 
 from .i18n import _
-
+from .node import (
+    nullid,
+    nullrev,
+)
 
 from . import (
     match as matchmod,
-    node,
     pathutil,
     policy,
     pycompat,
     util,
 )
@@ -145,11 +147,11 @@
     # case of computing what copies are in a commit versus its parent (like
     # during a rebase or histedit). Note, we exclude merge commits from this
     # optimization, since the ctx.files() for a merge commit is not correct for
     # this comparison.
     forwardmissingmatch = match
-    if b.p1() == a and b.p2().node() == node.nullid:
+    if b.p1() == a and b.p2().node() == nullid:
         filesmatcher = matchmod.exact(b.files())
         forwardmissingmatch = matchmod.intersectmatchers(match, filesmatcher)
     missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
 
     ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
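In the hunk above, b.p2().node() == nullid is the usual way to check that b is not a merge: a commit's second parent is the null node when only one real parent exists. A standalone sketch of that test, using the 20 null bytes that mercurial.node.nullid consists of (the helper name is made up for illustration):

nullid = b"\0" * 20  # stand-in for mercurial.node.nullid

def has_single_parent(p2_node):
    # A second parent equal to the null node means the commit is not a merge.
    return p2_node == nullid

print(has_single_parent(nullid))        # True  -> linear commit
print(has_single_parent(b"\x01" * 20))  # False -> merge commit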
@@ -231,11 +233,11 @@
             if flags(rev) & HASCOPIESINFO:
                 raw = changelogrevision(rev)._sidedata.get(sidedatamod.SD_FILES)
             else:
                 raw = None
             value = (p1, p2, raw)
-            if p1 != node.nullrev and p2 != node.nullrev:
+            if p1 != nullrev and p2 != nullrev:
                 # XXX some case we over cache, IGNORE
                 merge_caches[rev] = value
             return value
 
     else:
@@ -248,11 +250,11 @@
                 return e
             changes = None
             if flags(rev) & HASCOPIESINFO:
                 changes = changelogrevision(rev).changes
             value = (p1, p2, changes)
-            if p1 != node.nullrev and p2 != node.nullrev:
+            if p1 != nullrev and p2 != nullrev:
                 # XXX some case we over cache, IGNORE
                 merge_caches[rev] = value
             return value
 
     return revinfo
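Both revinfo variants above store a revision's value in merge_caches only when p1 and p2 are both real revisions, i.e. only for merges. A simplified standalone sketch of that caching rule (the function name and shape here are illustrative, not the real revinfo signature):

nullrev = -1  # stand-in for mercurial.node.nullrev

merge_caches = {}

def remember_if_merge(rev, p1, p2, value):
    # Mirror of the condition above: only revisions with two real parents
    # are cached.
    if p1 != nullrev and p2 != nullrev:
        merge_caches[rev] = value
    return value

remember_if_merge(10, 8, 9, "merge payload")
remember_if_merge(11, 10, nullrev, "linear payload")
print(sorted(merge_caches))  # [10] -- only the merge revision was cached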
@@ -275,11 +277,11 @@
 
     return _is_ancestor
 
 
 def _changesetforwardcopies(a, b, match):
-    if a.rev() in (node.nullrev, b.rev()):
+    if a.rev() in (nullrev, b.rev()):
         return {}
 
     repo = a.repo().unfiltered()
     children = {}
 
@@ -288,11 +290,11 @@
     missingrevs = cl.findmissingrevs(common=[a.rev()], heads=[b.rev()])
     mrset = set(missingrevs)
     roots = set()
     for r in missingrevs:
         for p in cl.parentrevs(r):
-            if p == node.nullrev:
+            if p == nullrev:
                 continue
             if p not in children:
                 children[p] = [r]
             else:
                 children[p].append(r)
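The second hunk above inverts the parent relation for the missing revisions: every non-null parent gets the revision appended to its list of children. A minimal standalone sketch with a hypothetical parent table (setdefault is used here as shorthand for the explicit if/else in the real code):

nullrev = -1  # stand-in for mercurial.node.nullrev

# Hypothetical parentrevs() data: rev -> (p1, p2).
parentrevs = {3: (1, 2), 4: (3, nullrev), 5: (3, 4)}

children = {}
for r in sorted(parentrevs):
    for p in parentrevs[r]:
        if p == nullrev:
            continue  # the null parent is never a real ancestor
        children.setdefault(p, []).append(r)

print(children)  # {1: [3], 2: [3], 3: [4, 5], 4: [5]}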
@@ -492,11 +494,11 @@
             return False
         fctx = ctx[path]
         parents = fctx._filelog.parents(fctx._filenode)
         nb_parents = 0
         for n in parents:
-            if n != node.nullid:
+            if n != nullid:
                 nb_parents += 1
         return nb_parents >= 2
 
     return ismerged
 
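The ismerged helper above counts how many filelog parents of the file revision are real nodes; two or more means the file itself was merged. A standalone sketch of the same count, with the filelog lookup replaced by a plain parents tuple (illustrative only):

nullid = b"\0" * 20  # stand-in for mercurial.node.nullid

def ismerged(parents):
    # Count the non-null filelog parents, as the hunk above does.
    nb_parents = sum(1 for n in parents if n != nullid)
    return nb_parents >= 2

print(ismerged((b"\x01" * 20, nullid)))         # False: one real parent
print(ismerged((b"\x01" * 20, b"\x02" * 20)))   # True: file-level merge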
@@ -666,11 +668,11 @@
         copies = _backwardrenames(x, y, match=match)
     else:
         if debug:
             repo.ui.debug(b'debug.copies: search mode: combined\n')
         base = None
-        if a.rev() != node.nullrev:
+        if a.rev() != nullrev:
             base = x
         copies = _chain(
             _backwardrenames(x, a, match=match),
             _forwardcopies(a, y, base, match=match),
         )
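The combined search mode above chains a backward pass (from x back to the common ancestor a) with a forward pass (from a to y). The composition below is only a generic illustration of what chaining two copy maps (destination -> source) means; the dictionaries are made up and this is not the actual _chain() implementation:

# Hypothetical rename maps for the two legs of the walk.
backward = {"name_in_a": "name_in_x"}   # x -> a leg
forward = {"name_in_y": "name_in_a"}    # a -> y leg

combined = {}
for dst, src in forward.items():
    # Follow the source of the second leg back through the first leg.
    combined[dst] = backward.get(src, src)

print(combined)  # {'name_in_y': 'name_in_x'}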