view mercurial/similar.py @ 20182:04036798ebed

branches: avoid unnecessary changectx.branch() calls

This requires reading from the changelog, which can be costly over NFS.

Note that this does not totally remove reading from the changelog; we still
do that when calling changectx.closesbranch(). That call will be removed in
a later patch.

Running hg branches on the PyPy repo (with 996) over a busy NFS server,
before this change:

$ time hg --profile branches > /dev/null
CallCount Recursive Total(s) Inline(s) module:lineno(function)
2042 0 2.2827 2.2827 <open>
2036 0 0.9840 0.9840 <method 'close' of 'file' objects>
2036 0 0.0464 0.0464 <method 'read' of 'file' objects>
5233 0 0.1985 0.0453 mercurial.repoview:161(changelog)
10462 0 0.0791 0.0314 mercurial.changelog:133(tip)
5233 0 0.0388 0.0176 mercurial.localrepo:26(__get__)
10462 0 0.0250 0.0126 <len>
5233 0 0.0059 0.0039 mercurial.repoview:112(filterrevs)
10462 0 0.0029 0.0029 <hash>
2034 0 0.0444 0.0444 <method 'seek' of 'file' objects>
5340 0 0.0390 0.0390 mercurial.revlog:296(rev)
2582 0 0.0371 0.0371 <zlib.decompress>
3155 0 0.1963 0.0366 mercurial.context:202(__init__)
3155 0 0.1238 0.0306 mercurial.repoview:161(changelog)
3155 0 0.0261 0.0080 mercurial.changelog:183(rev)
9465 0 0.0061 0.0061 <isinstance>
1096 0 0.0023 0.0023 <binascii.unhexlify>
4251 0 0.0014 0.0014 <len>
2059 0 3.7341 0.0332 mercurial.changelog:270(read)
2059 0 3.6304 0.0307 mercurial.revlog:907(revision)
2057 0 0.0262 0.0137 mercurial.changelog:28(decodeextra)
4118 0 0.0094 0.0094 <method 'split' of 'str' objects>
4118 0 0.0270 0.0048 mercurial.encoding:61(tolocal)
2059 0 0.0040 0.0040 <method 'index' of 'str' objects>
10462 0 0.0791 0.0314 mercurial.changelog:133(tip)
10462 0 0.0289 0.0207 mercurial.changelog:190(node)
10462 0 0.0188 0.0091 <len>
52433 20932 0.0478 0.0310 <len>
20932 0 0.0221 0.0168 mercurial.revlog:262(__len__)
2059 0 3.6304 0.0307 mercurial.revlog:907(revision)

real 0m4.361s
user 0m0.986s
sys 0m0.237s

After this change:

$ time hg --profile branches > /dev/null
CallCount Recursive Total(s) Inline(s) module:lineno(function)
1069 0 1.1098 1.1098 <open>
1063 0 0.4865 0.4865 <method 'close' of 'file' objects>
4122 0 0.1811 0.0404 mercurial.repoview:161(changelog)
8240 0 0.0712 0.0272 mercurial.changelog:133(tip)
4122 0 0.0378 0.0177 mercurial.localrepo:26(__get__)
8240 0 0.0221 0.0115 <len>
4122 0 0.0057 0.0033 mercurial.repoview:112(filterrevs)
8240 0 0.0025 0.0025 <hash>
3029 0 0.1979 0.0371 mercurial.context:202(__init__)
3029 0 0.1278 0.0310 mercurial.repoview:161(changelog)
3029 0 0.0230 0.0081 mercurial.changelog:183(rev)
9087 0 0.0061 0.0061 <isinstance>
1096 0 0.0026 0.0026 <binascii.unhexlify>
4125 0 0.0014 0.0014 <len>
4229 0 0.0337 0.0337 mercurial.revlog:296(rev)
1061 0 0.0296 0.0296 <method 'seek' of 'file' objects>
1063 0 0.0292 0.0292 <method 'read' of 'file' objects>
8240 0 0.0712 0.0272 mercurial.changelog:133(tip)
8240 0 0.0271 0.0196 mercurial.changelog:190(node)
8240 0 0.0169 0.0083 <len>
40476 16488 0.0422 0.0271 <len>
16488 0 0.0193 0.0152 mercurial.revlog:262(__len__)
1342 0 0.0241 0.0241 <zlib.decompress>
9445 0 0.0336 0.0224 mercurial.changelog:190(node)
9445 0 0.0112 0.0112 mercurial.revlog:317(node)
1074 0 1.9102 0.0224 mercurial.changelog:270(read)
1074 0 1.8397 0.0202 mercurial.revlog:907(revision)
1073 0 0.0187 0.0099 mercurial.changelog:28(decodeextra)
2148 0 0.0061 0.0061 <method 'split' of 'str' objects>
2148 0 0.0184 0.0034 mercurial.encoding:61(tolocal)

real 0m2.402s
user 0m0.735s
sys 0m0.177s
author Brodie Rao <brodie@sf.io>
date Fri, 15 Nov 2013 23:18:08 -0500
parents 525fdb738975
children a56c47ed3885

# similar.py - mechanisms for finding similar files
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
import util
import mdiff
import bdiff

def _findexactmatches(repo, added, removed):
    '''find renamed files that have no changes

    Takes a list of new filectxs and a list of removed filectxs, and yields
    (before, after) tuples of exact matches.
    '''
    numfiles = len(added) + len(removed)

    # Get hashes of removed files.
    hashes = {}
    for i, fctx in enumerate(removed):
        repo.ui.progress(_('searching for exact renames'), i, total=numfiles)
        h = util.sha1(fctx.data()).digest()
        hashes[h] = fctx

    # For each added file, see if it corresponds to a removed file.
    for i, fctx in enumerate(added):
        repo.ui.progress(_('searching for exact renames'), i + len(removed),
                total=numfiles)
        h = util.sha1(fctx.data()).digest()
        if h in hashes:
            yield (hashes[h], fctx)

    # Done
    repo.ui.progress(_('searching for exact renames'), None)
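
# A minimal sketch (hypothetical helper, not part of the original module) of
# the digest-keyed lookup used above: hashing every file once keeps the
# exact-match pass linear in the number of files instead of comparing every
# added/removed pair.
def _digestmatches(removeddata, addeddata):
    '''yield (removedname, addedname) pairs with byte-identical content

    removeddata and addeddata are hypothetical {name: bytestring} dicts.'''
    byhash = {}
    for name, data in removeddata.iteritems():
        byhash[util.sha1(data).digest()] = name
    for name, data in addeddata.iteritems():
        match = byhash.get(util.sha1(data).digest())
        if match is not None:
            yield match, name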

def _findsimilarmatches(repo, added, removed, threshold):
    '''find potentially renamed files based on similar file content

    Takes a list of new filectxs and a list of removed filectxs, and yields
    (before, after, score) tuples of partial matches.
    '''
    copies = {}
    for i, r in enumerate(removed):
        repo.ui.progress(_('searching for similar files'), i,
                         total=len(removed))

        # lazily load text
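        # (util.cachefunc means the removed file is read and split at most
        # once, even though score() below is called once per added file)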
        @util.cachefunc
        def data():
            orig = r.data()
            return orig, mdiff.splitnewlines(orig)

        def score(text):
            orig, lines = data()
            # bdiff.blocks() returns blocks of matching lines
            # count the number of bytes in each
            equal = 0
            matches = bdiff.blocks(text, orig)
            for x1, x2, y1, y2 in matches:
                for line in lines[y1:y2]:
                    equal += len(line)

            lengths = len(text) + len(orig)
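            # This is a difflib-style ratio: twice the bytes of the old
            # file's matching lines over the combined size of both files,
            # so 1.0 means identical content and 0.0 means nothing in
            # common.  For example, 'a\nb\nc\n' vs. 'a\nb\nx\n' matches
            # the 4 bytes of 'a\n' and 'b\n', giving 2 * 4 / (6 + 6) ~= 0.67.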
            return equal * 2.0 / lengths

        for a in added:
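            # keep the best-scoring removed file seen so far for this added
            # file; seeding the score with the threshold discards weaker
            # matches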
            bestscore = copies.get(a, (None, threshold))[1]
            myscore = score(a.data())
            if myscore >= bestscore:
                copies[a] = (r, myscore)
    repo.ui.progress(_('searching for similar files'), None)

    for dest, v in copies.iteritems():
        source, score = v
        yield source, dest, score

def findrenames(repo, added, removed, threshold):
    '''find renamed files -- yields (before, after, score) tuples'''
    parentctx = repo['.']
    workingctx = repo[None]

    # Zero-length files are frequently unrelated to each other, and
    # tracking the deletion/addition of such a file will probably cause more
    # harm than good. We strip them out here to avoid matching them later on.
    addedfiles = set([workingctx[fp] for fp in added
            if workingctx[fp].size() > 0])
    removedfiles = set([parentctx[fp] for fp in removed
            if fp in parentctx and parentctx[fp].size() > 0])

    # Find exact matches.
    for (a, b) in _findexactmatches(repo,
            sorted(addedfiles), sorted(removedfiles)):
        addedfiles.remove(b)
        yield (a.path(), b.path(), 1.0)

    # If the user requested similar files to be matched, search for them also.
    if threshold < 1.0:
        for (a, b, score) in _findsimilarmatches(repo,
                sorted(addedfiles), sorted(removedfiles), threshold):
            yield (a.path(), b.path(), score)
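
# A minimal usage sketch (hypothetical helper, not part of the original
# module): roughly how a caller such as 'hg addremove --similarity' could
# consume the (source, dest, score) tuples yielded by findrenames().
def _printrenames(repo, added, removed, threshold=0.75):
    for old, new, score in findrenames(repo, added, removed, threshold):
        # old and new are repository-relative paths; score is in [0.0, 1.0]
        repo.ui.status('%s -> %s (%d%% similar)\n' % (old, new, score * 100))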