mercurial/similar.py
author Pierre-Yves David <pierre-yves.david@octobus.net>
Wed, 16 Oct 2019 17:49:30 +0200
changeset 43254 181d28ba05da
parent 43106 d783f945a701
child 45942 89a2afe31e82
permissions -rw-r--r--
copies: avoid instancing more changectx to access parent revisions

We just need to know the revision numbers of the parents; creating a full
context is needlessly expensive. This provides a small but noticeable
performance boost.

revision: large amount; added files: large amount; rename small amount; c3b14617fbd7 9ba6ab77fd29
before: ! wall 2.885636 comb 2.900000 user 2.870000 sys 0.030000 (median of 10)
after:  ! wall 2.702270 comb 2.710000 user 2.690000 sys 0.020000 (median of 10)

revision: large amount; added files: small amount; rename small amount; c3b14617fbd7 f650a9b140d2
before: ! wall 4.298271 comb 4.290000 user 4.240000 sys 0.050000 (median of 10)
after:  ! wall 3.976610 comb 3.970000 user 3.920000 sys 0.050000 (median of 10)

revision: large amount; added files: large amount; rename large amount; 08ea3258278e d9fa043f30c0
before: ! wall 0.773397 comb 0.770000 user 0.770000 sys 0.000000 (median of 11)
after:  ! wall 0.701634 comb 0.700000 user 0.700000 sys 0.000000 (median of 13)

revision: small amount; added files: large amount; rename large amount; df6f7a526b60 a83dc6a2d56f
before: ! wall 0.013585 comb 0.010000 user 0.010000 sys 0.000000 (median of 217)
after:  ! wall 0.013550 comb 0.010000 user 0.010000 sys 0.000000 (median of 218)

revision: small amount; added files: large amount; rename small amount; 4aa4e1f8e19a 169138063d63
before: ! wall 0.003202 comb 0.000000 user 0.000000 sys 0.000000 (median of 929)
after:  ! wall 0.002993 comb 0.010000 user 0.010000 sys 0.000000 (median of 992)

revision: small amount; added files: small amount; rename small amount; 4bc173b045a6 964879152e2e
before: ! wall 0.000077 comb 0.000000 user 0.000000 sys 0.000000 (median of 12060)
after:  ! wall 0.000072 comb 0.000000 user 0.000000 sys 0.000000 (median of 12804)

revision: medium amount; added files: large amount; rename medium amount; c95f1ced15f2 2c68e87c3efe
before: ! wall 0.510614 comb 0.500000 user 0.500000 sys 0.000000 (median of 18)
after:  ! wall 0.473681 comb 0.470000 user 0.470000 sys 0.000000 (median of 20)

revision: medium amount; added files: medium amount; rename small amount; d343da0c55a8 d7746d32bf9d
before: ! wall 0.126552 comb 0.130000 user 0.130000 sys 0.000000 (median of 77)
after:  ! wall 0.115240 comb 0.110000 user 0.110000 sys 0.000000 (median of 85)

Differential Revision: https://phab.mercurial-scm.org/D7122

# similar.py - mechanisms for finding similar files
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _
from . import (
    mdiff,
    pycompat,
)


def _findexactmatches(repo, added, removed):
    '''find renamed files that have no changes

    Takes a list of new filectxs and a list of removed filectxs, and yields
    (before, after) tuples of exact matches.
    '''
    # Build table of removed files: {hash(fctx.data()): [fctx, ...]}.
    # We use hash() to discard fctx.data() from memory.
    hashes = {}
    progress = repo.ui.makeprogress(
        _(b'searching for exact renames'),
        total=(len(added) + len(removed)),
        unit=_(b'files'),
    )
    for fctx in removed:
        progress.increment()
        h = hash(fctx.data())
        if h not in hashes:
            hashes[h] = [fctx]
        else:
            hashes[h].append(fctx)

    # For each added file, see if it corresponds to a removed file.
    for fctx in added:
        progress.increment()
        adata = fctx.data()
        h = hash(adata)
        for rfctx in hashes.get(h, []):
            # hash() can collide, so compare the actual file contents for
            # exact identity
            if adata == rfctx.data():
                yield (rfctx, fctx)
                break

    # Done
    progress.complete()
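

# Illustrative sketch, not part of the original module: the same
# bucket-by-hash-then-verify approach as _findexactmatches(), applied to
# plain {name: data} dicts of byte strings so it can be exercised without
# a repository or filectx objects. The helper name is hypothetical.
def _exactmatchsketch(added, removed):
    '''yield (removedname, addedname) pairs whose contents are identical'''
    buckets = {}
    for name, data in removed.items():
        # bucket removed entries by hash so only candidates with the same
        # hash value need a full content comparison
        buckets.setdefault(hash(data), []).append((name, data))
    for name, data in added.items():
        for rname, rdata in buckets.get(hash(data), []):
            # hash() can collide, so verify the actual contents
            if data == rdata:
                yield rname, name
                break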


def _ctxdata(fctx):
    # load the file text and pre-split it into lines; callers invoke this
    # lazily, only when a comparison is actually needed
    orig = fctx.data()
    return orig, mdiff.splitnewlines(orig)


def _score(fctx, otherdata):
    orig, lines = otherdata
    text = fctx.data()
    # mdiff.blocks() returns blocks of matching lines
    # count the number of bytes in each
    equal = 0
    matches = mdiff.blocks(text, orig)
    for x1, x2, y1, y2 in matches:
        for line in lines[y1:y2]:
            equal += len(line)

    lengths = len(text) + len(orig)
    return equal * 2.0 / lengths


def score(fctx1, fctx2):
    return _score(fctx1, _ctxdata(fctx2))
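

# Illustrative sketch, not part of the original module: the same computation
# as _ctxdata() followed by _score(), but over two raw byte strings, so the
# "2 * matching bytes / total bytes" formula can be checked in isolation.
# For example, comparing b'a\nb\nx\n' against b'a\nb\nc\n' matches 4 of the
# 6 bytes on each side, giving 2 * 4 / (6 + 6) ~= 0.67. The helper name is
# hypothetical.
def _scorebytessketch(text, orig):
    lines = mdiff.splitnewlines(orig)
    equal = 0
    for x1, x2, y1, y2 in mdiff.blocks(text, orig):
        # count the bytes of ``orig`` covered by each matching block
        for line in lines[y1:y2]:
            equal += len(line)
    return equal * 2.0 / (len(text) + len(orig))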


def _findsimilarmatches(repo, added, removed, threshold):
    '''find potentially renamed files based on similar file content

    Takes a list of new filectxs and a list of removed filectxs, and yields
    (before, after, score) tuples of partial matches.
    '''
    copies = {}
    progress = repo.ui.makeprogress(
        _(b'searching for similar files'), unit=_(b'files'), total=len(removed)
    )
    for r in removed:
        progress.increment()
        data = None
        for a in added:
            bestscore = copies.get(a, (None, threshold))[1]
            if data is None:
                data = _ctxdata(r)
            myscore = _score(a, data)
            if myscore > bestscore:
                copies[a] = (r, myscore)
    progress.complete()

    for dest, v in pycompat.iteritems(copies):
        source, bscore = v
        yield source, dest, bscore
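

# Illustrative sketch, not part of the original module: the best-match
# selection performed inside _findsimilarmatches(), shown over a plain
# mapping of precomputed scores so the threshold handling is easy to follow.
# The helper name and the ``scores`` mapping are hypothetical.
def _bestmatchsketch(scores, threshold):
    '''pick, for each added name, the removed name scoring best above threshold

    ``scores`` maps (removedname, addedname) pairs to similarity scores; the
    result maps each addedname to its best (removedname, score) pair.
    '''
    copies = {}
    for (removedname, addedname), myscore in scores.items():
        # the threshold acts as the initial "score to beat" for each file
        bestscore = copies.get(addedname, (None, threshold))[1]
        if myscore > bestscore:
            copies[addedname] = (removedname, myscore)
    return copies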


def _dropempty(fctxs):
    return [x for x in fctxs if x.size() > 0]


def findrenames(repo, added, removed, threshold):
    '''find renamed files -- yields (before, after, score) tuples'''
    wctx = repo[None]
    pctx = wctx.p1()

    # Zero-length files are frequently unrelated to each other, and tracking
    # the deletion/addition of such a file will probably cause more harm than
    # good. We strip them out here to avoid matching them later on.
    addedfiles = _dropempty(wctx[fp] for fp in sorted(added))
    removedfiles = _dropempty(pctx[fp] for fp in sorted(removed) if fp in pctx)

    # Find exact matches.
    matchedfiles = set()
    for (a, b) in _findexactmatches(repo, addedfiles, removedfiles):
        matchedfiles.add(b)
        yield (a.path(), b.path(), 1.0)

    # If the user requested similar files to be matched, search for them also.
    if threshold < 1.0:
        addedfiles = [x for x in addedfiles if x not in matchedfiles]
        for (a, b, score) in _findsimilarmatches(
            repo, addedfiles, removedfiles, threshold
        ):
            yield (a.path(), b.path(), score)
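

# Illustrative usage sketch, not part of the original module: roughly how a
# caller such as ``hg addremove --similarity 75`` is expected to drive
# findrenames(); the real wiring lives in scmutil. The status handling is
# simplified and the helper name is hypothetical.
def _printrenamesketch(repo, threshold=0.75):
    st = repo.status(unknown=True)
    # unknown files are rename destination candidates, deleted files are
    # rename source candidates
    added = sorted(st.unknown)
    removed = sorted(st.deleted)
    for old, new, score in findrenames(repo, added, removed, threshold):
        repo.ui.write(
            b'%s -> %s (%d%% similar)\n' % (old, new, int(score * 100))
        )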