view mercurial/similar.py @ 35767:5f5fb279fd39

streamclone: also stream caches to the client

When stream clone is used over bundle2, relevant cache files are also
streamed. This is expected to be a massive performance win for clone, since
no important cache will have to be recomputed.

Some performance numbers:
(All times are wall-clock times in seconds, 2 attempts per case.)

# Mozilla-Central

## Clone over ssh over lan
V1 streaming: 234.3 239.6
V2 streaming: 248.4 243.7

## Clone over ssh over internet
V1 streaming: 175.5 110.9
V2 streaming: 109.1 111.0

## Clone over HTTP over lan
V1 streaming: 105.3 105.6
V2 streaming: 112.7 111.4

## Clone over HTTP over internet
V1 streaming: 105.6 114.6
V2 streaming: 226.7 225.9

## Hg tags
V1 streaming (no cache):   1.084 1.071
V2 streaming (with cache): 0.312 0.325

## Hg branches
V1 streaming (no cache):   14.047 14.148
V2 streaming (with cache):  0.312  0.333

# Pypy

## Clone over ssh over internet
V1 streaming: 29.4 30.1
V2 streaming: 31.2 30.1

## Clone over HTTP over internet
V1 streaming: 29.7 29.7
V2 streaming: 75.2 72.9

(Since ssh and lan are not affected, there seems to be an issue with how we
read/write the HTTP stream on connections with latency, unrelated to the
format.)

## Hg tags
V1 streaming (no cache):   1.752 1.664
V2 streaming (with cache): 0.274 0.260

## Hg branches
V1 streaming (no cache):   4.469 4.728
V2 streaming (with cache): 0.318 0.321

# Private repository

* 500K revisions
* 11K topological heads
* 28K branch heads

## hg tags
no cache:   1543.332
with cache:    4.900

## hg branches
no cache:   91.828
with cache:  2.955
author Boris Feld <boris.feld@octobus.net>
date Thu, 18 Jan 2018 00:50:12 +0100
parents ded48ad55146
children cd196be26cb7

# similar.py - mechanisms for finding similar files
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _
from . import (
    mdiff,
)

def _findexactmatches(repo, added, removed):
    '''find renamed files that have no changes

    Takes a list of new filectxs and a list of removed filectxs, and yields
    (before, after) tuples of exact matches.
    '''
    numfiles = len(added) + len(removed)

    # Build a table of removed files: {hash(fctx.data()): [fctx, ...]}.
    # Storing only the hash lets fctx.data() be dropped from memory once
    # each removed file has been processed.
    hashes = {}
    for i, fctx in enumerate(removed):
        repo.ui.progress(_('searching for exact renames'), i, total=numfiles,
                         unit=_('files'))
        h = hash(fctx.data())
        hashes.setdefault(h, []).append(fctx)

    # For each added file, see if it corresponds to a removed file.
    for i, fctx in enumerate(added):
        repo.ui.progress(_('searching for exact renames'), i + len(removed),
                         total=numfiles, unit=_('files'))
        adata = fctx.data()
        h = hash(adata)
        for rfctx in hashes.get(h, []):
            # compare between actual file contents for exact identity
            if adata == rfctx.data():
                yield (rfctx, fctx)
                break

    # Done; clear the progress bar.
    repo.ui.progress(_('searching for exact renames'), None)
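
# Editorial sketch (not part of the original module): the same
# hash-bucket-then-confirm technique as _findexactmatches() above, reduced
# to plain (name, data) pairs so it can be read and run standalone. All
# names here are illustrative.
def _exactmatchsketch(added, removed):
    # bucket removed entries by the hash of their content
    buckets = {}
    for name, data in removed:
        buckets.setdefault(hash(data), []).append((name, data))
    # for each added entry, confirm bucket candidates byte-for-byte,
    # since hash() can collide
    for name, data in added:
        for rname, rdata in buckets.get(hash(data), []):
            if data == rdata:
                yield (rname, name)
                break
# e.g. list(_exactmatchsketch([('new', b'x')], [('old', b'x')]))
# yields [('old', 'new')]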

def _ctxdata(fctx):
    # load the file text and split it into lines; callers invoke this
    # lazily, only when a comparison is actually needed
    orig = fctx.data()
    return orig, mdiff.splitnewlines(orig)

def _score(fctx, otherdata):
    orig, lines = otherdata
    text = fctx.data()
    # mdiff.blocks() returns blocks of matching lines;
    # count the number of bytes in each.
    equal = 0
    matches = mdiff.blocks(text, orig)
    for x1, x2, y1, y2 in matches:
        for line in lines[y1:y2]:
            equal += len(line)

    lengths = len(text) + len(orig)
    return equal * 2.0 / lengths

def score(fctx1, fctx2):
    '''score the similarity of two file contexts, from 0.0 to 1.0'''
    return _score(fctx1, _ctxdata(fctx2))
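
# Editorial worked example of the formula above: for text = b'a\nb\nc\n'
# and orig = b'a\nx\nc\n', the matching blocks cover the lines b'a\n' and
# b'c\n', so equal == 4 and lengths == 12, giving a score of
# 4 * 2.0 / 12 ~= 0.67. Identical contents score 1.0; contents sharing no
# complete line score 0.0.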

def _findsimilarmatches(repo, added, removed, threshold):
    '''find potentially renamed files based on similar file content

    Takes a list of new filectxs and a list of removed filectxs, and yields
    (before, after, score) tuples of partial matches.
    '''
    copies = {}
    for i, r in enumerate(removed):
        repo.ui.progress(_('searching for similar files'), i,
                         total=len(removed), unit=_('files'))

        data = None
        for a in added:
            bestscore = copies.get(a, (None, threshold))[1]
            if data is None:
                data = _ctxdata(r)
            myscore = _score(a, data)
            if myscore > bestscore:
                copies[a] = (r, myscore)
    repo.ui.progress(_('searching for similar files'), None)

    for dest, v in copies.iteritems():
        source, bscore = v
        yield source, dest, bscore
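
# Editorial note on the loop above: copies.get(a, (None, threshold))[1]
# seeds each added file's best score with the threshold itself, so a
# removed file is recorded as the source only when it scores strictly
# higher than both the threshold and any previously recorded source. For
# example, with threshold = 0.75 and candidates scoring 0.6, 0.8 and 0.9
# in that order, 0.6 is ignored, 0.8 is recorded, then 0.9 replaces it.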

def _dropempty(fctxs):
    return [x for x in fctxs if x.size() > 0]

def findrenames(repo, added, removed, threshold):
    '''find renamed files -- yields (before, after, score) tuples'''
    wctx = repo[None]
    pctx = wctx.p1()

    # Zero length files will be frequently unrelated to each other, and
    # tracking the deletion/addition of such a file will probably cause more
    # harm than good. We strip them out here to avoid matching them later on.
    addedfiles = _dropempty(wctx[fp] for fp in sorted(added))
    removedfiles = _dropempty(pctx[fp] for fp in sorted(removed) if fp in pctx)

    # Find exact matches.
    matchedfiles = set()
    for (a, b) in _findexactmatches(repo, addedfiles, removedfiles):
        matchedfiles.add(b)
        yield (a.path(), b.path(), 1.0)

    # If the user requested similar files to be matched, search for them also.
    if threshold < 1.0:
        addedfiles = [x for x in addedfiles if x not in matchedfiles]
        for (a, b, score) in _findsimilarmatches(repo, addedfiles,
                                                 removedfiles, threshold):
            yield (a.path(), b.path(), score)
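
# Editorial usage sketch (hypothetical caller; 'added' and 'removed' are
# assumed to come from a status walk, as in Mercurial's addremove
# machinery, and are not defined here):
#
#   renames = {}
#   for old, new, fscore in findrenames(repo, added, removed, 0.75):
#       repo.ui.status('%s -> %s (%d%% similar)\n' % (old, new, fscore * 100))
#       renames[new] = old
#
# The threshold corresponds to 'hg addremove --similarity 75'; with a
# threshold of 1.0, findrenames() reports exact matches only and skips the
# O(len(added) * len(removed)) similarity pass.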