view mercurial/similar.py @ 25708:d3d32643c060

wireproto: correctly escape batched args and responses (issue4739)

This issue appears to be as old as wireproto batching itself: I can reproduce the failure as far back as 08ef6b5f3715 trivially by rebasing the test changes in this patch, which was back in the 1.9 era. I didn't test before that change, because prior to that the test file has a different name and I'm lazy.

Note that the test thought it was checking this case, but it actually wasn't: it put a literal ; in the arg and response for its greet command, but the mangle/unmangle step defined in the test meant that instead of "Fo, =;o" going over the wire, "Gp-!><p" went instead, which doesn't contain any special characters (those being [.=;]) and thus doesn't exercise the escaping. The test has been updated to use pre-unmangled special characters, so the request is now "Fo+<:o", which mangles to "Gp,=;p". I have confirmed that the test fails without the adjustment to the escaping rules in wireproto.py.

No existing clients of RPC batching were depending on the old behavior in any way. The only *actual* users of batchable RPCs in core were:

1) largefiles, which batches up many statlfile calls. It sends hexlified hashes over the wire and gets a 0, 1, or 2 back as a response. No risk of special characters.

2) setdiscovery, which was using heads() and known(), both of which communicate via hexlified nodes. Again, no risk of special characters.

Since the escaping functionality has been completely broken since it was introduced, we know that it has no users. As such, we can change the escaping mechanism without having to worry about backwards compatibility issues.

For the curious, this was detected by chance: it happens that the lz4-compressed text of a test file for remotefilelog compressed to something containing a ;, which then caused the failure when I moved remotefilelog to using batching for file content fetching.
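For context on the escaping itself: the batch protocol joins several commands and their key=value arguments into one request, using characters such as ';', ',', ':' and '=' as separators, so any argument or response containing those characters has to be escaped before it crosses the wire and unescaped on the other side. Below is a minimal sketch of that style of escaping, using hypothetical helper names; the real helpers live in wireproto.py and may differ in detail.

def escapebatcharg(plain):
    # hypothetical sketch: replace each protocol metacharacter with a
    # ':'-prefixed letter; ':' itself must be escaped first so the escape
    # sequences introduced below cannot be confused with real separators
    return (plain
            .replace(':', ':c')
            .replace(',', ':o')
            .replace(';', ':s')
            .replace('=', ':e'))

def unescapebatcharg(escaped):
    # reverse the substitutions, restoring ':' last so the other escape
    # sequences are not misread along the way
    return (escaped
            .replace(':e', '=')
            .replace(':s', ';')
            .replace(':o', ',')
            .replace(':c', ':'))

With this scheme a value such as 'a=b;c' round-trips through the two helpers unchanged.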
author Augie Fackler <augie@google.com>
date Tue, 30 Jun 2015 19:19:17 -0400
parents 525fdb738975
children a56c47ed3885

# similar.py - mechanisms for finding similar files
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
import util
import mdiff
import bdiff

def _findexactmatches(repo, added, removed):
    '''find renamed files that have no changes

    Takes a list of new filectxs and a list of removed filectxs, and yields
    (before, after) tuples of exact matches.
    '''
    numfiles = len(added) + len(removed)

    # Get hashes of removed files.
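    # (a removed file can match an added file only if their full contents
    # produce identical SHA-1 digests, i.e. the bytes are unchanged)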
    hashes = {}
    for i, fctx in enumerate(removed):
        repo.ui.progress(_('searching for exact renames'), i, total=numfiles)
        h = util.sha1(fctx.data()).digest()
        hashes[h] = fctx

    # For each added file, see if it corresponds to a removed file.
    for i, fctx in enumerate(added):
        repo.ui.progress(_('searching for exact renames'), i + len(removed),
                total=numfiles)
        h = util.sha1(fctx.data()).digest()
        if h in hashes:
            yield (hashes[h], fctx)

    # Done
    repo.ui.progress(_('searching for exact renames'), None)

def _findsimilarmatches(repo, added, removed, threshold):
    '''find potentially renamed files based on similar file content

    Takes a list of new filectxs and a list of removed filectxs, and yields
    (before, after, score) tuples of partial matches.
    '''
    copies = {}
    for i, r in enumerate(removed):
        repo.ui.progress(_('searching for similar files'), i,
                         total=len(removed))

        # lazily load text
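        # (util.cachefunc memoizes the result, so r's contents are read and
        # split into lines only once even though score() is invoked for
        # every added file)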
        @util.cachefunc
        def data():
            orig = r.data()
            return orig, mdiff.splitnewlines(orig)

        def score(text):
            orig, lines = data()
            # bdiff.blocks() returns blocks of matching lines
            # count the number of bytes in each
            equal = 0
            matches = bdiff.blocks(text, orig)
            for x1, x2, y1, y2 in matches:
                for line in lines[y1:y2]:
                    equal += len(line)

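            # score is the fraction of bytes shared with the removed file:
            # 2 * matching bytes / (len(text) + len(orig)); 1.0 == identical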
            lengths = len(text) + len(orig)
            return equal * 2.0 / lengths

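        # compare this removed file against every added file, remembering
        # for each added file only the best match at or above the threshold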
        for a in added:
            bestscore = copies.get(a, (None, threshold))[1]
            myscore = score(a.data())
            if myscore >= bestscore:
                copies[a] = (r, myscore)
    repo.ui.progress(_('searching for similar files'), None)

    for dest, v in copies.iteritems():
        source, score = v
        yield source, dest, score

def findrenames(repo, added, removed, threshold):
    '''find renamed files -- yields (before, after, score) tuples'''
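    # threshold is a ratio between 0 and 1: 1.0 reports exact renames only,
    # while e.g. 0.75 also reports files whose content is 75% similar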
    parentctx = repo['.']
    workingctx = repo[None]

    # Zero length files will be frequently unrelated to each other, and
    # tracking the deletion/addition of such a file will probably cause more
    # harm than good. We strip them out here to avoid matching them later on.
    addedfiles = set([workingctx[fp] for fp in added
            if workingctx[fp].size() > 0])
    removedfiles = set([parentctx[fp] for fp in removed
            if fp in parentctx and parentctx[fp].size() > 0])

    # Find exact matches.
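    # Each exact match scores 1.0; the matched added file is removed from the
    # set so the (slower) similarity pass below does not consider it again.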
    for (a, b) in _findexactmatches(repo,
            sorted(addedfiles), sorted(removedfiles)):
        addedfiles.remove(b)
        yield (a.path(), b.path(), 1.0)

    # If the user requested similar files to be matched, search for them also.
    if threshold < 1.0:
        for (a, b, score) in _findsimilarmatches(repo,
                sorted(addedfiles), sorted(removedfiles), threshold):
            yield (a.path(), b.path(), score)