mercurial/similar.py
author FUJIWARA Katsunori <foozy@lares.dti.ne.jp>
Tue, 04 Jul 2017 23:13:46 +0900
changeset 33278 87bca10a06ed
parent 32202 ded48ad55146
child 38348 cd196be26cb7
permissions -rw-r--r--
transaction: avoid file stat ambiguity only for files in blacklist

Advancing mtime via os.utime() fails with EPERM if the target file is
owned by another user. bff5ccbe5ead and related changes made some code
paths give up advancing mtime in that case, to fix issue5418.

This causes file stat ambiguity (again) if the file is owned by
another user.

https://www.mercurial-scm.org/wiki/ExactCacheValidationPlan

To avoid file stat ambiguity in that case, especially for
.hg/dirstate, ed66ec39933f made vfs.rename() copy the target file and
advance the mtime of the renamed one again on EPERM (see issue5584 for
details).

But a straightforward "copy if EPERM" isn't reasonable for truncating
append-only files at rollback, because rollback might have to truncate
many filelogs, even though filelogs aren't filecache-ed (and so don't
need the protection in the first place).

Therefore, this patch introduces the blacklist "checkambigfiles" and
avoids file stat ambiguity only for the files it lists.

This patch consists of the two parts below, which should be applied at
once to avoid a regression:

- specify 'checkambig=True' at vfs.open(mode='a') in _playback(),
  according to checkambigfiles

- invoke _playback() with checkambigfiles:
  - add a transaction.__init__() checkambigfiles argument, for _abort()
  - make localrepo instantiate transaction with _cachedfiles
  - add a rollback() checkambigfiles argument, for "hg rollback/recover"
  - make localrepo invoke rollback() with _cachedfiles

After this patch, a straightforward "copy if EPERM" becomes reasonable
when closing files opened with checkambig=True, because that policy now
applies only to files listed in the blacklist "checkambigfiles".
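
A minimal sketch of the "advance mtime, copy if EPERM" handling
described above, assuming a hypothetical helper name (the real logic
lives in vfs.rename() and transaction._playback()):

    import errno
    import os
    import shutil

    def _avoidambig(path, oldstat):
        # push mtime one second past the old stat, so readers comparing
        # cached stat data can tell the new content apart
        advanced = oldstat.st_mtime + 1
        try:
            os.utime(path, (advanced, advanced))
        except OSError as err:
            if err.errno != errno.EPERM:
                raise
            # the file is owned by another user, so utime() is denied:
            # re-create the file as ourselves, then retry on the copy
            tmp = path + '.ambigfix'  # hypothetical temporary name
            shutil.copyfile(path, tmp)
            os.rename(tmp, path)
            os.utime(path, (advanced, advanced))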

# similar.py - mechanisms for finding similar files
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _
from . import (
    mdiff,
)

def _findexactmatches(repo, added, removed):
    '''find renamed files that have no changes

    Takes a list of new filectxs and a list of removed filectxs, and yields
    (before, after) tuples of exact matches.
    '''
    numfiles = len(added) + len(removed)

    # Build table of removed files: {hash(fctx.data()): [fctx, ...]}.
    # We use hash() to discard fctx.data() from memory.
    hashes = {}
    for i, fctx in enumerate(removed):
        repo.ui.progress(_('searching for exact renames'), i, total=numfiles,
                         unit=_('files'))
        h = hash(fctx.data())
        if h not in hashes:
            hashes[h] = [fctx]
        else:
            hashes[h].append(fctx)

    # For each added file, see if it corresponds to a removed file.
    for i, fctx in enumerate(added):
        repo.ui.progress(_('searching for exact renames'), i + len(removed),
                         total=numfiles, unit=_('files'))
        adata = fctx.data()
        h = hash(adata)
        for rfctx in hashes.get(h, []):
            # compare between actual file contents for exact identity
            if adata == rfctx.data():
                yield (rfctx, fctx)
                break

    # Done
    repo.ui.progress(_('searching for exact renames'), None)
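
# A minimal self-contained sketch (hypothetical helper, not Mercurial
# API) of the same "hash first, compare bytes only on a hash hit"
# pattern used by _findexactmatches() above, with plain byte strings
# standing in for filectxs:
def _exactmatchdemo(added, removed):
    buckets = {}
    for data in removed:
        buckets.setdefault(hash(data), []).append(data)
    for data in added:
        for candidate in buckets.get(hash(data), []):
            if candidate == data:  # hash() can collide; verify bytes
                yield candidate, data
                break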

def _ctxdata(fctx):
    # load the text and split it into lines; callers delay calling this
    # until the data is actually needed
    orig = fctx.data()
    return orig, mdiff.splitnewlines(orig)

def _score(fctx, otherdata):
    orig, lines = otherdata
    text = fctx.data()
    # mdiff.blocks() returns blocks of matching lines
    # count the number of bytes in each
    equal = 0
    matches = mdiff.blocks(text, orig)
    for x1, x2, y1, y2 in matches:
        for line in lines[y1:y2]:
            equal += len(line)

    lengths = len(text) + len(orig)
    return equal * 2.0 / lengths

def score(fctx1, fctx2):
    return _score(fctx1, _ctxdata(fctx2))
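
# A rough stdlib illustration (hypothetical helper, not Mercurial API)
# of the Dice-style ratio computed by _score() above. Note that difflib
# counts matching lines here, while mdiff.blocks() weights matching
# blocks by bytes, so the numbers only approximate each other:
def _scoredemo():
    import difflib
    a = b'one\ntwo\nthree\n'
    b = b'one\nTWO\nthree\n'
    alines, blines = a.splitlines(True), b.splitlines(True)
    sm = difflib.SequenceMatcher(None, alines, blines)
    matched = sum(size for _i, _j, size in sm.get_matching_blocks())
    return matched * 2.0 / (len(alines) + len(blines))  # -> 0.666...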

def _findsimilarmatches(repo, added, removed, threshold):
    '''find potentially renamed files based on similar file content

    Takes a list of new filectxs and a list of removed filectxs, and yields
    (before, after, score) tuples of partial matches.
    '''
    copies = {}
    for i, r in enumerate(removed):
        repo.ui.progress(_('searching for similar files'), i,
                         total=len(removed), unit=_('files'))

        data = None
        for a in added:
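            # 'threshold' doubles as the initial best score: candidates
            # scoring at or below it are never recorded in 'copies'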
            bestscore = copies.get(a, (None, threshold))[1]
            if data is None:
                data = _ctxdata(r)
            myscore = _score(a, data)
            if myscore > bestscore:
                copies[a] = (r, myscore)
    # the topic must match the one used above, or the progress bar for
    # this phase is never cleared
    repo.ui.progress(_('searching for similar files'), None)

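    # Note: 'copies' is keyed by the added file, so each added file
    # keeps only its single best source, while one removed file may be
    # reported as the source of several added files (rename plus copies).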
    for dest, v in copies.iteritems():
        source, bscore = v
        yield source, dest, bscore

def _dropempty(fctxs):
    return [x for x in fctxs if x.size() > 0]

def findrenames(repo, added, removed, threshold):
    '''find renamed files -- yields (before, after, score) tuples'''
    wctx = repo[None]
    pctx = wctx.p1()

    # Zero length files will be frequently unrelated to each other, and
    # tracking the deletion/addition of such a file will probably cause more
    # harm than good. We strip them out here to avoid matching them later on.
    addedfiles = _dropempty(wctx[fp] for fp in sorted(added))
    removedfiles = _dropempty(pctx[fp] for fp in sorted(removed) if fp in pctx)

    # Find exact matches.
    matchedfiles = set()
    for (a, b) in _findexactmatches(repo, addedfiles, removedfiles):
        matchedfiles.add(b)
        yield (a.path(), b.path(), 1.0)

    # If the user requested similar files to be matched, search for them also.
    if threshold < 1.0:
        addedfiles = [x for x in addedfiles if x not in matchedfiles]
        for (a, b, score) in _findsimilarmatches(repo, addedfiles,
                                                 removedfiles, threshold):
            yield (a.path(), b.path(), score)
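
# Typical use (a sketch; in practice scmutil drives this for
# 'hg addremove --similarity', passing the -s percentage divided
# by 100 as the threshold):
#
#   for old, new, score in findrenames(repo, added, removed, 0.75):
#       repo.ui.status('%s -> %s (%d%% similar)\n'
#                      % (old, new, score * 100))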