view mercurial/filelog.py @ 21883:87aa279f7073

largefiles: show also how many data entities are outgoing at "hg outgoing"

Before this patch, "hg outgoing --large" shows which largefiles are changed or added in outgoing revisions only from the point of view of filenames.

For example, going by the list of outgoing largefiles shown in the "hg outgoing" output, users would expect the former case below to cost much more to upload than the latter:

- outgoing revisions add a hundred largefiles, but all of them refer to the same data entity: only one data entity is outgoing, even though "hg summary" says that a hundred largefiles are outgoing
- a hundred outgoing revisions change only one largefile, each with distinct data: a hundred data entities are outgoing, even though "hg summary" says that only one largefile is outgoing

But in fact the latter costs much more than the former.

This patch makes "hg outgoing" also show how many data entities are outgoing, by counting the number of unique hash values among the outgoing largefiles (a rough sketch of this counting follows the changeset metadata below). When "--debug" is specified, it also shows which entities (by hash) are outgoing for each listed largefile, for debugging purposes.

In the "ui.debugflag" route, "addfunc()" can always append the given "lfhash" to the list "toupload[fn]" without a duplication check, because de-duplication is already done in "_getoutgoings()".
author FUJIWARA Katsunori <foozy@lares.dti.ne.jp>
date Mon, 07 Jul 2014 18:45:46 +0900
parents 3bda242bf244
children 4669e26747c3
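The counting idea described in the changeset above can be illustrated with a small, self-contained sketch. This is not the actual largefiles implementation; count_outgoing, toupload, and the sample data are hypothetical stand-ins for the structures built by "_getoutgoings()":

# Minimal sketch (hypothetical, not the real largefiles code): count how many
# distinct data entities are outgoing, as opposed to how many filenames.
def count_outgoing(toupload):
    """toupload maps a largefile name to the set of content hashes
    (data entities) that are outgoing for that file."""
    files = len(toupload)
    entities = len(set().union(*toupload.values()))
    return files, entities

# a hundred filenames that all refer to the same data entity:
# only one entity actually needs to be uploaded
same_entity = dict(('file%d' % i, set(['aa' * 20])) for i in range(100))
print(count_outgoing(same_entity))   # (100, 1)

# one filename whose hundred outgoing revisions each carry distinct data:
# a hundred entities need to be uploaded
one_file = {'big.bin': set('%040x' % i for i in range(100))}
print(count_outgoing(one_file))      # (1, 100)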

# filelog.py - file history class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import revlog
import re

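# a filelog revision's metadata (if any) is framed by '\1\n' markers at the
# start of the revision text; _mdre locates the closing marker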
_mdre = re.compile('\1\n')
def _parsemeta(text):
    """return (metadatadict, keylist, metadatasize)"""
    # text can be buffer, so we can't use .startswith or .index
    if text[:2] != '\1\n':
        return None, None, None
    s = _mdre.search(text, 2).start()
    mtext = text[2:s]
    meta = {}
    keys = []
    for l in mtext.splitlines():
        k, v = l.split(": ", 1)
        meta[k] = v
        keys.append(k)
    return meta, keys, (s + 2)

def _packmeta(meta, keys=None):
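    """encode a metadata dict into "key: value" lines, in the given key
    order (sorted by key when no explicit order is passed)"""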
    if not keys:
        keys = sorted(meta.iterkeys())
    return "".join("%s: %s\n" % (k, meta[k]) for k in keys)

class filelog(revlog.revlog):
    def __init__(self, opener, path):
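        # each tracked file's history lives in its own revlog, stored in
        # the repository store as "data/<path>.i"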
        super(filelog, self).__init__(opener,
                        "/".join(("data", path + ".i")))

    def read(self, node):
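        # return the revision's file contents with any leading metadata
        # block ('\1\n' ... '\1\n') stripped off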
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return t
        s = t.index('\1\n', 2)
        return t[s + 2:]

    def add(self, text, meta, transaction, link, p1=None, p2=None):
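        # prepend a metadata block if there is metadata to store, or if the
        # text itself starts with '\1\n' and needs to be escaped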
        if meta or text.startswith('\1\n'):
            text = "\1\n%s\1\n%s" % (_packmeta(meta), text)
        return self.addrevision(text, transaction, link, p1, p2)

    def renamed(self, node):
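        # a copy/rename is recorded as a revision with a null first parent
        # and "copy"/"copyrev" metadata; return (source path, source node)
        # in that case, and False otherwise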
        if self.parents(node)[0] != revlog.nullid:
            return False
        t = self.revision(node)
        m = _parsemeta(t)[0]
        if m and "copy" in m:
            return (m["copy"], revlog.bin(m["copyrev"]))
        return False

    def size(self, rev):
        """return the size of a given revision"""

        # for revisions with renames, we have to go the slow way
        node = self.node(rev)
        if self.renamed(node):
            return len(self.read(node))

        # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
        return super(filelog, self).size(rev)

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """

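        # add() escapes text that itself starts with '\1\n' by prepending an
        # empty metadata block; mirror that here before comparing hashes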
        t = text
        if text.startswith('\1\n'):
            t = '\1\n\1\n' + text

        samehashes = not super(filelog, self).cmp(node, t)
        if samehashes:
            return False

        # renaming a file produces a different hash, even if the data
        # remains unchanged. Check if it's the case (slow):
        if self.renamed(node):
            t2 = self.read(node)
            return t2 != text

        return True

    def _file(self, f):
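        # return the filelog of another tracked file, using the same opener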
        return filelog(self.opener, f)