mercurial/changelog.py @ 17658:a02c1ffddae9 (stable)

largefiles: handle commit -A properly, after a --large commit (issue3542)

Previously, 'commit -A' would add files that were already committed as
largefiles as normal files, resulting in files being listed twice by
'status -A'. It also missed the case where (only) a largefile was deleted,
even though status reported it as '!'.

This also has the side effect of properly reporting the state of the affected
largefiles in the post-commit hook after a remove that also affected a normal
file (the largefiles used to show as 'R'; now they are properly absent).

Since scmutil.addremove() is called both by the ui command (after some trivial
argument validation) and during the commit process when -A is specified, it
seems like a more appropriate function to wrap than the addremove command.
Currently, a repo is only enabled to use largefiles after an add that
explicitly identifies some file as large, and a subsequent commit. Therefore,
this patch only changes behavior after such a largefile-enabling commit.

Note that in the test, if the final commit had a '-v', 'removing large8' would
be printed twice. Both messages originate in removelargefiles(). The first is
printed in verbose mode after traversing remove + forget; the second is printed
because the '_isaddremove' attribute is set and 'after' is not.
author Matt Harbison <matt_harbison@yahoo.com>
date Mon, 30 Jul 2012 20:56:41 -0400
parents 62c56c94c77e
children e7cfe3587ea4

# changelog.py - changelog class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid
from i18n import _
import util, error, revlog, encoding

_defaultextra = {'branch': 'default'}

def _string_escape(text):
    """
    >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
    >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
    >>> s
    'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
    >>> res = _string_escape(s)
    >>> s == res.decode('string_escape')
    True
    """
    # subset of the string_escape codec
    text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
    return text.replace('\0', '\\0')

def decodeextra(text):
    """
    >>> decodeextra(encodeextra({'foo': 'bar', 'baz': chr(0) + '2'}))
    {'foo': 'bar', 'baz': '\\x002', 'branch': 'default'}
    >>> decodeextra(encodeextra({'foo': 'bar', 'baz': chr(92) + chr(0) + '2'}))
    {'foo': 'bar', 'baz': '\\\\\\x002', 'branch': 'default'}
    """
    extra = _defaultextra.copy()
    for l in text.split('\0'):
        if l:
            if '\\0' in l:
                # fix up \0 without getting into trouble with \\0
                l = l.replace('\\\\', '\\\\\n')
                l = l.replace('\\0', '\0')
                l = l.replace('\n', '')
            k, v = l.decode('string_escape').split(':', 1)
            extra[k] = v
    return extra

def encodeextra(d):
    # keys must be sorted to produce a deterministic changelog entry
    items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)]
    return "\0".join(items)
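
# Illustrative example (not one of the module's doctests): because the keys
# are sorted, the output of encodeextra() is deterministic, e.g.
#
#   >>> encodeextra({'branch': 'stable', 'close': '1'})
#   'branch:stable\x00close:1'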

class appender(object):
    '''the changelog index must be updated last on disk, so we use this class
    to delay writes to it'''
    def __init__(self, fp, buf):
        self.data = buf
        self.fp = fp
        self.offset = fp.tell()
        self.size = util.fstat(fp).st_size

    def end(self):
        return self.size + len("".join(self.data))
    def tell(self):
        return self.offset
    def flush(self):
        pass
    def close(self):
        self.fp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and data'''
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.end() + offset
        if self.offset < self.size:
            self.fp.seek(self.offset)

    def read(self, count=-1):
        '''only trick here is reads that span real file and data'''
        ret = ""
        if self.offset < self.size:
            s = self.fp.read(count)
            ret = s
            self.offset += len(s)
            if count > 0:
                count -= len(s)
        if count != 0:
            doff = self.offset - self.size
            self.data.insert(0, "".join(self.data))
            del self.data[1:]
            s = self.data[0][doff:doff + count]
            self.offset += len(s)
            ret += s
        return ret

    def write(self, s):
        self.data.append(str(s))
        self.offset += len(s)

def delayopener(opener, target, divert, buf):
    '''build an opener that diverts writes to 'target': to a temporary '.a'
    file when 'divert' is set, otherwise into the in-memory list 'buf' via
    an appender; all other files are opened normally'''
    def o(name, mode='r'):
        if name != target:
            return opener(name, mode)
        if divert:
            return opener(name + ".a", mode.replace('a', 'w'))
        # otherwise, divert to memory
        return appender(opener(name, mode), buf)
    return o
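
# A rough usage sketch (illustrative only; 'vfs' and 'buf' are placeholder
# names, and real callers go through changelog.delayupdate() below):
#
#   buf = []
#   o = delayopener(vfs, '00changelog.i', False, buf)
#   f = o('00changelog.i', 'a+')     # an appender wrapping the real file
#   f.write('pending index data')    # accumulates in buf, not on disk
#   f = o('something-else')          # other files are opened normally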

class changelog(revlog.revlog):
    def __init__(self, opener):
        revlog.revlog.__init__(self, opener, "00changelog.i")
        if self._initempty:
            # changelogs don't benefit from generaldelta
            self.version &= ~revlog.REVLOGGENERALDELTA
            self._generaldelta = False
        self._realopener = opener
        self._delayed = False
        self._divert = False

    def delayupdate(self):
        "delay visibility of index updates to other readers"
        self._delayed = True
        self._divert = (len(self) == 0)
        self._delaybuf = []
        self.opener = delayopener(self._realopener, self.indexfile,
                                  self._divert, self._delaybuf)

    def finalize(self, tr):
        "finalize index updates"
        self._delayed = False
        self.opener = self._realopener
        # move redirected index data back into place
        if self._divert:
            nfile = self.opener(self.indexfile + ".a")
            n = nfile.name
            nfile.close()
            util.rename(n, n[:-2])
        elif self._delaybuf:
            fp = self.opener(self.indexfile, 'a')
            fp.write("".join(self._delaybuf))
            fp.close()
            self._delaybuf = []
        # split when we're done
        self.checkinlinesize(tr)

    def readpending(self, file):
        "read index data from a file with pending (unfinalized) entries"
        r = revlog.revlog(self.opener, file)
        self.index = r.index
        self.nodemap = r.nodemap
        self._nodecache = r._nodecache
        self._chunkcache = r._chunkcache

    def writepending(self):
        "create a file containing the unfinalized state for pretxnchangegroup"
        if self._delaybuf:
            # make a temporary copy of the index
            fp1 = self._realopener(self.indexfile)
            fp2 = self._realopener(self.indexfile + ".a", "w")
            fp2.write(fp1.read())
            # add pending data
            fp2.write("".join(self._delaybuf))
            fp2.close()
            # switch modes so finalize can simply rename
            self._delaybuf = []
            self._divert = True

        if self._divert:
            return True

        return False
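
    # A rough outline of how the delay machinery above is driven; the method
    # names are real, but the surrounding transaction handling is simplified
    # and illustrative:
    #
    #   cl.delayupdate()        # start buffering/diverting index writes
    #   ... add revisions ...   # data goes to memory or to 00changelog.i.a
    #   cl.writepending()       # expose pending data for pretxnchangegroup
    #   cl.finalize(tr)         # move/append the data into 00changelog.i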

    def checkinlinesize(self, tr, fp=None):
        if not self._delayed:
            revlog.revlog.checkinlinesize(self, tr, fp)

    def read(self, node):
        """
        format used:
        nodeid\n        : manifest node in ascii
        user\n          : user, no \n or \r allowed
        time tz extra\n : date (time is int or float, timezone is int)
                        : extra is metadata, encoded and separated by '\0'
                        : older versions ignore it
        files\n\n       : files modified by the cset, no \n or \r allowed
        (.*)            : comment (free text, ideally utf-8)

        changelog v0 doesn't use extra
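
        For illustration, a v1 entry (with made-up values) looks like:

            0123456789abcdef0123456789abcdef01234567
            Jane Doe <jane@example.com>
            1343696201 14400 branch:stable
            a/file.txt
            b/other.c

            commit message goes here

        where the blank line before the description is the '\n\n' separator
        split on below.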
        """
        text = self.revision(node)
        if not text:
            return (nullid, "", (0, 0), [], "", _defaultextra)
        last = text.index("\n\n")
        desc = encoding.tolocal(text[last + 2:])
        l = text[:last].split('\n')
        manifest = bin(l[0])
        user = encoding.tolocal(l[1])

        tdata = l[2].split(' ', 2)
        if len(tdata) != 3:
            time = float(tdata[0])
            try:
                # various tools did silly things with the time zone field.
                timezone = int(tdata[1])
            except ValueError:
                timezone = 0
            extra = _defaultextra
        else:
            time, timezone = float(tdata[0]), int(tdata[1])
            extra = decodeextra(tdata[2])

        files = l[3:]
        return (manifest, user, (time, timezone), files, desc, extra)

    def add(self, manifest, files, desc, transaction, p1, p2,
                  user, date=None, extra=None):
        # Convert to UTF-8 encoded bytestrings as the very first
        # thing: calling any method on a localstr object will turn it
        # into a str object and the cached UTF-8 string is thus lost.
        user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)

        user = user.strip()
        # An empty username or a username with a "\n" will make the
        # revision text contain two "\n\n" sequences -> corrupt
        # repository since read cannot unpack the revision.
        if not user:
            raise error.RevlogError(_("empty username"))
        if "\n" in user:
            raise error.RevlogError(_("username %s contains a newline")
                                    % repr(user))

        # strip trailing whitespace and leading and trailing empty lines
        desc = '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')

        if date:
            parseddate = "%d %d" % util.parsedate(date)
        else:
            parseddate = "%d %d" % util.makedate()
        if extra:
            branch = extra.get("branch")
            if branch in ("default", ""):
                del extra["branch"]
            elif branch in (".", "null", "tip"):
                raise error.RevlogError(_('the name \'%s\' is reserved')
                                        % branch)
        if extra:
            extra = encodeextra(extra)
            parseddate = "%s %s" % (parseddate, extra)
        l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
        text = "\n".join(l)
        return self.addrevision(text, transaction, len(self), p1, p2)