view mercurial/dagutil.py @ 23702:c48924787eaa

filectx.parents: enforce changeid of parent to be in own changectx ancestors

Because of the way filenodes are computed, you can have multiple changesets
"introducing" the same file revision. For example, in the changeset graph
below, changesets 2 and 3 both change a file -to- and -from- the same content:

o 3: content = new
|
| o 2: content = new
|/
o 1: content = old

In such cases, the file revision is created once, when 2 is added, and just
reused for 3. So the file change in 3 (from "old" to "new") has no linkrev
pointing to it. We'll call this situation "linkrev-shadowing". As the linkrev
is used for optimization purposes when walking a file's history,
linkrev-shadowing results in an unexpected jump to another branch during such
a walk. This leads to multiple bugs in log, annotate and rename detection.

One element in fixing such bugs is to ensure that walking the file history
sticks to the same topology as the changeset history. For this purpose, we
extend the logic in 'basefilectx.parents' so that it always determines the
proper changeset to associate the parent file revision with. This "proper"
changeset has to be an ancestor of the changeset associated with the child
file revision.

This logic is performed in the '_adjustlinkrev' function. This function is
given the starting changeset and all the information regarding the parent
file revision. If the linkrev for the file revision is an ancestor of the
starting changeset, the linkrev is valid and will be used. If it is not, we
have detected a topological jump caused by linkrev shadowing, and we walk the
ancestors of the starting changeset until we find one setting the file to the
revision we are trying to create.

The performance impact appears acceptable:

- We walk the changelog once per filelog traversal (as there should be no
  overlap between searches),
- changelog traversal itself is fairly cheap compared to what is likely to be
  performed on the result of the filelog traversal,
- we only touch the manifests of ancestors touching the file, and such
  changesets are likely to be the ones introducing the file (except in
  pathological cases involving merges),
- we use manifest diffs instead of full manifest unpacking to check manifest
  content, so this does not involve applying multiple diffs in most cases,
- linkrev shadowing is not the common case.

Tests for fixed issues in log, annotate and rename detection have been added.
But this changeset does not solve all problems. It fixes -ancestry-
computation, but if the linkrev-shadowed changeset is the starting one, we
will still get things wrong. We'll have to fix the bootstrapping of such
operations in a later changeset. Also, the usage of `hg log FILE` without
--follow still has issues with linkrevs pointing to hidden changesets,
because it relies on the `filelog` revset, which implements its own traversal
logic that is still to be fixed.

Thanks go to:

- Matt Mackall, for nudging me in the right direction,
- Julien Cristau and Rémi Cardona, for telling me for 3 years that linkrev
  bugs were an evolution show-stopper,
- Durham Goode, for finding a new linkrev issue every few weeks,
- Mads Kiilerich, for the last rename bug, which pushed this topic over my
  annoyance limit.
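To illustrate the ancestor walk described above, here is a minimal,
self-contained sketch of the idea. The 'adjust_linkrev' function, its toy
parent map and per-changeset file-node bookkeeping are illustrative
assumptions only; the real '_adjustlinkrev' operates on Mercurial's context
and manifest objects and restricts its checks to ancestors that actually
touch the file.

def adjust_linkrev(parents, filenodes, introrev, filenode, startrev):
    """Return a changeset that is an ancestor of startrev (inclusive)
    and records `filenode` for the tracked file.

    parents:   {rev: [parent revs]}, a toy changeset graph
    filenodes: {rev: file node recorded by that changeset}
    introrev:  the linkrev stored in the filelog for `filenode`
    """
    # Collect the ancestors of the starting changeset, including itself.
    ancestors = set()
    pending = [startrev]
    while pending:
        rev = pending.pop()
        if rev not in ancestors:
            ancestors.add(rev)
            pending.extend(parents[rev])
    # Fast path: the stored linkrev already lies in the right topology.
    if introrev in ancestors:
        return introrev
    # Linkrev shadowing: walk the ancestors of the starting changeset until
    # we find one that sets the file to the revision we are looking for.
    for rev in sorted(ancestors, reverse=True):
        if filenodes.get(rev) == filenode:
            return rev
    return None

# The graph from the message above: 2 and 3 both set the file to "new", so
# the filelog stores linkrev == 2. Walking from 3, rev 2 is not an ancestor
# of 3, so the file revision is re-associated with changeset 3.
parents = {1: [], 2: [1], 3: [1]}
filenodes = {1: "old", 2: "new", 3: "new"}
assert adjust_linkrev(parents, filenodes, introrev=2,
                      filenode="new", startrev=3) == 3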
author Pierre-Yves David <pierre-yves.david@fb.com>
date Tue, 23 Dec 2014 15:30:38 -0800
parents 4e5ac4a3a29b
children 6ddc86eedc3b
line source

# dagutil.py - dag utilities for mercurial
#
# Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
# and Peter Arrenbrecht <peter@arrenbrecht.ch>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import nullrev
from i18n import _


class basedag(object):
    '''generic interface for DAGs

    terms:
    "ix" (short for index) identifies a nodes internally,
    "id" identifies one externally.

    All params are ixs unless explicitly suffixed otherwise.
    Pluralized params are lists or sets.
    '''

    def __init__(self):
        self._inverse = None

    def nodeset(self):
        '''set of all node ixs'''
        raise NotImplementedError

    def heads(self):
        '''list of head ixs'''
        raise NotImplementedError

    def parents(self, ix):
        '''list of parent ixs of ix'''
        raise NotImplementedError

    def inverse(self):
        '''inverse DAG, where parents become children, etc.'''
        raise NotImplementedError

    def ancestorset(self, starts, stops=None):
        '''
        set of all ancestors of starts (incl), but stop walk at stops (excl)
        '''
        raise NotImplementedError

    def descendantset(self, starts, stops=None):
        '''
        set of all descendants of starts (incl), but stop walk at stops (excl)
        '''
        return self.inverse().ancestorset(starts, stops)

    def headsetofconnecteds(self, ixs):
        '''
        subset of connected list of ixs so that no node has a descendant in it

        By "connected list" we mean that if an ancestor and a descendant are in
        the list, then so is at least one path connecting them.
        '''
        raise NotImplementedError

    def externalize(self, ix):
        '''return a node id'''
        return self._externalize(ix)

    def externalizeall(self, ixs):
        '''return a list (or a set, if given a set) of node ids'''
        ids = self._externalizeall(ixs)
        if isinstance(ixs, set):
            return set(ids)
        return list(ids)

    def internalize(self, id):
        '''return a node ix'''
        return self._internalize(id)

    def internalizeall(self, ids, filterunknown=False):
        '''return a list (or a set, if given a set) of node ixs'''
        ixs = self._internalizeall(ids, filterunknown)
        if isinstance(ids, set):
            return set(ixs)
        return list(ixs)


class genericdag(basedag):
    '''generic implementations for DAGs'''

    def ancestorset(self, starts, stops=None):
        stops = stops and set(stops) or set()
        seen = set()
        pending = list(starts)
        while pending:
            n = pending.pop()
            if n not in seen and n not in stops:
                seen.add(n)
                pending.extend(self.parents(n))
        return seen

    def headsetofconnecteds(self, ixs):
        hds = set(ixs)
        if not hds:
            return hds
        for n in ixs:
            for p in self.parents(n):
                hds.discard(p)
        assert hds
        return hds


class revlogbaseddag(basedag):
    '''generic dag interface to a revlog'''

    def __init__(self, revlog, nodeset):
        basedag.__init__(self)
        self._revlog = revlog
        self._heads = None
        self._nodeset = nodeset

    def nodeset(self):
        return self._nodeset

    def heads(self):
        if self._heads is None:
            self._heads = self._getheads()
        return self._heads

    def _externalize(self, ix):
        return self._revlog.index[ix][7] # entry[7] is the node id
    def _externalizeall(self, ixs):
        idx = self._revlog.index
        return [idx[i][7] for i in ixs]

    def _internalize(self, id):
        ix = self._revlog.rev(id)
        if ix == nullrev:
            raise LookupError(id, self._revlog.indexfile, _('nullid'))
        return ix
    def _internalizeall(self, ids, filterunknown):
        rl = self._revlog
        if filterunknown:
            return [r for r in map(rl.nodemap.get, ids)
                    if (r is not None
                        and r != nullrev
                        and r not in rl.filteredrevs)]
        return map(self._internalize, ids)


class revlogdag(revlogbaseddag):
    '''dag interface to a revlog'''

    def __init__(self, revlog):
        revlogbaseddag.__init__(self, revlog, set(revlog))

    def _getheads(self):
        return [r for r in self._revlog.headrevs() if r != nullrev]

    def parents(self, ix):
        rlog = self._revlog
        idx = rlog.index
        revdata = idx[ix]
        prev = revdata[5]
        if prev != nullrev:
            prev2 = revdata[6]
            if prev2 == nullrev:
                return [prev]
            return [prev, prev2]
        prev2 = revdata[6]
        if prev2 != nullrev:
            return [prev2]
        return []

    def inverse(self):
        if self._inverse is None:
            self._inverse = inverserevlogdag(self)
        return self._inverse

    def ancestorset(self, starts, stops=None):
        rlog = self._revlog
        idx = rlog.index
        stops = stops and set(stops) or set()
        seen = set()
        pending = list(starts)
        while pending:
            rev = pending.pop()
            if rev not in seen and rev not in stops:
                seen.add(rev)
                revdata = idx[rev]
                for i in [5, 6]:
                    prev = revdata[i]
                    if prev != nullrev:
                        pending.append(prev)
        return seen

    def headsetofconnecteds(self, ixs):
        if not ixs:
            return set()
        rlog = self._revlog
        idx = rlog.index
        headrevs = set(ixs)
        for rev in ixs:
            revdata = idx[rev]
            for i in [5, 6]:
                prev = revdata[i]
                if prev != nullrev:
                    headrevs.discard(prev)
        assert headrevs
        return headrevs

    def linearize(self, ixs):
        '''linearize and topologically sort a list of revisions

        The linearization process tries to create long runs of revs where
        a child rev comes immediately after its first parent. This is done by
        visiting the heads of the given revs in inverse topological order,
        and for each visited rev, visiting its second parent, then its first
        parent, then adding the rev itself to the output list.
        '''
        sorted = []
        visit = list(self.headsetofconnecteds(ixs))
        visit.sort(reverse=True)
        finished = set()

        while visit:
            cur = visit.pop()
            if cur < 0:
                cur = -cur - 1
                if cur not in finished:
                    sorted.append(cur)
                    finished.add(cur)
            else:
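                # re-push cur encoded as (-cur - 1) so it is emitted on its
                # second visit, after its parents have been handled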
                visit.append(-cur - 1)
                visit += [p for p in self.parents(cur)
                          if p in ixs and p not in finished]
        assert len(sorted) == len(ixs)
        return sorted


class inverserevlogdag(revlogbaseddag, genericdag):
    '''inverse of an existing revlog dag; see revlogdag.inverse()'''

    def __init__(self, orig):
        revlogbaseddag.__init__(self, orig._revlog, orig._nodeset)
        self._orig = orig
        self._children = {}
        self._roots = []
        self._walkfrom = len(self._revlog) - 1

    def _walkto(self, walkto):
        rev = self._walkfrom
        cs = self._children
        roots = self._roots
        idx = self._revlog.index
        while rev >= walkto:
            data = idx[rev]
            isroot = True
            for prev in [data[5], data[6]]: # parent revs
                if prev != nullrev:
                    cs.setdefault(prev, []).append(rev)
                    isroot = False
            if isroot:
                roots.append(rev)
            rev -= 1
        self._walkfrom = rev

    def _getheads(self):
        self._walkto(nullrev)
        return self._roots

    def parents(self, ix):
        if ix is None:
            return []
        if ix <= self._walkfrom:
            self._walkto(ix)
        return self._children.get(ix, [])

    def inverse(self):
        return self._orig
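
The following is not part of dagutil.py: a minimal sketch of how the
genericdag interface above can be exercised with an in-memory parent map,
assuming a (Python 2 era) Mercurial installation where mercurial.dagutil is
importable. The dictdag class and the toy graph are illustrative assumptions.

from mercurial import dagutil

class dictdag(dagutil.genericdag):
    '''toy DAG backed by a {node: [parent nodes]} dict'''

    def __init__(self, parentmap):
        dagutil.genericdag.__init__(self)
        self._parentmap = parentmap

    def nodeset(self):
        return set(self._parentmap)

    def heads(self):
        return self.headsetofconnecteds(self.nodeset())

    def parents(self, ix):
        return self._parentmap[ix]

# Diamond graph: 0 is the root, 1 and 2 branch off it, 3 merges them.
dag = dictdag({0: [], 1: [0], 2: [0], 3: [1, 2]})
assert dag.ancestorset([3]) == set([0, 1, 2, 3])
assert dag.ancestorset([3], stops=[1]) == set([0, 2, 3])
assert dag.headsetofconnecteds(set([0, 1, 2])) == set([1, 2])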