view mercurial/setdiscovery.py @ 20182:04036798ebed

branches: avoid unnecessary changectx.branch() calls

This requires reading from the changelog, which can be costly over NFS.
Note that this does not totally remove reading from the changelog; we
still do that when calling changectx.closesbranch(). That call will be
removed in a later patch.

Running hg branches on the PyPy repo (with 996 branches) over a busy NFS
server, before this change:

$ time hg --profile branches > /dev/null
CallCount  Recursive  Total(s)  Inline(s)  module:lineno(function)
     2042          0    2.2827     2.2827  <open>
     2036          0    0.9840     0.9840  <method 'close' of 'file' objects>
     2036          0    0.0464     0.0464  <method 'read' of 'file' objects>
     5233          0    0.1985     0.0453  mercurial.repoview:161(changelog)
    10462          0    0.0791     0.0314  mercurial.changelog:133(tip)
     5233          0    0.0388     0.0176  mercurial.localrepo:26(__get__)
    10462          0    0.0250     0.0126  <len>
     5233          0    0.0059     0.0039  mercurial.repoview:112(filterrevs)
    10462          0    0.0029     0.0029  <hash>
     2034          0    0.0444     0.0444  <method 'seek' of 'file' objects>
     5340          0    0.0390     0.0390  mercurial.revlog:296(rev)
     2582          0    0.0371     0.0371  <zlib.decompress>
     3155          0    0.1963     0.0366  mercurial.context:202(__init__)
     3155          0    0.1238     0.0306  mercurial.repoview:161(changelog)
     3155          0    0.0261     0.0080  mercurial.changelog:183(rev)
     9465          0    0.0061     0.0061  <isinstance>
     1096          0    0.0023     0.0023  <binascii.unhexlify>
     4251          0    0.0014     0.0014  <len>
     2059          0    3.7341     0.0332  mercurial.changelog:270(read)
     2059          0    3.6304     0.0307  mercurial.revlog:907(revision)
     2057          0    0.0262     0.0137  mercurial.changelog:28(decodeextra)
     4118          0    0.0094     0.0094  <method 'split' of 'str' objects>
     4118          0    0.0270     0.0048  mercurial.encoding:61(tolocal)
     2059          0    0.0040     0.0040  <method 'index' of 'str' objects>
    10462          0    0.0791     0.0314  mercurial.changelog:133(tip)
    10462          0    0.0289     0.0207  mercurial.changelog:190(node)
    10462          0    0.0188     0.0091  <len>
    52433      20932    0.0478     0.0310  <len>
    20932          0    0.0221     0.0168  mercurial.revlog:262(__len__)
     2059          0    3.6304     0.0307  mercurial.revlog:907(revision)

real    0m4.361s
user    0m0.986s
sys     0m0.237s

After this change:

$ time hg --profile branches > /dev/null
CallCount  Recursive  Total(s)  Inline(s)  module:lineno(function)
     1069          0    1.1098     1.1098  <open>
     1063          0    0.4865     0.4865  <method 'close' of 'file' objects>
     4122          0    0.1811     0.0404  mercurial.repoview:161(changelog)
     8240          0    0.0712     0.0272  mercurial.changelog:133(tip)
     4122          0    0.0378     0.0177  mercurial.localrepo:26(__get__)
     8240          0    0.0221     0.0115  <len>
     4122          0    0.0057     0.0033  mercurial.repoview:112(filterrevs)
     8240          0    0.0025     0.0025  <hash>
     3029          0    0.1979     0.0371  mercurial.context:202(__init__)
     3029          0    0.1278     0.0310  mercurial.repoview:161(changelog)
     3029          0    0.0230     0.0081  mercurial.changelog:183(rev)
     9087          0    0.0061     0.0061  <isinstance>
     1096          0    0.0026     0.0026  <binascii.unhexlify>
     4125          0    0.0014     0.0014  <len>
     4229          0    0.0337     0.0337  mercurial.revlog:296(rev)
     1061          0    0.0296     0.0296  <method 'seek' of 'file' objects>
     1063          0    0.0292     0.0292  <method 'read' of 'file' objects>
     8240          0    0.0712     0.0272  mercurial.changelog:133(tip)
     8240          0    0.0271     0.0196  mercurial.changelog:190(node)
     8240          0    0.0169     0.0083  <len>
    40476      16488    0.0422     0.0271  <len>
    16488          0    0.0193     0.0152  mercurial.revlog:262(__len__)
     1342          0    0.0241     0.0241  <zlib.decompress>
     9445          0    0.0336     0.0224  mercurial.changelog:190(node)
     9445          0    0.0112     0.0112  mercurial.revlog:317(node)
     1074          0    1.9102     0.0224  mercurial.changelog:270(read)
     1074          0    1.8397     0.0202  mercurial.revlog:907(revision)
     1073          0    0.0187     0.0099  mercurial.changelog:28(decodeextra)
     2148          0    0.0061     0.0061  <method 'split' of 'str' objects>
     2148          0    0.0184     0.0034  mercurial.encoding:61(tolocal)

real    0m2.402s
user    0m0.735s
sys     0m0.177s
author Brodie Rao <brodie@sf.io>
date Fri, 15 Nov 2013 23:18:08 -0500
parents 1e5b38a919dd
children cdecbc5ab504

# setdiscovery.py - improved discovery of common nodeset for mercurial
#
# Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
# and Peter Arrenbrecht <peter@arrenbrecht.ch>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import nullid
from i18n import _
import random
import util, dagutil

def _updatesample(dag, nodes, sample, always, quicksamplesize=0):
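    """Update ``sample`` with nodes reachable from the heads of ``nodes``
    (or of the whole graph when ``nodes`` is empty).

    The walk is breadth-first; a node enters the sample when its distance
    from a head is an exact power of two, so the sample thins out
    exponentially away from the heads. Nodes in ``always`` are never added
    (they are reported regardless), and the walk returns early once a
    nonzero ``quicksamplesize`` has been reached.
    """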
    # if nodes is empty we scan the entire graph
    if nodes:
        heads = dag.headsetofconnecteds(nodes)
    else:
        heads = dag.heads()
    dist = {}
    visit = util.deque(heads)
    seen = set()
    factor = 1
    while visit:
        curr = visit.popleft()
        if curr in seen:
            continue
        d = dist.setdefault(curr, 1)
        if d > factor:
            factor *= 2
        if d == factor:
            if curr not in always: # need this check for the early exit below
                sample.add(curr)
                if quicksamplesize and (len(sample) >= quicksamplesize):
                    return
        seen.add(curr)
        for p in dag.parents(curr):
            if not nodes or p in nodes:
                dist.setdefault(p, d + 1)
                visit.append(p)
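
# A minimal illustration (not part of the original module): running
# _updatesample over a hypothetical 100-revision linear chain. The
# ``_chaindag`` and ``_demoupdatesample`` names below exist only for
# this sketch.
class _chaindag(object):
    """Toy DAG: a linear chain 0 <- 1 <- ... <- n-1 with a single head."""
    def __init__(self, n):
        self._n = n
    def heads(self):
        return [self._n - 1]
    def headsetofconnecteds(self, nodes):
        return set([max(nodes)])
    def parents(self, rev):
        return [rev - 1] if rev > 0 else []

def _demoupdatesample():
    # on a 100-revision chain the sampled revs sit at distances 1, 2, 4,
    # ... from head 99, i.e. [36, 68, 84, 92, 96, 98, 99]
    sample = set()
    _updatesample(_chaindag(100), None, sample, always=set())
    return sorted(sample)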

def _setupsample(dag, nodes, size):
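    """Set up a sample of at most ``size`` nodes to be taken from ``nodes``.

    Returns a tuple (always, sample, desiredlen). When ``nodes`` fits
    within ``size``, or when its heads alone already exhaust ``size``,
    ``sample`` is None and the first element is what should be sent as is.
    Otherwise ``always`` holds the heads (which are always sent),
    ``sample`` is an empty set for the caller to fill, and ``desiredlen``
    is the number of extra nodes the sample should receive.
    """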
    if len(nodes) <= size:
        return set(nodes), None, 0
    always = dag.headsetofconnecteds(nodes)
    desiredlen = size - len(always)
    if desiredlen <= 0:
        # This could be bad if there are very many heads, all unknown to the
        # server. We're counting on long request support here.
        return always, None, desiredlen
    return always, set(), desiredlen

def _takequicksample(dag, nodes, size, initial):
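    """Take a sample of at most ``size`` nodes, scanning from heads only.

    On the initial round the walk may roam the entire local graph; later
    rounds restrict it to ``nodes``. The underlying walk stops as soon as
    enough nodes are collected, which makes this cheaper than
    _takefullsample but biases the sample towards the heads.
    """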
    always, sample, desiredlen = _setupsample(dag, nodes, size)
    if sample is None:
        return always
    if initial:
        fromset = None
    else:
        fromset = nodes
    _updatesample(dag, fromset, sample, always, quicksamplesize=desiredlen)
    sample.update(always)
    return sample

def _takefullsample(dag, nodes, size):
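    """Take a sample of ``size`` nodes, scanning from heads and roots.

    The sample is filled twice: walking ``nodes`` from its heads, then
    walking the inverted graph from its roots. The result is trimmed to
    ``desiredlen`` random nodes (or padded with random undecided nodes)
    before the heads in ``always`` are added back.
    """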
    always, sample, desiredlen = _setupsample(dag, nodes, size)
    if sample is None:
        return always
    # update from heads
    _updatesample(dag, nodes, sample, always)
    # update from roots
    _updatesample(dag.inverse(), nodes, sample, always)
    assert sample
    if len(sample) > desiredlen:
        sample = set(random.sample(sample, desiredlen))
    elif len(sample) < desiredlen:
        more = desiredlen - len(sample)
        sample.update(random.sample(list(nodes - sample - always), more))
    sample.update(always)
    return sample

def findcommonheads(ui, local, remote,
                    initialsamplesize=100,
                    fullsamplesize=200,
                    abortwhenunrelated=True):
    '''Return a tuple (common, anyincoming, remoteheads) used to identify
    missing nodes from or in remote.
    '''
    roundtrips = 0
    cl = local.changelog
    dag = dagutil.revlogdag(cl)

    # early exit if we know all the specified remote heads already
    ui.debug("query 1; heads\n")
    roundtrips += 1
    ownheads = dag.heads()
    sample = ownheads
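    # The ``known`` wire command returns a list of booleans, one per node
    # sent, telling us which of our heads the server already has; ``heads``
    # returns the server's own head hashes. When the remote supports it,
    # batch() folds both queries into a single round trip.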
    if remote.local():
        # stopgap until we have a proper localpeer that supports batch()
        srvheadhashes = remote.heads()
        yesno = remote.known(dag.externalizeall(sample))
    elif remote.capable('batch'):
        batch = remote.batch()
        srvheadhashesref = batch.heads()
        yesnoref = batch.known(dag.externalizeall(sample))
        batch.submit()
        srvheadhashes = srvheadhashesref.value
        yesno = yesnoref.value
    else:
        # compatibility with pre-batch, but post-known remotes during 1.9
        # development
        srvheadhashes = remote.heads()
        sample = []

    if cl.tip() == nullid:
        if srvheadhashes != [nullid]:
            return [nullid], True, srvheadhashes
        return [nullid], False, []

    # start actual discovery (we note this before the next "if" for
    # compatibility reasons)
    ui.status(_("searching for changes\n"))

    srvheads = dag.internalizeall(srvheadhashes, filterunknown=True)
    if len(srvheads) == len(srvheadhashes):
        ui.debug("all remote heads known locally\n")
        return (srvheadhashes, False, srvheadhashes,)

    if sample and util.all(yesno):
        ui.note(_("all local heads known remotely\n"))
        ownheadhashes = dag.externalizeall(ownheads)
        return (ownheadhashes, True, srvheadhashes,)

    # full blown discovery

    # own nodes where I don't know if remote knows them
    undecided = dag.nodeset()
    # own nodes I know we both know
    common = set()
    # own nodes I know remote lacks
    missing = set()

    # treat remote heads (and maybe own heads) as a first implicit sample
    # response
    common.update(dag.ancestorset(srvheads))
    undecided.difference_update(common)

    full = False
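    # Each round classifies every sampled node: if the server knows it,
    # all of its ancestors are common; if not, all of its descendants are
    # missing. Both sets are removed from ``undecided``, and since a
    # nonempty ``undecided`` always produces a nonempty sample, the loop
    # terminates.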
    while undecided:

        if sample:
            commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
            common.update(dag.ancestorset(commoninsample, common))

            missinginsample = [n for i, n in enumerate(sample) if not yesno[i]]
            missing.update(dag.descendantset(missinginsample, missing))

            undecided.difference_update(missing)
            undecided.difference_update(common)

        if not undecided:
            break

        if full:
            ui.note(_("sampling from both directions\n"))
            sample = _takefullsample(dag, undecided, size=fullsamplesize)
        elif common:
            # use cheapish initial sample (currently identical to the
            # full sample above apart from the logging)
            ui.debug("taking initial sample\n")
            sample = _takefullsample(dag, undecided, size=fullsamplesize)
        else:
            # use even cheaper initial sample
            ui.debug("taking quick initial sample\n")
            sample = _takequicksample(dag, undecided, size=initialsamplesize,
                                      initial=True)

        roundtrips += 1
        ui.progress(_('searching'), roundtrips, unit=_('queries'))
        ui.debug("query %i; still undecided: %i, sample size is: %i\n"
                 % (roundtrips, len(undecided), len(sample)))
        # indices between sample and externalized version must match
        sample = list(sample)
        yesno = remote.known(dag.externalizeall(sample))
        full = True

    result = dag.headsetofconnecteds(common)
    ui.progress(_('searching'), None)
    ui.debug("%d total queries\n" % roundtrips)

    if not result and srvheadhashes != [nullid]:
        if abortwhenunrelated:
            raise util.Abort(_("repository is unrelated"))
        else:
            ui.warn(_("warning: repository is unrelated\n"))
        return (set([nullid]), True, srvheadhashes,)

    anyincoming = (srvheadhashes != [nullid])
    return dag.externalizeall(result), anyincoming, srvheadhashes