view mercurial/logexchange.py @ 42743:8c9a6adec67a

rust-discovery: using the children cache in add_missing

The DAG range computation often needs to get back to very old revisions, and
turns out to be disproportionately long, given that the end goal is to remove
the descendants of the given missing revisions from the undecided set. The
fast iteration capabilities available in the Rust case make it possible to
avoid the DAG range entirely, at the cost of precomputing the children cache,
and to simply iterate on children of the given missing revisions.

This is a case where staying on the same side of the interface between the
two languages has clear benefits.

On discoveries with initial undecided sets small enough to bypass sampling
entirely, the total cost of computing the children cache and the subsequent
iteration becomes better than the Python + C counterpart, which relies on
reachableroots2. For example, on a repo with more than one million revisions
with an initial undecided set of 11 elements, we get these figures:

  Rust version with simple iteration
    addcommons: 57.287us
    first undecided computation: 184.278334ms
    first children cache computation: 131.056us
    addmissings iteration: 42.766us
    first addinfo total: 185.24 ms

  Python + C version
    first addcommons: 0.29 ms
    addcommons 0.21 ms
    first undecided computation 191.35 ms
    addmissings 45.75 ms
    first addinfo total: 237.77 ms

On discoveries with large undecided sets, the initial price paid makes the
first addinfo slower than the Python + C version, but that's more than
compensated for by the gain in sampling and subsequent iterations. Here's an
extreme example with an undecided set of a million revisions:

  Rust version:
    first undecided computation: 293.842629ms
    first children cache computation: 407.911297ms
    addmissings iteration: 34.312869ms
    first addinfo total: 776.02 ms
    taking initial sample
    query 2: sampling time: 1318.38 ms
    query 2; still undecided: 1005013, sample size is: 200
    addmissings: 143.062us

  Python + C version:
    first undecided computation 298.13 ms
    addmissings 80.13 ms
    first addinfo total: 399.62 ms
    taking initial sample
    query 2: sampling time: 3957.23 ms
    query 2; still undecided: 1005013, sample size is: 200
    addmissings 52.88 ms

Differential Revision: https://phab.mercurial-scm.org/D6428
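To make the approach concrete, here is a minimal Python sketch of the idea
described above. It is not the Rust implementation from D6428; the names
(childrenmap, undecided, missing) and data structures are illustrative
assumptions. Given a precomputed children map standing in for the children
cache, the missing revisions and all their descendants are dropped from the
undecided set by a plain breadth-first walk, with no DAG range computation:

    from collections import deque

    def remove_missing_and_descendants(childrenmap, undecided, missing):
        # childrenmap: dict mapping a revision number to the list of its
        # children (a hypothetical stand-in for the children cache);
        # undecided: set of revision numbers; missing: iterable of
        # known-missing revisions.
        seen = set()
        queue = deque(missing)
        while queue:
            rev = queue.popleft()
            if rev in seen:
                continue
            seen.add(rev)
            undecided.discard(rev)
            queue.extend(childrenmap.get(rev, ()))

The one-time cost of building such a children map corresponds to the
"children cache computation" lines in the figures above; each subsequent
addmissings call is then a cheap walk over children.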
author Georges Racinet <georges.racinet@octobus.net>
date Tue, 16 Apr 2019 01:16:39 +0200
parents 876494fd967d
children 2372284d9457

# logexchange.py
#
# Copyright 2017 Augie Fackler <raf@durin42.com>
# Copyright 2017 Sean Farley <sean@farley.io>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .node import hex

from . import (
    util,
    vfs as vfsmod,
)

# name of the directory in .hg/ in which the remotenames files are stored
remotenamedir = 'logexchange'
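# each file in that directory starts with a storage format version line
# ('0' for the initial format) followed by a blank line, then has one entry
# per line of the form: node '\0' remotepath '\0' name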

def readremotenamefile(repo, filename):
    """
    reads a file from the .hg/logexchange/ directory and yields its content
    filename: the file to be read
    yields tuples of the form (node, remotepath, name)
    """

    vfs = vfsmod.vfs(repo.vfs.join(remotenamedir))
    if not vfs.exists(filename):
        return
    f = vfs(filename)
    lineno = 0
    for line in f:
        line = line.strip()
        if not line:
            continue
        # the first non-empty line holds the storage format version
        if lineno == 0:
            lineno += 1
            continue
        try:
            node, remote, rname = line.split('\0')
            yield node, remote, rname
        except ValueError:
            pass

    f.close()

def readremotenames(repo):
    """
    read the details about the remotenames stored in .hg/logexchange/ and
    yield tuples of the form (node, remotepath, name). It does not indicate
    whether a yielded entry is a branch or a bookmark; to get that
    information, read the individual files with readremotenamefile().
    """

    for bmentry in readremotenamefile(repo, 'bookmarks'):
        yield bmentry
    for branchentry in readremotenamefile(repo, 'branches'):
        yield branchentry

def writeremotenamefile(repo, remotepath, names, nametype):
    vfs = vfsmod.vfs(repo.vfs.join(remotenamedir))
    f = vfs(nametype, 'w', atomictemp=True)
    # write the storage version info at the top of the file
    # version '0' represents the very first version of the storage format
    f.write('0\n\n')

    olddata = set(readremotenamefile(repo, nametype))
    # re-save the entries that came from remotes other than this one
    for node, oldpath, rname in sorted(olddata):
        if oldpath != remotepath:
            f.write('%s\0%s\0%s\n' % (node, oldpath, rname))

    for name, node in sorted(names.iteritems()):
        if nametype == "branches":
            for n in node:
                f.write('%s\0%s\0%s\n' % (n, remotepath, name))
        elif nametype == "bookmarks":
            if node:
                f.write('%s\0%s\0%s\n' % (node, remotepath, name))

    f.close()

def saveremotenames(repo, remotepath, branches=None, bookmarks=None):
    """
    save remotenames, i.e. remote bookmarks and remote branches, in their
    respective files under the ".hg/logexchange/" directory.
    """
    wlock = repo.wlock()
    try:
        if bookmarks:
            writeremotenamefile(repo, remotepath, bookmarks, 'bookmarks')
        if branches:
            writeremotenamefile(repo, remotepath, branches, 'branches')
    finally:
        wlock.release()

def activepath(repo, remote):
    """returns remote path"""
    # is the remote a local peer
    local = remote.local()

    # determine the remote path from the repo, if possible; else just
    # use the string given to us
    rpath = remote
    if local:
        rpath = util.pconvert(remote._repo.root)
    elif not isinstance(remote, bytes):
        rpath = remote._url

    # represent the remote path with the user-defined path name, if one exists
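    # e.g. with "[paths] upstream = https://example.org/repo" configured, a
    # remote at https://user:pass@example.org/repo would be reported as
    # 'upstream' (alias and URLs here are hypothetical, for illustration)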
    for path, url in repo.ui.configitems('paths'):
        # remove auth info from user defined url
        noauthurl = util.removeauth(url)

        # Standardize on unix style paths, otherwise some {remotenames} end up
        # being an absolute path on Windows.
        url = util.pconvert(bytes(url))
        noauthurl = util.pconvert(noauthurl)
        if url == rpath or noauthurl == rpath:
            rpath = path
            break

    return rpath

def pullremotenames(localrepo, remoterepo):
    """
    pulls bookmark and branch information from the remote repo during a
    pull or clone operation.
    localrepo is our local repository
    remoterepo is the peer instance
    """
    remotepath = activepath(localrepo, remoterepo)

    with remoterepo.commandexecutor() as e:
        bookmarks = e.callcommand('listkeys', {
            'namespace': 'bookmarks',
        }).result()

    # on a push, we don't want to keep obsolete heads since
    # they won't show up as heads on the next pull, so we
    # remove them here; otherwise we would require the user
    # to issue a pull to refresh the storage
    bmap = {}
    repo = localrepo.unfiltered()

    with remoterepo.commandexecutor() as e:
        branchmap = e.callcommand('branchmap', {}).result()
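    # the branchmap command maps each branch name to a list of binary node
    # ids; below, only locally known, non-obsolete nodes are kept, stored
    # as hex strings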

    for branch, nodes in branchmap.iteritems():
        bmap[branch] = []
        for node in nodes:
            if node in repo and not repo[node].obsolete():
                bmap[branch].append(hex(node))

    saveremotenames(localrepo, remotepath, bmap, bookmarks)