# __init__.py - asv benchmark suite
#
# Copyright 2016 Logilab SA <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# "historical portability" policy of contrib/benchmarks:
#
# This code must work correctly with the current Mercurial stable branch and,
# if possible at reasonable cost, with earlier Mercurial versions as well.

'''ASV (https://asv.readthedocs.io) benchmark suite

Benchmarks are parameterized against reference repositories found in the
directory pointed to by the REPOS_DIR environment variable.
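Each subdirectory of REPOS_DIR that contains a .hg directory is used as a
benchmark repository.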

Invocation example:

    $ export REPOS_DIR=~/hgperf/repos
    # run the suite on a given revision
    $ asv --config contrib/asv.conf.json run REV
    # run the suite on new changesets found in the stable and default branches
    $ asv --config contrib/asv.conf.json run NEW
    # display a table comparing benchmark results between two revisions
    $ asv --config contrib/asv.conf.json compare REV1 REV2
    # detect regressions and generate the ASV static website
    $ asv --config contrib/asv.conf.json publish
    # serve the static website
    $ asv --config contrib/asv.conf.json preview
'''

from __future__ import absolute_import

import functools
import os
import re

from mercurial import (
    extensions,
    hg,
    ui as uimod,
    util,
)

basedir = os.path.abspath(os.path.join(os.path.dirname(__file__),
                          os.path.pardir, os.path.pardir))
reposdir = os.environ['REPOS_DIR']
reposnames = [name for name in os.listdir(reposdir)
              if os.path.isdir(os.path.join(reposdir, name, ".hg"))]
if not reposnames:
    raise ValueError("No repositories found in $REPOS_DIR")
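# perf.py commands print timing lines of the form (values illustrative):
#   ! wall 0.000123 comb 0.010000 user 0.010000 sys 0.000000 (best of 25)
# outputre captures the wall time from such a line.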
outputre = re.compile(r'! wall (\d+\.\d+) comb \d+\.\d+ user \d+\.\d+ sys '
                      r'\d+\.\d+ \(best of \d+\)')

def runperfcommand(reponame, command, *args, **kwargs):
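    # isolate benchmarks from the user's configuration: read config files
    # only from ASVHGRCPATH, if set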
    os.environ["HGRCPATH"] = os.environ.get("ASVHGRCPATH", "")
    # for "historical portability"
    # ui.load() has been available since d83ca85
    if util.safehasattr(uimod.ui, "load"):
        ui = uimod.ui.load()
    else:
        ui = uimod.ui()
    repo = hg.repository(ui, os.path.join(reposdir, reponame))
    perfext = extensions.load(ui, 'perfext',
                              os.path.join(basedir, 'contrib', 'perf.py'))
    cmd = getattr(perfext, command)
    ui.pushbuffer()
    cmd(ui, repo, *args, **kwargs)
    output = ui.popbuffer()
    match = outputre.search(output)
    if not match:
        raise ValueError("Invalid output {0}".format(output))
    return float(match.group(1))

def perfbench(repos=reposnames, name=None, params=None):
    """decorator to declare ASV benchmark based on contrib/perf.py extension

    An ASV benchmark is a python function with the given attributes:

    __name__: should start with track_, time_ or mem_ to be collected by ASV
    params and param_name: parameter matrix to display multiple graphs on the
    same page.
    pretty_name: If defined it's displayed in web-ui instead of __name__
    (useful for revsets)
    the module name is prepended to the benchmark name and displayed as
    "category" in webui.

    Benchmarks are automatically parameterized with repositories found in the
    REPOS_DIR environment variable.

    `params` is the param matrix in the form of a list of tuple
    (param_name, [value0, value1])

    For example [(x, [a, b]), (y, [c, d])] declare benchmarks for
    (a, c), (a, d), (b, c) and (b, d).
    """
    params = list(params or [])
    params.insert(0, ("repo", repos))

    def decorator(func):
        @functools.wraps(func)
        def wrapped(repo, *args):
            def perf(command, *a, **kw):
                return runperfcommand(repo, command, *a, **kw)
            return func(perf, *args)

        wrapped.params = [p[1] for p in params]
        wrapped.param_names = [p[0] for p in params]
        wrapped.pretty_name = name
        return wrapped
    return decorator
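
# Example usage from a benchmark module (a minimal sketch; assumes
# contrib/perf.py provides a "perftags" command):
#
#     from . import perfbench
#
#     @perfbench()
#     def track_tags(perf):
#         return perf("perftags")
#
# ASV runs track_tags once per repository found in REPOS_DIR and plots the
# wall time returned by the perf command.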