perf: add asv benchmarks
Airspeed Velocity (ASV) is a Python framework for benchmarking Python packages
over their lifetime. The results are displayed in an interactive web frontend.
Add ASV benchmarks for Mercurial that use the contrib/perf.py extension and
can be run against multiple reference repositories.
The benchmark suite currently covers the revsets from contrib/base-revsets.txt
(each with several variants), plus perftags, perfstatus, perfmanifest and
perfheads.
Installation requires asv>=0.2, python-hglib and virtualenv.
This is part of PerformanceTrackingSuitePlan
https://www.mercurial-scm.org/wiki/PerformanceTrackingSuitePlan
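For example, a typical setup and first run could look like this (the
repository path is only illustrative):

    $ pip install "asv>=0.2" python-hglib virtualenv
    $ export REPOS_DIR=~/hgperf/repos
    $ asv --config contrib/asv.conf.json run NEW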
--- a/.hgignore Tue Nov 15 16:10:57 2016 +0100
+++ b/.hgignore Thu Sep 29 10:16:34 2016 +0200
@@ -49,6 +49,7 @@
tags
cscope.*
.idea/*
+.asv/*
i18n/hg.pot
locale/*/LC_MESSAGES/hg.mo
hgext/__index__.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/asv.conf.json Thu Sep 29 10:16:34 2016 +0200
@@ -0,0 +1,13 @@
+{
+ "version": 1,
+ "project": "mercurial",
+ "project_url": "https://mercurial-scm.org/",
+ "repo": "..",
+ "branches": ["default", "stable"],
+ "environment_type": "virtualenv",
+ "show_commit_url": "https://www.mercurial-scm.org/repo/hg/rev/",
+ "benchmark_dir": "benchmarks",
+ "env_dir": "../.asv/env",
+ "results_dir": "../.asv/results",
+ "html_dir": "../.asv/html"
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/benchmarks/__init__.py Thu Sep 29 10:16:34 2016 +0200
@@ -0,0 +1,112 @@
+# __init__.py - asv benchmark suite
+#
+# Copyright 2016 Logilab SA <contact@logilab.fr>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''ASV (https://asv.readthedocs.io) benchmark suite
+
+Benchmarks are parameterized against reference repositories found in the
+directory pointed to by the REPOS_DIR environment variable.
+
+Invocation example:
+
+ $ export REPOS_DIR=~/hgperf/repos
+    # run the suite on a given revision
+    $ asv --config contrib/asv.conf.json run REV
+    # run the suite on new changesets in the stable and default branches
+    $ asv --config contrib/asv.conf.json run NEW
+    # display a comparison table of benchmark results between two given
+    # revisions
+    $ asv --config contrib/asv.conf.json compare REV1 REV2
+    # detect regressions and generate the ASV static website
+ $ asv --config contrib/asv.conf.json publish
+ # serve the static website
+ $ asv --config contrib/asv.conf.json preview
+'''
+
+from __future__ import absolute_import
+
+import functools
+import os
+import re
+
+from mercurial import (
+ extensions,
+ hg,
+ ui as uimod,
+)
+
+basedir = os.path.abspath(os.path.join(os.path.dirname(__file__),
+ os.path.pardir, os.path.pardir))
+reposdir = os.environ['REPOS_DIR']
+reposnames = [name for name in os.listdir(reposdir)
+ if os.path.isdir(os.path.join(reposdir, name, ".hg"))]
+if not reposnames:
+ raise ValueError("No repositories found in $REPO_DIR")
+outputre = re.compile(r'! wall (\d+\.\d+) comb \d+\.\d+ user \d+\.\d+ sys '
+                      r'\d+\.\d+ \(best of \d+\)')
+
+def runperfcommand(reponame, command, *args, **kwargs):
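+    # run with a clean HGRCPATH (overridable via ASVHGRCPATH) so local user
+    # configuration does not skew the measurements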
+ os.environ["HGRCPATH"] = os.environ.get("ASVHGRCPATH", "")
+ ui = uimod.ui()
+ repo = hg.repository(ui, os.path.join(reposdir, reponame))
+ perfext = extensions.load(ui, 'perfext',
+ os.path.join(basedir, 'contrib', 'perf.py'))
+ cmd = getattr(perfext, command)
+ ui.pushbuffer()
+ cmd(ui, repo, *args, **kwargs)
+ output = ui.popbuffer()
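+    # extract the wall clock time from perf.py's "! wall ..." output line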
+ match = outputre.search(output)
+ if not match:
+ raise ValueError("Invalid output {0}".format(output))
+ return float(match.group(1))
+
+def perfbench(repos=reposnames, name=None, params=None):
+ """decorator to declare ASV benchmark based on contrib/perf.py extension
+
+    An ASV benchmark is a Python function with the following attributes:
+
+ __name__: should start with track_, time_ or mem_ to be collected by ASV
+    params and param_names: parameter matrix used to display multiple graphs
+        on the same page.
+    pretty_name: if defined, it is displayed in the web UI instead of
+        __name__ (useful for revsets).
+    The module name is prepended to the benchmark name and displayed as
+        the "category" in the web UI.
+
+ Benchmarks are automatically parameterized with repositories found in the
+ REPOS_DIR environment variable.
+
+    `params` is the parameter matrix, given as a list of tuples
+    (param_name, [value0, value1]).
+
+    For example, [(x, [a, b]), (y, [c, d])] declares benchmarks for
+    (a, c), (a, d), (b, c) and (b, d).
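+
+    A minimal usage sketch (see contrib/benchmarks/perf.py for actual
+    benchmarks):
+
+        @perfbench(params=[("rev", ["1000", "tip"])])
+        def track_example(perf, rev):
+            return perf("perfmanifest", rev)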
+ """
+ params = list(params or [])
+ params.insert(0, ("repo", repos))
+
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapped(repo, *args):
+ def perf(command, *a, **kw):
+ return runperfcommand(repo, command, *a, **kw)
+ return func(perf, *args)
+
+ wrapped.params = [p[1] for p in params]
+ wrapped.param_names = [p[0] for p in params]
+ wrapped.pretty_name = name
+ return wrapped
+ return decorator
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/benchmarks/perf.py Thu Sep 29 10:16:34 2016 +0200
@@ -0,0 +1,26 @@
+# perf.py - asv benchmarks using contrib/perf.py extension
+#
+# Copyright 2016 Logilab SA <contact@logilab.fr>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from . import perfbench
+
+@perfbench()
+def track_tags(perf):
+ return perf("perftags")
+
+@perfbench()
+def track_status(perf):
+ return perf("perfstatus", unknown=False)
+
+@perfbench(params=[('rev', ['1000', '10000', 'tip'])])
+def track_manifest(perf, rev):
+ return perf("perfmanifest", rev)
+
+@perfbench()
+def track_heads(perf):
+ return perf("perfheads")
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/benchmarks/revset.py Thu Sep 29 10:16:34 2016 +0200
@@ -0,0 +1,57 @@
+# revset.py - asv revset benchmarks
+#
+# Copyright 2016 Logilab SA <contact@logilab.fr>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''ASV revset benchmarks generated from contrib/base-revsets.txt
+
+Each revset benchmark is parameterized with variants (first, last, sort, ...)
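+
+For instance, the "sort+first" variant of a base revset "all()" times the
+expression "first(sort(all()))" through perfrevset.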
+'''
+
+from __future__ import absolute_import
+
+import os
+import string
+import sys
+
+from . import basedir, perfbench
+
+def createrevsetbenchmark(baseset, variants=None):
+ if variants is None:
+ # Default variants
+ variants = ["plain", "first", "last", "sort", "sort+first",
+ "sort+last"]
+ fname = "track_" + "_".join("".join([
+        c if c in string.digits + string.ascii_letters else " "
+ for c in baseset
+ ]).split())
+
+ def wrap(fname, baseset):
+ @perfbench(name=baseset, params=[("variant", variants)])
+ def f(perf, variant):
+ revset = baseset
+ if variant != "plain":
+ for var in variant.split("+"):
+ revset = "%s(%s)" % (var, revset)
+ return perf("perfrevset", revset)
+ f.__name__ = fname
+ return f
+ return wrap(fname, baseset)
+
+def initializerevsetbenchmarks():
+ mod = sys.modules[__name__]
+ with open(os.path.join(basedir, 'contrib', 'base-revsets.txt'),
+ 'rb') as fh:
+ for line in fh:
+ baseset = line.strip()
+ if baseset and not baseset.startswith('#'):
+ func = createrevsetbenchmark(baseset)
+ setattr(mod, func.__name__, func)
+
+initializerevsetbenchmarks()