changeset 42381:563cd9a72682

perf: add a `pre-run` option

Sometimes an initial run is needed to warm caches that are not relevant to the current measurement. We add a new `perf.pre-run` option to specify a number of runs of the benchmark logic to perform before any measurement is taken.
author Pierre-Yves David <pierre-yves.david@octobus.net>
date Tue, 21 May 2019 15:08:06 +0200
parents 45c18f7345c1
children 3293086ff663
files contrib/perf.py tests/test-contrib-perf.t
diffstat 2 files changed, 36 insertions(+), 2 deletions(-)
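The pattern the patch implements is simple: execute the benchmarked
function (and its setup) a few times without recording anything, so
that one-time cache population stays out of the measured samples. A
minimal standalone sketch of the idea, independent of perf.py (the
names `measure` and `warmup_runs` are illustrative only):

  import time

  def measure(func, setup=None, warmup_runs=0, runs=3):
      # Warm-up phase: run setup+func but record no timings, so
      # one-time cache population does not pollute the samples.
      for _ in range(warmup_runs):
          if setup is not None:
              setup()
          func()
      # Measured phase.
      samples = []
      for _ in range(runs):
          if setup is not None:
              setup()
          begin = time.perf_counter()
          func()
          samples.append(time.perf_counter() - begin)
      return min(samples)

  print(measure(lambda: sum(range(100000)), warmup_runs=2))

In the patch itself the warm-up count comes from the new
`perf.pre-run` option and defaults to 0, as the hunks below show.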
--- a/contrib/perf.py	Mon May 20 18:09:41 2019 -0700
+++ b/contrib/perf.py	Tue May 21 15:08:06 2019 +0200
@@ -15,6 +15,9 @@
 ``presleep``
   number of seconds to wait before any group of runs (default: 1)
 
+``pre-run``
+  number of runs to perform before starting measurement (default: 0).
+
 ``run-limits``
   Control the number of runs each benchmark will perform. The option value
   should be a list of `<time>-<numberofrun>` pairs. After each run the
@@ -240,6 +243,9 @@
     configitem(b'perf', b'all-timing',
         default=mercurial.configitems.dynamicdefault,
     )
+    configitem(b'perf', b'pre-run',
+        default=mercurial.configitems.dynamicdefault,
+    )
     configitem(b'perf', b'run-limits',
         default=mercurial.configitems.dynamicdefault,
     )
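The option is registered with `dynamicdefault`, like its siblings
here; the effective default (0) is supplied where the value is read,
by the `getint` call in the next hunk. A standalone sketch of that
read-with-fallback pattern (the plain `config` dict is a hypothetical
stand-in for Mercurial's config API):

  def getint(config, key, default):
      # Return the configured integer, falling back to the
      # caller-supplied default when the key is unset.
      value = config.get(key)
      return default if value is None else int(value)

  config = {}
  assert getint(config, 'perf.pre-run', 0) == 0
  config['perf.pre-run'] = '3'
  assert getint(config, 'perf.pre-run', 0) == 3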
@@ -341,7 +347,9 @@
     if not limits:
         limits = DEFAULTLIMITS
 
-    t = functools.partial(_timer, fm, displayall=displayall, limits=limits)
+    prerun = getint(ui, b"perf", b"pre-run", 0)
+    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
+                          prerun=prerun)
     return t, fm
 
 def stub_timer(fm, func, setup=None, title=None):
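`functools.partial` is what threads the new option through: it
pre-binds `prerun` (alongside `displayall` and `limits`) so existing
benchmarks keep calling the returned timer unchanged. A small
self-contained illustration (the `timer` stub is hypothetical, not
perf.py's `_timer`):

  import functools

  def timer(func, displayall=False, limits=(), prerun=0):
      # Configuration arrives as pre-bound keyword arguments; the
      # function to benchmark is the only thing call sites pass.
      print('prerun=%d displayall=%s' % (prerun, displayall))
      func()

  t = functools.partial(timer, displayall=True, prerun=3)
  t(lambda: None)  # prints: prerun=3 displayall=True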
@@ -368,11 +376,15 @@
 )
 
 def _timer(fm, func, setup=None, title=None, displayall=False,
-           limits=DEFAULTLIMITS):
+           limits=DEFAULTLIMITS, prerun=0):
     gc.collect()
     results = []
     begin = util.timer()
     count = 0
+    for i in xrange(prerun):
+        if setup is not None:
+            setup()
+        func()
     keepgoing = True
     while keepgoing:
         if setup is not None:
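Note that each warm-up iteration repeats `setup()` too, so a pre-run
exercises the same code path as a measured run. The effect the option
compensates for is easy to reproduce with any cached computation (an
illustrative example, not code from the changeset):

  import functools
  import time

  @functools.lru_cache(maxsize=None)
  def expensive():
      time.sleep(0.1)  # stands in for cold-cache work (disk, parsing, ...)
      return 42

  begin = time.perf_counter()
  expensive()  # the first call pays the cache-fill cost
  cold = time.perf_counter() - begin

  begin = time.perf_counter()
  expensive()  # later calls hit the cache
  warm = time.perf_counter() - begin

  print('cold=%.3fs warm=%.6fs' % (cold, warm))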
--- a/tests/test-contrib-perf.t	Mon May 20 18:09:41 2019 -0700
+++ b/tests/test-contrib-perf.t	Tue May 21 15:08:06 2019 +0200
@@ -55,6 +55,9 @@
   "presleep"
    number of seconds to wait before any group of runs (default: 1)
   
+  "pre-run"
+    number of runs to perform before starting measurement (default: 0).
+  
   "run-limits"
     Control the number of runs each benchmark will perform. The option value
     should be a list of '<time>-<numberofrun>' pairs. After each run the
@@ -327,6 +330,25 @@
    }
   ]
 
+Test pre-run feature
+--------------------
+
+(perfdiscovery prints "searching for changes" once per run, so each pre-run adds one visible line to the output below)
+
+  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
+  ! wall * comb * user * sys * (best of 1) (glob)
+  searching for changes
+  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
+  ! wall * comb * user * sys * (best of 1) (glob)
+  searching for changes
+  searching for changes
+  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
+  ! wall * comb * user * sys * (best of 1) (glob)
+  searching for changes
+  searching for changes
+  searching for changes
+  searching for changes
+
 Check perf.py for historical portability
 ----------------------------------------