Mercurial > hg
comparison contrib/perf.py @ 51589:90ef3e042e10
perf: allow profiling of more than one run
By default, we still profile the first run only. However, profiling more runs helps
to understand side effects carried over from one run to the next. So we add an
option to be able to do so.
author | Pierre-Yves David <pierre-yves.david@octobus.net> |
---|---|
date | Sun, 14 Apr 2024 02:38:41 +0200 |
parents | 8e8776a28683 |
children | a6bdd2e6f7cb |
comparison
equal
deleted
inserted
replaced
51588:1574718fa62f | 51589:90ef3e042e10 |
---|---|
18 ``pre-run`` | 18 ``pre-run`` |
19 number of run to perform before starting measurement. | 19 number of run to perform before starting measurement. |
20 | 20 |
21 ``profile-benchmark`` | 21 ``profile-benchmark`` |
22 Enable profiling for the benchmarked section. | 22 Enable profiling for the benchmarked section. |
23 (The first iteration is benchmarked) | 23 (by default, the first iteration is benchmarked) |
24 | |
25 ``profiled-runs`` | |
26 list of iteration to profile (starting from 0) | |
24 | 27 |
25 ``run-limits`` | 28 ``run-limits`` |
26 Control the number of runs each benchmark will perform. The option value | 29 Control the number of runs each benchmark will perform. The option value |
27 should be a list of `<time>-<numberofrun>` pairs. After each run the | 30 should be a list of `<time>-<numberofrun>` pairs. After each run the |
28 conditions are considered in order with the following logic: | 31 conditions are considered in order with the following logic: |
312 default=mercurial.configitems.dynamicdefault, | 315 default=mercurial.configitems.dynamicdefault, |
313 ) | 316 ) |
314 configitem( | 317 configitem( |
315 b'perf', | 318 b'perf', |
316 b'profile-benchmark', | 319 b'profile-benchmark', |
320 default=mercurial.configitems.dynamicdefault, | |
321 ) | |
322 configitem( | |
323 b'perf', | |
324 b'profiled-runs', | |
317 default=mercurial.configitems.dynamicdefault, | 325 default=mercurial.configitems.dynamicdefault, |
318 ) | 326 ) |
319 configitem( | 327 configitem( |
320 b'perf', | 328 b'perf', |
321 b'run-limits', | 329 b'run-limits', |
352 b'pre-run', | 360 b'pre-run', |
353 default=mercurial.configitems.dynamicdefault, | 361 default=mercurial.configitems.dynamicdefault, |
354 ) | 362 ) |
355 configitem( | 363 configitem( |
356 b'perf', | 364 b'perf', |
357 b'profile-benchmark', | 365 b'profiled-runs', |
358 default=mercurial.configitems.dynamicdefault, | 366 default=mercurial.configitems.dynamicdefault, |
359 ) | 367 ) |
360 configitem( | 368 configitem( |
361 b'perf', | 369 b'perf', |
362 b'run-limits', | 370 b'run-limits', |
489 limits.append((time_limit, run_limit)) | 497 limits.append((time_limit, run_limit)) |
490 if not limits: | 498 if not limits: |
491 limits = DEFAULTLIMITS | 499 limits = DEFAULTLIMITS |
492 | 500 |
493 profiler = None | 501 profiler = None |
502 profiled_runs = set() | |
494 if profiling is not None: | 503 if profiling is not None: |
495 if ui.configbool(b"perf", b"profile-benchmark", False): | 504 if ui.configbool(b"perf", b"profile-benchmark", False): |
496 profiler = profiling.profile(ui) | 505 profiler = lambda: profiling.profile(ui) |
506 for run in ui.configlist(b"perf", b"profiled-runs", [0]): | |
507 profiled_runs.add(int(run)) | |
497 | 508 |
498 prerun = getint(ui, b"perf", b"pre-run", 0) | 509 prerun = getint(ui, b"perf", b"pre-run", 0) |
499 t = functools.partial( | 510 t = functools.partial( |
500 _timer, | 511 _timer, |
501 fm, | 512 fm, |
502 displayall=displayall, | 513 displayall=displayall, |
503 limits=limits, | 514 limits=limits, |
504 prerun=prerun, | 515 prerun=prerun, |
505 profiler=profiler, | 516 profiler=profiler, |
517 profiled_runs=profiled_runs, | |
506 ) | 518 ) |
507 return t, fm | 519 return t, fm |
508 | 520 |
509 | 521 |
510 def stub_timer(fm, func, setup=None, title=None): | 522 def stub_timer(fm, func, setup=None, title=None): |
545 title=None, | 557 title=None, |
546 displayall=False, | 558 displayall=False, |
547 limits=DEFAULTLIMITS, | 559 limits=DEFAULTLIMITS, |
548 prerun=0, | 560 prerun=0, |
549 profiler=None, | 561 profiler=None, |
562 profiled_runs=(0,), | |
550 ): | 563 ): |
551 gc.collect() | 564 gc.collect() |
552 results = [] | 565 results = [] |
553 begin = util.timer() | 566 begin = util.timer() |
554 count = 0 | 567 count = 0 |
555 if profiler is None: | 568 if profiler is None: |
556 profiler = NOOPCTX | 569 profiler = lambda: NOOPCTX |
557 for i in range(prerun): | 570 for i in range(prerun): |
558 if setup is not None: | 571 if setup is not None: |
559 setup() | 572 setup() |
560 with context(): | 573 with context(): |
561 func() | 574 func() |
562 keepgoing = True | 575 keepgoing = True |
563 while keepgoing: | 576 while keepgoing: |
577 if count in profiled_runs: | |
578 prof = profiler() | |
579 else: | |
580 prof = NOOPCTX | |
564 if setup is not None: | 581 if setup is not None: |
565 setup() | 582 setup() |
566 with context(): | 583 with context(): |
567 with profiler: | 584 with prof: |
568 with timeone() as item: | 585 with timeone() as item: |
569 r = func() | 586 r = func() |
570 profiler = NOOPCTX | |
571 count += 1 | 587 count += 1 |
572 results.append(item[0]) | 588 results.append(item[0]) |
573 cstop = util.timer() | 589 cstop = util.timer() |
574 # Look for a stop condition. | 590 # Look for a stop condition. |
575 elapsed = cstop - begin | 591 elapsed = cstop - begin |