[PATCH 2 of 2] perf: add an option to profile the benchmark section

Martin von Zweigbergk martinvonz at google.com
Thu May 23 09:14:54 EDT 2019


On Wed, May 22, 2019, 07:49 Pierre-Yves David <pierre-yves.david at ens-lyon.org> wrote:

> # HG changeset patch
> # User Pierre-Yves David <pierre-yves.david at octobus.net>
> # Date 1558445208 -7200
> #      Tue May 21 15:26:48 2019 +0200
> # Node ID b5c316b4248f3add425313ac0f443c8888706b79
> # Parent  8553423d79608bd5d144456448509bdf98c6df11
> # EXP-Topic perf-profile
> # Available At https://bitbucket.org/octobus/mercurial-devel/
> #              hg pull https://bitbucket.org/octobus/mercurial-devel/ -r
> b5c316b4248f
> perf: add an option to profile the benchmark section
>
> Running a perf command with --profile gathers data for the whole command
> execution, including setup and cleanup. This can significantly alter the
> data.
>
> To work around this, we introduce a new option that triggers profiling
> of only one iteration of the benchmarked section.
>
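
For anyone who wants to try it while reviewing: the option is enabled
per-invocation with --config perf.profile-benchmark=yes, exactly as the
new test at the end of this patch does, e.g.

  $ hg perfdiscovery . --config perf.profile-benchmark=yes
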
> diff --git a/contrib/perf.py b/contrib/perf.py
> --- a/contrib/perf.py
> +++ b/contrib/perf.py
> @@ -18,6 +18,10 @@ Configurations
>  ``pre-run``
>    number of run to perform before starting measurement.
>
> +``profile-benchmark``
> +  Enable profiling for the benchmarked section.
> +  (The first iteration is benchmarked)
> +
>  ``run-limits``
>    Control the number of runs each benchmark will perform. The option value
>    should be a list of `<time>-<numberofrun>` pairs. After each run the
> @@ -109,6 +113,10 @@ try:
>  except ImportError:
>      pass
>
> +try:
> +    from mercurial import profiling
> +except ImportError:
> +    profiling = None
>
>  def identity(a):
>      return a
> @@ -246,6 +254,9 @@ try:
>      configitem(b'perf', b'pre-run',
>          default=mercurial.configitems.dynamicdefault,
>      )
> +    configitem(b'perf', b'profile-benchmark',
> +        default=mercurial.configitems.dynamicdefault,
> +    )
>      configitem(b'perf', b'run-limits',
>          default=mercurial.configitems.dynamicdefault,
>      )
> @@ -257,6 +268,13 @@ def getlen(ui):
>          return lambda x: 1
>      return len
>
> +class noop(object):
> +    """dummy context manager"""
> +    def __enter__(self):
> +        pass
> +    def __exit__(self, *args):
> +        pass
> +
>

Could this be replaced by util.nullcontextmanager()?
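
Something like this, perhaps (just a sketch, assuming
util.nullcontextmanager() is acceptable given perf.py's
historical-portability constraint -- it only exists in newer Mercurial
releases):

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)
    if profiler is None:
        profiler = util.nullcontextmanager()

One wrinkle: if I remember right, util.nullcontextmanager() is
generator-based, so each instance can only be entered once, while the
class-based noop is reentrant; that matters if the same object gets
re-entered on every loop iteration.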

>  def gettimer(ui, opts=None):
>      """return a timer function and formatter: (timer, formatter)
>
> @@ -347,9 +365,14 @@ def gettimer(ui, opts=None):
>      if not limits:
>          limits = DEFAULTLIMITS
>
> +    profiler = None
> +    if profiling is not None:
> +        if ui.configbool(b"perf", b"profile-benchmark", False):
> +            profiler = profiling.profile(ui)
> +
>      prerun = getint(ui, b"perf", b"pre-run", 0)
>      t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
> -                          prerun=prerun)
> +                          prerun=prerun, profiler=profiler)
>      return t, fm
>
>  def stub_timer(fm, func, setup=None, title=None):
> @@ -376,11 +399,13 @@ DEFAULTLIMITS = (
>  )
>
>  def _timer(fm, func, setup=None, title=None, displayall=False,
> -           limits=DEFAULTLIMITS, prerun=0):
> +           limits=DEFAULTLIMITS, prerun=0, profiler=None):
>      gc.collect()
>      results = []
>      begin = util.timer()
>      count = 0
> +    if profiler is None:
> +        profiler = noop()
>      for i in xrange(prerun):
>          if setup is not None:
>              setup()
> @@ -389,8 +414,9 @@ def _timer(fm, func, setup=None, title=N
>      while keepgoing:
>          if setup is not None:
>              setup()
> -        with timeone() as item:
> -            r = func()
> +        with profiler:
> +            with timeone() as item:
> +                r = func()
>          count += 1
>          results.append(item[0])
>          cstop = util.timer()
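
As an aside for readers following along: the key shape of this change is
that the profiler context wraps only the measured call, so setup and
cleanup never show up in the profile. Below is a minimal self-contained
sketch of the pattern (a hypothetical bench() helper, not perf.py code).
Note that the hunk above reuses the same profiler object on every
iteration; if profiling.profile() is a one-shot generator-based context
manager, re-entering it on a second run would fail, so the sketch resets
to a fresh noop() after the first pass, matching the docstring's "(The
first iteration is benchmarked)" wording:

    import time

    class noop(object):
        """Reentrant dummy context manager, same shape as the patch's."""
        def __enter__(self):
            pass
        def __exit__(self, *args):
            pass

    def bench(func, setup=None, profiler=None, runs=3):
        """Time func() `runs` times; only the measured call executes
        under the profiler, so setup cost stays out of the profile."""
        if profiler is None:
            profiler = noop()
        results = []
        for _ in range(runs):
            if setup is not None:
                setup()              # excluded from timing and profiling
            with profiler:           # wraps only the benchmarked section
                start = time.time()
                func()
            results.append(time.time() - start)
            profiler = noop()        # later iterations are not profiled
        return results

    print(bench(lambda: sum(range(10 ** 6))))
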
> diff --git a/tests/test-contrib-perf.t b/tests/test-contrib-perf.t
> --- a/tests/test-contrib-perf.t
> +++ b/tests/test-contrib-perf.t
> @@ -58,6 +58,10 @@ perfstatus
>    "pre-run"
>      number of run to perform before starting measurement.
>
> +  "profile-benchmark"
> +    Enable profiling for the benchmarked section. (The first iteration is
> +    benchmarked)
> +
>    "run-limits"
>      Control the number of runs each benchmark will perform. The option value
>      should be a list of '<time>-<numberofrun>' pairs. After each run the
> @@ -349,6 +353,15 @@ Test pre-run feature
>    searching for changes
>    searching for changes
>
> +test profile-benchmark option
> +-----------------------------
> +
> +Function to check that statprof ran
> +  $ statprofran () {
> +  >   egrep 'Sample count:|No samples recorded' > /dev/null
> +  > }
> +  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
> +
>  Check perf.py for historical portability
>  ----------------------------------------
>