1# perf.py - performance test routines
2'''helper extension to measure performance
3
4Configurations
5==============
6
7``perf``
8--------
9
10``all-timing``
    When set, additional statistics will be reported for each benchmark: best,
    worst, median, and average. If not set, only the best timing is reported
    (default: off).
14
15``presleep``
  number of seconds to wait before any group of runs (default: 1)
17
18``pre-run``
  number of runs to perform before starting measurement.
20
21``profile-benchmark``
22  Enable profiling for the benchmarked section.
23  (The first iteration is benchmarked)
24
25``run-limits``
26  Control the number of runs each benchmark will perform. The option value
27  should be a list of `<time>-<numberofrun>` pairs. After each run the
28  conditions are considered in order with the following logic:
29
30      If benchmark has been running for <time> seconds, and we have performed
31      <numberofrun> iterations, stop the benchmark,
32
33  The default value is: `3.0-100, 10.0-3`
34
35``stub``
36    When set, benchmarks will only be run once, useful for testing
37    (default: off)
38'''
39
40# "historical portability" policy of perf.py:
41#
42# We have to do:
43# - make perf.py "loadable" with as wide Mercurial version as possible
44#   This doesn't mean that perf commands work correctly with that Mercurial.
45#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46# - make historical perf command work correctly with as wide Mercurial
47#   version as possible
48#
49# We have to do, if possible with reasonable cost:
50# - make recent perf command for historical feature work correctly
51#   with early Mercurial
52#
53# We don't have to do:
54# - make perf command for recent feature work correctly with early
55#   Mercurial
56
57from __future__ import absolute_import
58import contextlib
59import functools
60import gc
61import os
62import random
63import shutil
64import struct
65import sys
66import tempfile
67import threading
68import time
69
70import mercurial.revlog
71from mercurial import (
72    changegroup,
73    cmdutil,
74    commands,
75    copies,
76    error,
77    extensions,
78    hg,
79    mdiff,
80    merge,
81    util,
82)
83
84# for "historical portability":
85# try to import modules separately (in dict order), and ignore
86# failure, because these aren't available with early Mercurial
87try:
88    from mercurial import branchmap  # since 2.5 (or bcee63733aad)
89except ImportError:
90    pass
91try:
92    from mercurial import obsolete  # since 2.3 (or ad0d6c2b3279)
93except ImportError:
94    pass
95try:
96    from mercurial import registrar  # since 3.7 (or 37d50250b696)
97
98    dir(registrar)  # forcibly load it
99except ImportError:
100    registrar = None
101try:
102    from mercurial import repoview  # since 2.5 (or 3a6ddacb7198)
103except ImportError:
104    pass
105try:
106    from mercurial.utils import repoviewutil  # since 5.0
107except ImportError:
108    repoviewutil = None
109try:
110    from mercurial import scmutil  # since 1.9 (or 8b252e826c68)
111except ImportError:
112    pass
113try:
114    from mercurial import setdiscovery  # since 1.9 (or cb98fed52495)
115except ImportError:
116    pass
117
118try:
119    from mercurial import profiling
120except ImportError:
121    profiling = None
122
try:
    from mercurial.revlogutils import constants as revlog_constants

    # modern Mercurial: opening a revlog requires a (kind, name) pair;
    # tag revlogs created by perf with their own kind
    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        """Open a revlog with the modern signature (revlog kind required)."""
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    # older Mercurial: no revlog kinds, use the historical signature
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        """Open a revlog with the historical signature (no revlog kind)."""
        return mercurial.revlog.revlog(opener, *args, **kwargs)
137
138
def identity(a):
    """Compatibility fallback: hand the argument back unchanged."""
    return a
141
142
143try:
144    from mercurial import pycompat
145
146    getargspec = pycompat.getargspec  # added to module after 4.5
147    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
148    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
149    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
150    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
151    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
152    if pycompat.ispy3:
153        _maxint = sys.maxsize  # per py3 docs for replacing maxint
154    else:
155        _maxint = sys.maxint
156except (NameError, ImportError, AttributeError):
157    import inspect
158
159    getargspec = inspect.getargspec
160    _byteskwargs = identity
161    _bytestr = str
162    fsencode = identity  # no py3 support
163    _maxint = sys.maxint  # no py3 support
164    _sysstr = lambda x: x  # no py3 support
165    _xrange = xrange
166
167try:
168    # 4.7+
169    queue = pycompat.queue.Queue
170except (NameError, AttributeError, ImportError):
171    # <4.7.
172    try:
173        queue = pycompat.queue
174    except (NameError, AttributeError, ImportError):
175        import Queue as queue
176
177try:
178    from mercurial import logcmdutil
179
180    makelogtemplater = logcmdutil.maketemplater
181except (AttributeError, ImportError):
182    try:
183        makelogtemplater = cmdutil.makelogtemplater
184    except (AttributeError, ImportError):
185        makelogtemplater = None
186
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()


def safehasattr(thing, attr):
    """Portable hasattr(); *attr* may be bytes (converted via _sysstr)."""
    sentinel = _undefined
    return getattr(thing, _sysstr(attr), sentinel) is not sentinel
195
196
setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    # high-resolution monotonic clock (py3.3+)
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): os.name is a native str, so b'nt' only matches on
    # py2; harmless on py3 since the perf_counter branch wins — confirm
    util.timer = time.clock
else:
    util.timer = time.time
208
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
# lookup order: cmdutil.formatteropts, commands.formatteropts, then []
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)
217
# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            # local fallback mirroring the historical option list
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)
236
cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b'name|alias1|alias2' command spec into its name list."""
    aliases = cmd.split(b"|")
    return aliases
244
245
if safehasattr(registrar, 'command'):
    # modern path: registrar.command (registrar may be None on old hg,
    # in which case safehasattr returns False)
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo by appending the aliases to commands.norepo
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
277
278
try:
    import mercurial.registrar
    import mercurial.configitems

    # register the perf.* config knobs so modern Mercurial's devel
    # warnings do not flag them as unregistered
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    # registrar.configitem unavailable (old Mercurial): skip registration
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # (configitem() lost the 'experimental' keyword; re-register without it)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )
365
366
def getlen(ui):
    """Return len(), or a constant-1 function when perf.stub is set."""
    if not ui.configbool(b"perf", b"stub", False):
        return len
    return lambda x: 1
371
372
class noop(object):
    """Context manager that does nothing on entry or exit."""

    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        # returning a falsy value never suppresses exceptions
        return None


NOOPCTX = noop()
384
385
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                # plain formatters are falsy (mirrors plainformatter)
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is '<seconds>-<runcount>'; malformed entries are
    # warned about and skipped
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # optional profiler wrapping the first measured iteration only
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
508
509
def stub_timer(fm, func, setup=None, title=None):
    """Timer stand-in for perf.stub mode: one setup pass, one call of func."""
    if setup is not None:
        setup()
    func()
514
515
@contextlib.contextmanager
def timeone():
    """Yield a list that receives one (wall, user, sys) sample on exit."""
    sample = []
    os_before = os.times()
    wall_before = util.timer()
    yield sample
    wall_after = util.timer()
    os_after = os.times()
    wall = wall_after - wall_before
    user = os_after[0] - os_before[0]
    system = os_after[1] - os_before[1]
    sample.append((wall, user, system))
526
527
# list of stop condition (elapsed time, minimal run count)
# each pair means: stop once the benchmark has run for at least <time>
# seconds AND has completed at least <count> iterations
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
533
534
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Benchmark *func* and report the timings through formatter *fm*.

    *setup* (if given) runs before every call of *func*.  *prerun*
    unmeasured warm-up calls are made first.  Measurement continues
    until one (elapsed, mincount) pair in *limits* is satisfied.  Only
    the first measured iteration runs under *profiler* (if given).
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        # warm-up: executed but never recorded
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only the very first measured iteration is profiled
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # 'r' is the return value of the last run; a truthy value is shown
    # as "! result: ..." by formatone()
    formatone(fm, results, title=title, result=r, displayall=displayall)
574
575
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing summary through formatter *fm*.

    *timings* is a list of (wall, user, sys) samples; it is sorted in
    place.  The best sample is always shown; with *displayall*, the
    max, average and median samples are shown as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # every role except "best" prefixes its field names with "<role>."
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        averages = tuple(sum(column) / count for column in zip(*timings))
        display(b'avg', averages)
        display(b'median', timings[len(timings) // 2])
609
610
611# utilities for historical portability
612
613
def getint(ui, section, name, default):
    """Read config *section*.*name* as an int, or *default* when unset.

    Built on plain ui.config() because ui.configint has only been
    available since 1.9 (or fa2b596db182).  Raises ConfigError when the
    configured value is not a valid integer.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
626
627
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # snapshot the current value so restore() can put it back
    origvalue = getattr(obj, _sysstr(name))

    class attrutil(object):
        # setter/restorer bound to 'obj' and 'name' via closure
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
664
665
666# utilities to examine each internal API changes
667
668
def getbranchmapsubsettable():
    """Locate the branch-cache 'subsettable' across Mercurial versions.

    subsettable has lived in branchmap (2.9+), repoview (2.5+), and
    repoviewutil (5.0+); probe each module in turn.
    """
    for mod in (branchmap, repoview, repoviewutil):
        table = getattr(mod, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
687
688
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # fall back to the historical 'sopener' attribute otherwise
    svfs = getattr(repo, 'svfs', None)
    return svfs if svfs else getattr(repo, 'sopener')
698
699
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # fall back to the historical 'opener' attribute otherwise
    vfs = getattr(repo, 'vfs', None)
    return vfs if vfs else getattr(repo, 'opener')
709
710
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    # older API: a plain '_tags' attribute that can be reset to None
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    # oldest supported API: 'tagscache' attribute reset to None
    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
739
740
741# utilities to clear cache
742
743
def clearfilecache(obj, attrname):
    """Drop the cached value of filecache'd *attrname* from *obj*.

    Operates on the unfiltered repository when *obj* supports it, and
    removes both the instance attribute and the _filecache entry.
    """
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
751
752
def clearchangelog(repo):
    """Drop cached changelog state so the next access reloads it."""
    if repo is not repo.unfiltered():
        # reset the filtered view's cache slots directly;
        # object.__setattr__ bypasses any custom __setattr__ on the view
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
758
759
760# perf commands
761
762
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # time a full dirstate walk (including unknown files) of the wdir
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def walk_once():
        entries = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        # the count becomes the displayed "! result:" value
        return len(list(entries))

    timer(walk_once)
    fm.end()
776
777
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # time annotating file *f* as of the working directory parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]

    def annotate_once():
        return len(fctx.annotate(True))

    timer(annotate_once)
    fm.end()
785
786
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # benchmark the low-level dirstate.status() call directly
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # consume the result without returning it, so no
            # "! result:" line is printed for this branch
            sum(map(bool, s))

        timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
823
824
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # time a dry-run addremove over the whole working copy
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # capture the quiet flag *before* entering the try block: if this
    # line were inside it and raised, the finally clause would hit an
    # unbound 'oldquiet' (NameError) and mask the original error
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        # dry run so the repository is never actually modified
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # since 5.0: addremove takes an explicit uipathfn argument
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
        fm.end()
842
843
def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        # modern revlogs expose an explicit cache-clearing API
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        # reset the node->rev lookup cache to its pristine state
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
854
855
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def reset():
        # drop all changelog caches so every run recomputes from scratch
        clearcaches(cl)

    def compute_heads():
        len(cl.headrevs())

    timer(compute_heads, setup=reset)
    fm.end()
871
872
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    # time repo.tags(), optionally reloading changelog + manifest each run
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def reset():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def count_tags():
        return len(repo.tags())

    timer(count_tags, setup=reset)
    fm.end()
897
898
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # time iterating over every ancestor of all current heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def consume():
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(consume)
    fm.end()
911
912
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # time membership tests of REVSET revs against the lazy ancestor set
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def probe():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors

    timer(probe)
    fm.end()
927
928
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    # NOTE(review): unlike most perf commands, opts is not run through
    # _byteskwargs before reaching gettimer()/hg.peer(); confirm whether
    # native-str keys are acceptable on this code path
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    try:
        # modern API: resolve the path through urlutil
        from mercurial.utils.urlutil import get_unique_pull_path

        path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
    except ImportError:
        # older Mercurial: ui.expandpath does the resolution
        path = ui.expandpath(path)

    def s():
        # create a fresh peer each run so no discovery state is reused
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
950
951
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def reset():
        if clearrevlogs:
            clearchangelog(repo)
        # force the bookmark store to be re-read from disk
        clearfilecache(repo, b'_bookmarks')

    def load():
        repo._bookmarks

    timer(load, setup=reset)
    fm.end()
976
977
978@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
979def perfbundleread(ui, repo, bundlepath, **opts):
980    """Benchmark reading of bundle files.
981
982    This command is meant to isolate the I/O part of bundle reading as
983    much as possible.
984    """
985    from mercurial import (
986        bundle2,
987        exchange,
988        streamclone,
989    )
990
991    opts = _byteskwargs(opts)
992
993    def makebench(fn):
994        def run():
995            with open(bundlepath, b'rb') as fh:
996                bundle = exchange.readbundle(ui, fh, bundlepath)
997                fn(bundle)
998
999        return run
1000
1001    def makereadnbytes(size):
1002        def run():
1003            with open(bundlepath, b'rb') as fh:
1004                bundle = exchange.readbundle(ui, fh, bundlepath)
1005                while bundle.read(size):
1006                    pass
1007
1008        return run
1009
1010    def makestdioread(size):
1011        def run():
1012            with open(bundlepath, b'rb') as fh:
1013                while fh.read(size):
1014                    pass
1015
1016        return run
1017
1018    # bundle1
1019
1020    def deltaiter(bundle):
1021        for delta in bundle.deltaiter():
1022            pass
1023
1024    def iterchunks(bundle):
1025        for chunk in bundle.getchunks():
1026            pass
1027
1028    # bundle2
1029
1030    def forwardchunks(bundle):
1031        for chunk in bundle._forwardchunks():
1032            pass
1033
1034    def iterparts(bundle):
1035        for part in bundle.iterparts():
1036            pass
1037
1038    def iterpartsseekable(bundle):
1039        for part in bundle.iterparts(seekable=True):
1040            pass
1041
1042    def seek(bundle):
1043        for part in bundle.iterparts(seekable=True):
1044            part.seek(0, os.SEEK_END)
1045
1046    def makepartreadnbytes(size):
1047        def run():
1048            with open(bundlepath, b'rb') as fh:
1049                bundle = exchange.readbundle(ui, fh, bundlepath)
1050                for part in bundle.iterparts():
1051                    while part.read(size):
1052                        pass
1053
1054        return run
1055
1056    benches = [
1057        (makestdioread(8192), b'read(8k)'),
1058        (makestdioread(16384), b'read(16k)'),
1059        (makestdioread(32768), b'read(32k)'),
1060        (makestdioread(131072), b'read(128k)'),
1061    ]
1062
1063    with open(bundlepath, b'rb') as fh:
1064        bundle = exchange.readbundle(ui, fh, bundlepath)
1065
1066        if isinstance(bundle, changegroup.cg1unpacker):
1067            benches.extend(
1068                [
1069                    (makebench(deltaiter), b'cg1 deltaiter()'),
1070                    (makebench(iterchunks), b'cg1 getchunks()'),
1071                    (makereadnbytes(8192), b'cg1 read(8k)'),
1072                    (makereadnbytes(16384), b'cg1 read(16k)'),
1073                    (makereadnbytes(32768), b'cg1 read(32k)'),
1074                    (makereadnbytes(131072), b'cg1 read(128k)'),
1075                ]
1076            )
1077        elif isinstance(bundle, bundle2.unbundle20):
1078            benches.extend(
1079                [
1080                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1081                    (makebench(iterparts), b'bundle2 iterparts()'),
1082                    (
1083                        makebench(iterpartsseekable),
1084                        b'bundle2 iterparts() seekable',
1085                    ),
1086                    (makebench(seek), b'bundle2 part seek()'),
1087                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1088                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1089                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1090                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1091                ]
1092            )
1093        elif isinstance(bundle, streamclone.streamcloneapplier):
1094            raise error.Abort(b'stream clone bundles not supported')
1095        else:
1096            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1097
1098    for fn, title in benches:
1099        timer, fm = gettimer(ui, opts)
1100        timer(fn, title=title)
1101        fm.end()
1102
1103
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # generate and exhaust the changelog chunks; iterating them is the
        # operation being measured, the chunk content itself is discarded
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1139
1140
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark a `hasdir` call that must rebuild the dirs cache each run"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate so loading it is not part of the measurement
    b'a' in ds

    def run():
        ds.hasdir(b'a')
        # drop the directory cache so the next call recomputes it; the
        # attribute may be absent on some dirstate map implementations
        try:
            del ds._map._dirs
        except AttributeError:
            pass

    timer(run)
    fm.end()
1157
1158
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm the dirstate up front so the default-mode setup() starts from a
    # loaded state
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            # iterate every tracked file
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:

        def setup():
            # drop the in-memory state so each run reloads from disk
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1221
1222
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm the dirstate itself so only the `_dirs` rebuild is measured
    repo.dirstate.hasdir(b"a")

    def drop_dirs_cache():
        # the cache attribute is absent on some dirstate map implementations
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def lookup():
        repo.dirstate.hasdir(b"a")

    timer(lookup, setup=drop_dirs_cache)
    fm.end()
1241
1242
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dsmap = repo.dirstate._map
    # one warm-up request outside the timed loop
    dsmap.filefoldmap.get(b'a')

    def invalidate():
        # force the next access to rebuild the fold map from scratch
        del dsmap.filefoldmap

    def lookup():
        dsmap.filefoldmap.get(b'a')

    timer(lookup, setup=invalidate)
    fm.end()
1262
1263
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dsmap = repo.dirstate._map
    # one warm-up request outside the timed loop
    dsmap.dirfoldmap.get(b'a')

    def invalidate():
        del dsmap.dirfoldmap
        # also drop the underlying directory set when it is cached; the
        # attribute is absent on some dirstate map implementations
        try:
            del dsmap._dirs
        except AttributeError:
            pass

    def lookup():
        dsmap.dirfoldmap.get(b'a')

    timer(lookup, setup=invalidate)
    fm.end()
1287
1288
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # make sure the dirstate is loaded before timing
    b"a" in dirstate

    def mark_dirty():
        # force a real write on every run
        dirstate._dirty = True

    def write():
        dirstate.write(repo.currenttransaction())

    timer(write, setup=mark_dirty)
    fm.end()
1305
1306
def _getmergerevs(repo, opts):
    """parse command arguments to return the revs involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the common ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1328
1329
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` between the selected revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1361
1362
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    local, other, base = _getmergerevs(repo, opts)

    def run():
        copies.mergecopies(repo, local, other, base)

    timer(run)
    fm.end()
1385
1386
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    src = scmutil.revsingle(repo, rev1, rev1)
    dst = scmutil.revsingle(repo, rev2, rev2)

    def trace():
        copies.pathcopies(src, dst)

    timer(trace)
    fm.end()
1400
1401
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # also drop the filecache entry so the phase data is re-read,
            # including file reading time in the measurement
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1426
1427
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    # the peer is no longer needed once we have the phase listing
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # has_node is the modern index API; fall back to the older nodemap
    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    # NOTE(review): .iteritems() is Python 2 dict API -- presumably a compat
    # shim or py2-only path makes this work; verify on Python 3
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1486
1487
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; use its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full 40-character hex node id
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # getstorage is the modern API; fall back to the private
                # _revlog attribute for older Mercurial versions
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        # drop cached manifests (and optionally persisted caches) so the
        # read below starts from scratch on every run
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1531
1532
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def read():
        repo.changelog.read(node)

    timer(read)
    fm.end()
1545
1546
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def fresh_state():
        # drop both the dirstate content and the cached ignore matcher
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def load_ignore():
        dirstate._ignore

    timer(load_ignore, setup=fresh_state, title=b"load")
    fm.end()
1563
1564
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # fix: _byteskwargs converts every opts key to bytes, so the former
        # str key 'rev' raised KeyError here instead of detecting the clash
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        # index creation (makecl) plus the requested lookups
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1627
1628
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        # get_rev is the modern index API; fall back to nodemap.get
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1699
1700
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of the current hg executable"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name != 'nt':
            # neutralize HGRCPATH so user configuration does not skew startup
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            # Windows: no inline env-var syntax; set it in the environment
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()
1717
1718
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def fetchparents():
        for node in nodes:
            # deliberately go through the full repo -> changelog path each
            # time; the attribute traversal is part of what is measured
            repo.changelog.parents(node)

    timer(fetchparents)
    fm.end()
1744
1745
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a changectx"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def run():
        len(repo[rev].files())

    timer(run)
    fm.end()
1757
1758
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw files list straight from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def run():
        # changelog.read() returns a tuple; index 3 holds the files list
        len(cl.read(rev)[3])

    timer(run)
    fm.end()
1771
1772
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def lookup():
        return len(repo.lookup(rev))

    timer(lookup)
    fm.end()
1779
1780
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a pseudo-random edit sequence to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every invocation replays the exact same edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # pick a source range inside the current content...
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        # ...and an arbitrary replacement range
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1818
1819
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # hoist the attribute lookup so it is not part of the timed call
    revrange = scmutil.revrange

    def resolve():
        return len(revrange(repo, specs))

    timer(resolve)
    fm.end()
1827
1828
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a node lookup in a freshly loaded changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    # the revlog constructor signature changed from indexfile= to radix=;
    # try the modern form first and fall back for older Mercurial
    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def d():
        cl.rev(n)
        # drop caches so the next run performs a cold lookup again
        clearcaches(cl)

    timer(d)
    fm.end()
1849
1850
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log` (output is discarded)"""
    opts = _byteskwargs(opts)
    rev = [] if rev is None else rev
    timer, fm = gettimer(ui, opts)
    # swallow the log output so printing does not pollute the timing
    ui.pushbuffer()

    def runlog():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(runlog)
    ui.popbuffer()
    fm.end()
1868
1869
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walkbackwards():
        last = len(repo) - 1
        for rev in repo.changelog.revs(start=last, stop=-1):
            # reading the branch forces the changelog entry to be loaded
            repo[rev].branch()

    timer(walkbackwards)
    fm.end()
1886
1887
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render to a throw-away ui so terminal I/O does not skew the timing
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        # render every selected revision once per run
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1930
1931
1932def _displaystats(ui, opts, entries, data):
1933    # use a second formatter because the data are quite different, not sure
1934    # how it flies with the templater.
1935    fm = ui.formatter(b'perf-stats', opts)
1936    for key, title in entries:
1937        values = data[key]
1938        nbvalues = len(data)
1939        values.sort()
1940        stats = {
1941            'key': key,
1942            'title': title,
1943            'nbitems': len(values),
1944            'min': values[0][0],
1945            '10%': values[(nbvalues * 10) // 100][0],
1946            '25%': values[(nbvalues * 25) // 100][0],
1947            '50%': values[(nbvalues * 50) // 100][0],
1948            '75%': values[(nbvalues * 75) // 100][0],
1949            '80%': values[(nbvalues * 80) // 100][0],
1950            '85%': values[(nbvalues * 85) // 100][0],
1951            '90%': values[(nbvalues * 90) // 100][0],
1952            '95%': values[(nbvalues * 95) // 100][0],
1953            '99%': values[(nbvalues * 99) // 100][0],
1954            'max': values[-1][0],
1955        }
1956        fm.startitem()
1957        fm.data(**stats)
1958        # make node pretty for the human output
1959        fm.plain('### %s (%d items)\n' % (title, len(values)))
1960        lines = [
1961            'min',
1962            '10%',
1963            '25%',
1964            '50%',
1965            '75%',
1966            '80%',
1967            '85%',
1968            '90%',
1969            '95%',
1970            '99%',
1971            'max',
1972        ]
1973        for l in lines:
1974            fm.plain('%s: %s\n' % (l, stats[l]))
1975    fm.end()
1976
1977
1978@command(
1979    b'perf::helper-mergecopies|perfhelper-mergecopies',
1980    formatteropts
1981    + [
1982        (b'r', b'revs', [], b'restrict search to these revisions'),
1983        (b'', b'timing', False, b'provides extra data (costly)'),
1984        (b'', b'stats', False, b'provides statistic about the measured data'),
1985    ],
1986)
1987def perfhelpermergecopies(ui, repo, revs=[], **opts):
1988    """find statistics about potential parameters for `perfmergecopies`
1989
1990    This command find (base, p1, p2) triplet relevant for copytracing
1991    benchmarking in the context of a merge.  It reports values for some of the
1992    parameters that impact merge copy tracing time during merge.
1993
1994    If `--timing` is set, rename detection is run and the associated timing
1995    will be reported. The extra details come at the cost of slower command
1996    execution.
1997
1998    Since rename detection is only run once, other factors might easily
1999    affect the precision of the timing. However it should give a good
2000    approximation of which revision triplets are very costly.
2001    """
2002    opts = _byteskwargs(opts)
2003    fm = ui.formatter(b'perf', opts)
2004    dotiming = opts[b'timing']
2005    dostats = opts[b'stats']
2006
2007    output_template = [
2008        ("base", "%(base)12s"),
2009        ("p1", "%(p1.node)12s"),
2010        ("p2", "%(p2.node)12s"),
2011        ("p1.nb-revs", "%(p1.nbrevs)12d"),
2012        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2013        ("p1.renames", "%(p1.renamedfiles)12d"),
2014        ("p1.time", "%(p1.time)12.3f"),
2015        ("p2.nb-revs", "%(p2.nbrevs)12d"),
2016        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2017        ("p2.renames", "%(p2.renamedfiles)12d"),
2018        ("p2.time", "%(p2.time)12.3f"),
2019        ("renames", "%(nbrenamedfiles)12d"),
2020        ("total.time", "%(time)12.3f"),
2021    ]
2022    if not dotiming:
2023        output_template = [
2024            i
2025            for i in output_template
2026            if not ('time' in i[0] or 'renames' in i[0])
2027        ]
2028    header_names = [h for (h, v) in output_template]
2029    output = ' '.join([v for (h, v) in output_template]) + '\n'
2030    header = ' '.join(['%12s'] * len(header_names)) + '\n'
2031    fm.plain(header % tuple(header_names))
2032
2033    if not revs:
2034        revs = ['all()']
2035    revs = scmutil.revrange(repo, revs)
2036
2037    if dostats:
2038        alldata = {
2039            'nbrevs': [],
2040            'nbmissingfiles': [],
2041        }
2042        if dotiming:
2043            alldata['parentnbrenames'] = []
2044            alldata['totalnbrenames'] = []
2045            alldata['parenttime'] = []
2046            alldata['totaltime'] = []
2047
2048    roi = repo.revs('merge() and %ld', revs)
2049    for r in roi:
2050        ctx = repo[r]
2051        p1 = ctx.p1()
2052        p2 = ctx.p2()
2053        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2054        for b in bases:
2055            b = repo[b]
2056            p1missing = copies._computeforwardmissing(b, p1)
2057            p2missing = copies._computeforwardmissing(b, p2)
2058            data = {
2059                b'base': b.hex(),
2060                b'p1.node': p1.hex(),
2061                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2062                b'p1.nbmissingfiles': len(p1missing),
2063                b'p2.node': p2.hex(),
2064                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2065                b'p2.nbmissingfiles': len(p2missing),
2066            }
2067            if dostats:
2068                if p1missing:
2069                    alldata['nbrevs'].append(
2070                        (data['p1.nbrevs'], b.hex(), p1.hex())
2071                    )
2072                    alldata['nbmissingfiles'].append(
2073                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2074                    )
2075                if p2missing:
2076                    alldata['nbrevs'].append(
2077                        (data['p2.nbrevs'], b.hex(), p2.hex())
2078                    )
2079                    alldata['nbmissingfiles'].append(
2080                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2081                    )
2082            if dotiming:
2083                begin = util.timer()
2084                mergedata = copies.mergecopies(repo, p1, p2, b)
2085                end = util.timer()
2086                # not very stable timing since we did only one run
2087                data['time'] = end - begin
2088                # mergedata contains five dicts: "copy", "movewithdir",
2089                # "diverge", "renamedelete" and "dirmove".
2090                # The first 4 are about renamed file so lets count that.
2091                renames = len(mergedata[0])
2092                renames += len(mergedata[1])
2093                renames += len(mergedata[2])
2094                renames += len(mergedata[3])
2095                data['nbrenamedfiles'] = renames
2096                begin = util.timer()
2097                p1renames = copies.pathcopies(b, p1)
2098                end = util.timer()
2099                data['p1.time'] = end - begin
2100                begin = util.timer()
2101                p2renames = copies.pathcopies(b, p2)
2102                end = util.timer()
2103                data['p2.time'] = end - begin
2104                data['p1.renamedfiles'] = len(p1renames)
2105                data['p2.renamedfiles'] = len(p2renames)
2106
2107                if dostats:
2108                    if p1missing:
2109                        alldata['parentnbrenames'].append(
2110                            (data['p1.renamedfiles'], b.hex(), p1.hex())
2111                        )
2112                        alldata['parenttime'].append(
2113                            (data['p1.time'], b.hex(), p1.hex())
2114                        )
2115                    if p2missing:
2116                        alldata['parentnbrenames'].append(
2117                            (data['p2.renamedfiles'], b.hex(), p2.hex())
2118                        )
2119                        alldata['parenttime'].append(
2120                            (data['p2.time'], b.hex(), p2.hex())
2121                        )
2122                    if p1missing or p2missing:
2123                        alldata['totalnbrenames'].append(
2124                            (
2125                                data['nbrenamedfiles'],
2126                                b.hex(),
2127                                p1.hex(),
2128                                p2.hex(),
2129                            )
2130                        )
2131                        alldata['totaltime'].append(
2132                            (data['time'], b.hex(), p1.hex(), p2.hex())
2133                        )
2134            fm.startitem()
2135            fm.data(**data)
2136            # make node pretty for the human output
2137            out = data.copy()
2138            out['base'] = fm.hexfunc(b.node())
2139            out['p1.node'] = fm.hexfunc(p1.node())
2140            out['p2.node'] = fm.hexfunc(p2.node())
2141            fm.plain(output % out)
2142
2143    fm.end()
2144    if dostats:
2145        # use a second formatter because the data are quite different, not sure
2146        # how it flies with the templater.
2147        entries = [
2148            ('nbrevs', 'number of revision covered'),
2149            ('nbmissingfiles', 'number of missing files at head'),
2150        ]
2151        if dotiming:
2152            entries.append(
2153                ('parentnbrenames', 'rename from one parent to base')
2154            )
2155            entries.append(('totalnbrenames', 'total number of renames'))
2156            entries.append(('parenttime', 'time for one parent'))
2157            entries.append(('totaltime', 'time for both parents'))
2158        _displaystats(ui, opts, entries, alldata)
2159
2160
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # Pick the column layout up front: the rename-count and time columns
    # only exist when --timing was requested.
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # --stats accumulators; each entry is a tuple starting with the measured
    # value so that plain sorting ranks entries by that value.
    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # Only merge revisions are interesting: copy tracing runs from each
    # common-ancestor head ("base") toward each of the two parents.
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                # files added between base and parent: the candidates that
                # rename detection will have to inspect
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # no missing files means no rename detection work;
                    # skip the pair entirely
                    continue
                # NOTE(review): keys here are bytes while later additions
                # (data['time'], data['nbrenamedfiles']) use native str keys;
                # these only coincide on Python 2 -- confirm on Python 3.
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make the nodes pretty for the human-readable output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter pass to display min/max/median statistics
        # for everything accumulated above
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2299
2300
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(d)
    fm.end()
2307
2308
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # time the raw load of the fncache file
    timer(lambda: store.fncache._load())
    fm.end()
2320
2321
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file

    Marks the fncache dirty and rewrites it inside a (backed up)
    transaction, once per timing run.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # Fix: the original released the lock and closed the transaction only on
    # the success path; an exception during timing leaked both. Use the
    # standard tr.close()/finally tr.release() + finally lock.release()
    # pattern so cleanup always happens.
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        try:
            # back up the fncache so the repository is left untouched
            tr.addbackup(b'fncache')

            def d():
                # force a rewrite on every run, otherwise write() is a no-op
                s.fncache._dirty = True
                s.fncache.write(tr)

            timer(d)
            tr.close()
        finally:
            # release() aborts the transaction if close() was not reached
            tr.release()
    finally:
        lock.release()
    fm.end()
2340
2341
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # load once up front so only the encoding itself is timed
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
2355
2356
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for perfbdiff --threads.
    #
    # Protocol: the main thread feeds (old, new) text pairs through queue
    # ``q``, ending each timing round with one ``None`` sentinel per worker.
    # After draining a round, workers park on the ``ready`` condition until
    # the main thread calls notify_all() for the next round. The ``done``
    # event tells workers to exit after their current wait.
    while not done.is_set():
        pair = q.get()
        # drain this round's jobs until we hit the None sentinel
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            # sleep until the main thread starts another round (or shutdown)
            ready.wait()
2372
2373
def _manifestrevision(repo, mnode):
    """Return the raw revision text for manifest node ``mnode``."""
    mlog = repo.manifestlog

    if util.safehasattr(mlog, b'getstorage'):
        # modern manifestlog API
        storage = mlog.getstorage(b'')
    else:
        # fallback for older Mercurial versions
        storage = mlog._revlog

    return storage.revision(mnode)
2383
2384
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the first positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    # (old, new) text pairs to diff; gathered up front so the timed function
    # measures only the diffing itself
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    # NOTE(review): the exclusive range end capped at len(r) - 1 means the
    # tip-most revision is never included -- looks like an off-by-one, kept
    # as-is for historical comparability of results; confirm upstream.
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                # change is ((oldnode, oldflags), (newnode, newflags));
                # `or -1` maps an absent node to the null revision
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            # single-threaded: diff every pair inline
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # multi-threaded: start workers now so thread startup cost is not
        # part of the measurement; see _bdiffworker for the handshake
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        # wait until every worker has consumed its startup sentinel
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            # one None sentinel per worker ends the round
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the workers down: set the exit flag, feed one last round of
        # sentinels, then wake everyone parked on the condition
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2500
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the first positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # (old, new) text pairs gathered up front so the timed function only
    # measures the diffing itself
    pairs = []

    rl = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    firstrev = rl.rev(rl.lookup(rev))
    lastrev = min(firstrev + count, len(rl) - 1)
    for rev in range(firstrev, lastrev):
        if opts[b'alldata']:
            # gather manifest texts for the changeset and each parent
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                ptext = _manifestrevision(repo, pctx.manifestnode())
                pairs.append((ptext, mtext))

            # walk the manifest delta to collect filelog revision pairs
            mcur = ctx.manifest()
            mparent = ctx.p1().manifest()
            for fname, change in mparent.diff(mcur).items():
                flog = repo.file(fname)
                oldtext = flog.revision(change[0][0] or -1)
                newtext = flog.revision(change[1][0] or -1)
                pairs.append((oldtext, newtext))
        else:
            dp = rl.deltaparent(rev)
            pairs.append((rl.revision(dp), rl.revision(rev)))

    def d():
        for left, right in pairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2579
2580
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map each single-letter flag to the diff option it enables
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # benchmark once per whitespace-option combination
    for combo in ('', 'w', 'b', 'B', 'wB'):
        diffopts = {flagnames[flag]: b'1' for flag in combo}

        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **diffopts)
            ui.popbuffer()

        label = combo.encode('ascii')
        title = b'diffopts: %s' % (label and (b'-' + label) or b'none')
        timer(d, title=title)
    fm.end()
2604
2605
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    data = opener.read(indexfile)

    # the first 4 bytes are a big-endian header word: low 16 bits hold the
    # revlog version, bit 16 is the inline-data flag
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    # modern hg exposes parse_index_v1 directly; older versions only had
    # the revlogio class
    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    # sample nodes at fixed fractions of the revlog for the lookup benches
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # build a revlog object from scratch
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        # raw read of the index file, no parsing
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        # node -> rev lookup, exercising the index's nodemap path
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            # expected for the "missing node" benchmark below
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    # (callable, title) pairs; each is timed independently below
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2751
2752
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    nbrevs = getlen(ui)(rl)

    # a negative start revision counts back from the end of the revlog
    if startrev < 0:
        startrev = nbrevs + startrev

    def d():
        rl.clearcaches()

        step = opts[b'dist']
        first, stop = startrev, nbrevs
        if reverse:
            first, stop = stop - 1, first - 1
            step = -step

        for pos in _xrange(first, stop, step):
            # Old revisions don't support passing an int to revision();
            # go through the node instead.
            node = rl.node(pos)
            rl.revision(node)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2801
2802
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fix: error message previously read "invalide run count"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # pivot the per-pass [(rev, timing)] lists into one
    # [(rev, [timing-per-pass])] list
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fix: this row previously computed the 70th percentile
        # (resultcount * 70 // 100) despite being labeled "50%"
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
2944
2945
2946class _faketr(object):
2947    def add(s, x, y, z=None):
2948        return None
2949
2950
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Re-add revisions [startrev, stoprev] of `orig` to a temporary copy,
    timing each `addrawrevision` call.

    `source` selects how the revision data is supplied (full text or a
    delta; see _getrevisionseed). `runidx`, if given, is only used to
    label the progress bar. Returns a list of (rev, timing) pairs, one
    per re-added revision.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        # control whether the copy may reuse the previous delta base
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # make every addition start from a cold cache
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3000
3001
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair needed to re-add `rev` of `orig`.

    `source` selects where the data comes from: the full text, a delta
    against one of the parents, or the delta currently in storage.
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    linkrev = orig.linkrev(rev)
    flags = orig.flags(rev)

    text = None
    cachedelta = None
    if source == b'full':
        # no cached delta: supply the full revision text
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # pick whichever parent yields the smaller delta (ties favor p1)
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p2diff) < len(diff):
                parent, diff = p2, p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        # reuse the delta base currently recorded in the revlog
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
3042
3043
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable revlog that copies `orig` truncated just before
    `truncaterev`, backed by a temporary directory.

    The temporary directory (and the copied revlog with it) is removed
    when the context exits. Inline revlogs are not supported.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    # Note: the previous one-liner `getattr(orig, '_datafile',
    # getattr(orig, 'datafile'))` evaluated the fallback eagerly, which
    # raised AttributeError on revlogs that only have `_datafile`.
    datafile = getattr(orig, '_datafile', None)
    if datafile is None:
        # compatibility with <= hg-5.8
        datafile = getattr(orig, 'datafile')
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            # modern API: the revlog is addressed by a file name radix
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # older API: explicit index/data file names
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3104
3105
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every available engine supporting revlog compression
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open a raw file object on the revlog's data source
        if rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            return getsvfs(repo)(indexfile)
        else:
            # was `getattr(rl, 'datafile', getattr(rl, 'datafile'))`, which
            # queried the same attribute twice and never tried `_datafile`
            datafile = getattr(rl, '_datafile', None)
            if datafile is None:
                # compatibility with <= hg-5.8
                datafile = getattr(rl, 'datafile')
            return getsvfs(repo)(datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    # the functions are already zero-argument; no lambda wrappers needed
    benches = [
        (doread, b'read'),
        (doreadcachedfh, b'read w/ reused fd'),
        (doreadbatch, b'read batch'),
        (doreadbatchcachedfh, b'read batch w/ reused fd'),
        (dochunk, b'chunk'),
        (dochunkbatch, b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3238
3239
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m, the positional argument is actually the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice the (possibly compressed) chunk of every revision in
        # `chain` out of the already-read segments, without decompressing
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            # older revlog API
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older versions kept the helper on the revlog module
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the inputs that the per-phase benchmarks need
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    # slicing only happens with sparse-read enabled
    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3384
3385
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    volatile revisions set cache on the revset execution. Volatile cache
    holds filtered and obsolete related cache."""
    # (docstring previously referred to a nonexistent `--clean` option)
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            # drop filtered/obsolete caches so each run recomputes them
            repo.invalidatevolatilesets()
        if contexts:
            # also measure changectx creation on top of the revset
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3417
3418
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(func, name):
        # build a closure recomputing one volatile set from scratch
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            func(repo, name)

        return run

    selected = set(names)

    # obsolescence-related sets first...
    for name in sorted(obsolete.cachefuncs):
        if names and name not in selected:
            continue
        timer(makebench(obsolete.getrevs, name), title=name)

    # ...then the repoview filtered-revision sets
    for name in sorted(repoview.filtertable):
        if names and name not in selected:
            continue
        timer(makebench(repoview.filterrevs, name), title=name)
    fm.end()
3466
3467
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap, forcing a full rebuild
                view._branchcaches.clear()
            else:
                # only drop this filter's entry so subsets can be reused
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                # no pending dependency: this filter can go next
                break
        else:
            # the for-loop never broke: every candidate depends on another
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reads and writes so only the in-memory
    # update is measured
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3557
3558
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
        * the changelog instance and associated indexes
        * the rev-branch-cache instance

    Examples:

       # update for the one last revision
       $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

       # update for change coming with a new branch
       $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the benchmarked update will actually add
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # temporarily register two repoview filters exposing exactly the
        # base and target revision sets
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start every run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        # always unregister the temporary filters
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3667
3668
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    # `filter` and `list` mirror the CLI option names even though they
    # shadow builtins
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # --list only enumerates the on-disk caches; no benchmark is run
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # walk up the subset chain until a cached branchmap is found
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3727
3728
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    storevfs = getsvfs(repo)

    def countmarkers():
        # instantiating obsstore parses every on-disk marker
        return len(obsolete.obsstore(repo, storevfs))

    timer(countmarkers)
    fm.end()
3738
3739
@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark `util.lrucachedict` init, get, set and mixed workloads"""
    opts = _byteskwargs(opts)

    def doinit():
        # cost of constructing an empty cache
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # seed values used to pre-fill the cache for the "get" benchmarks
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        # NOTE(review): `costs` (built below, `sets` entries) is indexed by
        # positions of `values` (`size` entries), so this assumes
        # size <= sets — confirm, or --size larger than --sets will raise
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                # entries may have been evicted by the cost limit
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # get
        else:
            op = 1  # set

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # the cost-aware variants only make sense with a cost limit set
    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3894
3895
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    # Resolve the ui method to benchmark (write by default).
    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        # Build the whole line up front so the timed loop measures a
        # single write call per line.
        line = item * nitems + b'\n'

    def benchmark():
        # The batch_line decision is loop-invariant, so branch once and
        # run a dedicated loop for each mode; the emitted byte stream
        # and flush pattern are identical either way.
        if batch_line:
            for _ in pycompat.xrange(nlines):
                write(line)
                if flush_line:
                    ui.flush()
        else:
            for _ in pycompat.xrange(nlines):
                for _ in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
                if flush_line:
                    ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
3937
3938
def uisetup(ui):
    # for "historical portability":
    # cmdutil.openrevlog() existing while commands.debugrevlogopts does
    # not pins Mercurial to 1.9 (or a79fea6b3e77) - 3.7 (or
    # 5606f7d0d063). In that range the '--dir' option (available since
    # 3.5, or 49c583ca48c4) may be unsupported, so reject it with a
    # clear message instead of letting openrevlog() fail obscurely.
    if not util.safehasattr(cmdutil, b'openrevlog'):
        return
    if util.safehasattr(commands, b'debugrevlogopts'):
        return

    def wrapopenrevlog(orig, repo, cmd, file_, opts):
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(
                b"This version doesn't support --dir option",
                hint=b"use 3.5 or later",
            )
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, b'openrevlog', wrapopenrevlog)
3957
3958
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # Drive one full progress bar from 0 to `total`, one increment
        # per step; the context manager tears the bar down afterwards.
        with ui.makeprogress(topic, total=total) as progress:
            for _ in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
3981