#!/usr/bin/env python3
#
# Copyright (c) 2008 Ben Rockwood <benr@cuddletech.com>,
# Copyright (c) 2010 Martin Matuska <mm@FreeBSD.org>,
# Copyright (c) 2010-2011 Jason J. Hellenthal <jhell@DataIX.net>,
# Copyright (c) 2017 Scot W. Stevenson <scot.stevenson@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""Print statistics on the ZFS ARC Cache and other information

Provides basic information on the ARC, its efficiency, the L2ARC (if present),
the Data Management Unit (DMU), Virtual Devices (VDEVs), and tunables. See
the in-source documentation and code at
https://github.com/openzfs/zfs/blob/master/module/zfs/arc.c for details.
The original introduction to arc_summary can be found at
http://cuddletech.com/?p=454
"""

import argparse
import os
import subprocess
import sys
import time
import errno

# We can't use env -S portably, and we need python3 -u to handle pipes in
# the shell abruptly closing the way we want to, so...
import io
if isinstance(sys.__stderr__.buffer, io.BufferedWriter):
    os.execv(sys.executable, [sys.executable, "-u"] + sys.argv)

DESCRIPTION = 'Print ARC and other statistics for OpenZFS'
INDENT = ' '*8
LINE_LENGTH = 72
DATE_FORMAT = '%a %b %d %H:%M:%S %Y'
TITLE = 'ZFS Subsystem Report'

SECTIONS = 'arc archits dmu l2arc spl tunables vdev zil'.split()
SECTION_HELP = 'print info from one section ('+' '.join(SECTIONS)+')'

# Tunables and SPL are handled separately because they come from
# different sources
SECTION_PATHS = {'arc': 'arcstats',
                 'dmu': 'dmu_tx',
                 'l2arc': 'arcstats',  # L2ARC stuff lives in arcstats
                 'vdev': 'vdev_cache_stats',
                 'zfetch': 'zfetchstats',
                 'zil': 'zil'}

parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('-a', '--alternate', action='store_true', default=False,
                    help='use alternate formatting for tunables and SPL',
                    dest='alt')
parser.add_argument('-d', '--description', action='store_true', default=False,
                    help='print descriptions with tunables and SPL',
                    dest='desc')
parser.add_argument('-g', '--graph', action='store_true', default=False,
                    help='print graph on ARC use and exit', dest='graph')
parser.add_argument('-p', '--page', type=int, dest='page',
                    help='print page by number (DEPRECATED, use "-s")')
parser.add_argument('-r', '--raw', action='store_true', default=False,
                    help='dump all available data with minimal formatting',
                    dest='raw')
parser.add_argument('-s', '--section', dest='section', help=SECTION_HELP)
ARGS = parser.parse_args()


if sys.platform.startswith('freebsd'):
    # Requires py36-sysctl on FreeBSD
    import sysctl

    VDEV_CACHE_SIZE = 'vdev.cache_size'

    def is_value(ctl):
        return ctl.type != sysctl.CTLTYPE_NODE

    def namefmt(ctl, base='vfs.zfs.'):
        # base is removed from the name
        cut = len(base)
        return ctl.name[cut:]

    def load_kstats(section):
        base = 'kstat.zfs.misc.{section}.'.format(section=section)
        fmt = lambda kstat: '{name} : {value}'.format(name=namefmt(kstat, base),
                                                      value=kstat.value)
        kstats = sysctl.filter(base)
        return [fmt(kstat) for kstat in kstats if is_value(kstat)]

    def get_params(base):
        ctls = sysctl.filter(base)
        return {namefmt(ctl): str(ctl.value) for ctl in ctls if is_value(ctl)}

    def get_tunable_params():
        return get_params('vfs.zfs')

    def get_vdev_params():
        return get_params('vfs.zfs.vdev')

    def get_version_impl(request):
        # FreeBSD reports versions for zpl and spa instead of zfs and spl.
        name = {'zfs': 'zpl',
                'spl': 'spa'}[request]
        mib = 'vfs.zfs.version.{}'.format(name)
        version = sysctl.filter(mib)[0].value
        return '{} version {}'.format(name, version)

    def get_descriptions(_request):
        ctls = sysctl.filter('vfs.zfs')
        return {namefmt(ctl): ctl.description for ctl in ctls if is_value(ctl)}


elif sys.platform.startswith('linux'):
    KSTAT_PATH = '/proc/spl/kstat/zfs'
    SPL_PATH = '/sys/module/spl/parameters'
    TUNABLES_PATH = '/sys/module/zfs/parameters'

    VDEV_CACHE_SIZE = 'zfs_vdev_cache_size'

    def load_kstats(section):
        path = os.path.join(KSTAT_PATH, section)
        with open(path) as f:
            return list(f)[2:]  # Skip the two header lines

    def get_params(basepath):
        """Collect information on the Solaris Porting Layer (SPL) or the
        tunables, depending on the path given. Does not check whether the
        path is valid.
        """
        result = {}
        for name in os.listdir(basepath):
            path = os.path.join(basepath, name)
            with open(path) as f:
                value = f.read()
                result[name] = value.strip()
        return result

    def get_spl_params():
        return get_params(SPL_PATH)

    def get_tunable_params():
        return get_params(TUNABLES_PATH)

    def get_vdev_params():
        return get_params(TUNABLES_PATH)

    def get_version_impl(request):
        # The original arc_summary called /sbin/modinfo {spl,zfs} to get
        # the version information. We switch to /sys/module/{spl,zfs}/version
        # to make sure we get what is really loaded in the kernel.
        try:
            with open("/sys/module/{}/version".format(request)) as f:
                return f.read().strip()
        except:
            return "(unknown)"

    def get_descriptions(request):
        """Get the descriptions of the Solaris Porting Layer (SPL) or the
        tunables, return with minimal formatting.
        """

        if request not in ('spl', 'zfs'):
            print('ERROR: description of "{0}" requested'.format(request))
            sys.exit(1)

        descs = {}
        target_prefix = 'parm:'

        # We would prefer to do this with /sys/module -- see the discussion at
        # get_version_impl() -- but there isn't a way to get the descriptions
        # from there, so we fall back on modinfo
        command = ["/sbin/modinfo", request, "-0"]

        info = ''

        try:

            info = subprocess.run(command, stdout=subprocess.PIPE,
                                  check=True, universal_newlines=True)
            raw_output = info.stdout.split('\0')

        except subprocess.CalledProcessError:
            print("Error: Descriptions not available",
                  "(can't access kernel module)")
            sys.exit(1)

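        # Each modinfo record is NUL-terminated because of the "-0" flag
        # above. The records we care about look roughly like (example is
        # hypothetical):
        #     parm:           zfs_arc_max:Maximum ARC size in bytes (ulong)
        # Below we keep only the "parm:" records and split each one into
        # the parameter name and its description.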
        for line in raw_output:

            if not line.startswith(target_prefix):
                continue

            line = line[len(target_prefix):].strip()
            name, raw_desc = line.split(':', 1)
            desc = raw_desc.rsplit('(', 1)[0]

            if desc == '':
                desc = '(No description found)'

            descs[name.strip()] = desc.strip()

        return descs

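
# The two hooks below let the script exit quietly instead of dumping a
# traceback when its output is cut short, e.g. when piped into 'head' or a
# pager that quits early (BrokenPipeError, or ENOTCONN on some platforms),
# and when the user presses Ctrl-C (KeyboardInterrupt).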
def handle_unraisableException(exc_type, exc_value=None, exc_traceback=None,
                               err_msg=None, object=None):
    handle_Exception(exc_type, object, exc_traceback)

def handle_Exception(ex_cls, ex, tb):
    if ex_cls is KeyboardInterrupt:
        sys.exit()

    if ex_cls is BrokenPipeError:
        # It turns out that while sys.exit() triggers an exception
        # not handled message on Python 3.8+, os._exit() does not.
        os._exit(0)

    if ex_cls is OSError:
        if ex.errno == errno.ENOTCONN:
            sys.exit()

    raise ex

if hasattr(sys, 'unraisablehook'):  # Python 3.8+
    sys.unraisablehook = handle_unraisableException
sys.excepthook = handle_Exception


def cleanup_line(single_line):
    """Format a raw line of data from /proc and isolate the name value
    part, returning a tuple with each. Currently, this gets rid of the
    middle '4'. For example "arc_no_grow    4    0" returns the tuple
    ("arc_no_grow", "0").
    """
    name, _, value = single_line.split()

    return name, value


def draw_graph(kstats_dict):
    """Draw a primitive graph representing the basic information on the
    ARC -- its size and the proportion used by MFU and MRU -- and quit.
    We use max size of the ARC to calculate how full it is. This is a
    very rough representation.
    """
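    # Legend for the bar drawn below: 'F' is the share held by the MFU
    # cache, 'R' the share held by the MRU cache, 'O' everything else in
    # the ARC, and blank space is the unused part of the maximum size.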

    arc_stats = isolate_section('arcstats', kstats_dict)

    GRAPH_INDENT = ' '*4
    GRAPH_WIDTH = 60
    arc_size = f_bytes(arc_stats['size'])
    arc_perc = f_perc(arc_stats['size'], arc_stats['c_max'])
    mfu_size = f_bytes(arc_stats['mfu_size'])
    mru_size = f_bytes(arc_stats['mru_size'])
    meta_limit = f_bytes(arc_stats['arc_meta_limit'])
    meta_size = f_bytes(arc_stats['arc_meta_used'])
    dnode_limit = f_bytes(arc_stats['arc_dnode_limit'])
    dnode_size = f_bytes(arc_stats['dnode_size'])

    info_form = ('ARC: {0} ({1})  MFU: {2}  MRU: {3}  META: {4} ({5}) '
                 'DNODE {6} ({7})')
    info_line = info_form.format(arc_size, arc_perc, mfu_size, mru_size,
                                 meta_size, meta_limit, dnode_size,
                                 dnode_limit)
    info_spc = ' '*int((GRAPH_WIDTH-len(info_line))/2)
    info_line = GRAPH_INDENT+info_spc+info_line

    graph_line = GRAPH_INDENT+'+'+('-'*(GRAPH_WIDTH-2))+'+'

    mfu_perc = float(int(arc_stats['mfu_size'])/int(arc_stats['c_max']))
    mru_perc = float(int(arc_stats['mru_size'])/int(arc_stats['c_max']))
    arc_perc = float(int(arc_stats['size'])/int(arc_stats['c_max']))
    total_ticks = float(arc_perc)*GRAPH_WIDTH
    mfu_ticks = mfu_perc*GRAPH_WIDTH
    mru_ticks = mru_perc*GRAPH_WIDTH
    other_ticks = total_ticks-(mfu_ticks+mru_ticks)

    core_form = 'F'*int(mfu_ticks)+'R'*int(mru_ticks)+'O'*int(other_ticks)
    core_spc = ' '*(GRAPH_WIDTH-(2+len(core_form)))
    core_line = GRAPH_INDENT+'|'+core_form+core_spc+'|'

    for line in ('', info_line, graph_line, core_line, graph_line, ''):
        print(line)


def f_bytes(byte_string):
    """Return a human-readable representation of a byte value in
    powers of 2 (eg "KiB" for "kibibytes", etc) to one decimal
    place. Values smaller than one KiB are returned without a
    decimal place. Note "bytes" is a Python built-in name.
    """
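    # For example, f_bytes('1536') returns '1.5 KiB' and f_bytes('512')
    # returns '512 Bytes'.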

    prefixes = ([2**80, "YiB"],   # yobibytes (yotta)
                [2**70, "ZiB"],   # zebibytes (zetta)
                [2**60, "EiB"],   # exbibytes (exa)
                [2**50, "PiB"],   # pebibytes (peta)
                [2**40, "TiB"],   # tebibytes (tera)
                [2**30, "GiB"],   # gibibytes (giga)
                [2**20, "MiB"],   # mebibytes (mega)
                [2**10, "KiB"])   # kibibytes (kilo)

    bites = int(byte_string)

    if bites >= 2**10:
        for limit, unit in prefixes:

            if bites >= limit:
                value = bites / limit
                break

        result = '{0:.1f} {1}'.format(value, unit)
    else:
        result = '{0} Bytes'.format(bites)

    return result


def f_hits(hits_string):
    """Create a human-readable representation of the number of hits.
    The single-letter symbols used are SI to avoid the confusion caused
    by the different "short scale" and "long scale" representations in
    English, which use the same words for different values. See
    https://en.wikipedia.org/wiki/Names_of_large_numbers and:
    https://physics.nist.gov/cuu/Units/prefixes.html
    """
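    # For example, f_hits('1234567') returns '1.2M' and f_hits('999')
    # returns '999'.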

    numbers = ([10**24, 'Y'],  # yotta (septillion)
               [10**21, 'Z'],  # zetta (sextillion)
               [10**18, 'E'],  # exa   (quintillion)
               [10**15, 'P'],  # peta  (quadrillion)
               [10**12, 'T'],  # tera  (trillion)
               [10**9, 'G'],   # giga  (billion)
               [10**6, 'M'],   # mega  (million)
               [10**3, 'k'])   # kilo  (thousand)

    hits = int(hits_string)

    if hits >= 1000:
        for limit, symbol in numbers:

            if hits >= limit:
                value = hits/limit
                break

        result = "%0.1f%s" % (value, symbol)
    else:
        result = "%d" % hits

    return result


def f_perc(value1, value2):
    """Calculate a percentage and return it in human-readable form. If
    rounding produces the result '0.0' although the first number is
    not zero, include a 'less-than' symbol to avoid confusion.
    Division by zero is handled by returning 'n/a'; no error
    is raised.
    """
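    # For example, f_perc(1, 8) returns '12.5 %', f_perc(1, 100000)
    # returns '< 0.1 %', and f_perc(1, 0) returns 'n/a'.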

    v1 = float(value1)
    v2 = float(value2)

    try:
        perc = 100 * v1/v2
    except ZeroDivisionError:
        result = 'n/a'
    else:
        result = '{0:0.1f} %'.format(perc)

    if result == '0.0 %' and v1 > 0:
        result = '< 0.1 %'

    return result


def format_raw_line(name, value):
    """For the --raw option for the tunable and SPL outputs, decide on the
    correct formatting based on the --alternate flag.
    """
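    # With the defaults (INDENT of eight spaces, LINE_LENGTH of 72) the
    # value ends up right-aligned so that the line is 72 characters wide;
    # with --alternate the same data is printed as an indented 'name=value'.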

    if ARGS.alt:
        result = '{0}{1}={2}'.format(INDENT, name, value)
    else:
        # Right-align the value within the line length if it fits,
        # otherwise just separate it from the name by a single space.
        fit = LINE_LENGTH - len(INDENT) - len(name)
        overflow = len(value) + 1
        w = max(fit, overflow)
        result = '{0}{1}{2:>{w}}'.format(INDENT, name, value, w=w)

    return result


def get_kstats():
    """Collect information on the ZFS subsystem. This step does not perform
    any further processing, so later code can work on only what is actually
    needed. The name "kstat" is a holdover from the Solaris utility of the
    same name.
    """

    result = {}

    for section in SECTION_PATHS.values():
        if section not in result:
            result[section] = load_kstats(section)

    return result


def get_version(request):
    """Get the version number of ZFS or SPL on this machine for the header.
    Returns an error string, but does not raise an error, if we can't
    get the ZFS/SPL version.
    """

    if request not in ('spl', 'zfs'):
        error_msg = '(ERROR: "{0}" requested)'.format(request)
        return error_msg

    return get_version_impl(request)


def print_header():
    """Print the initial heading with date and time as well as info on the
    kernel and ZFS versions. This is not called for the graph.
    """

    # datetime is now recommended over time, but we keep the exact formatting
    # from the older version of arc_summary in case there are scripts
    # that expect this format
    daydate = time.strftime(DATE_FORMAT)
    spc_date = LINE_LENGTH-len(daydate)
    sys_version = os.uname()

    sys_msg = sys_version.sysname+' '+sys_version.release
    zfs = get_version('zfs')
    spc_zfs = LINE_LENGTH-len(zfs)

    machine_msg = 'Machine: '+sys_version.nodename+' ('+sys_version.machine+')'
    spl = get_version('spl')
    spc_spl = LINE_LENGTH-len(spl)

    print('\n'+('-'*LINE_LENGTH))
    print('{0:<{spc}}{1}'.format(TITLE, daydate, spc=spc_date))
    print('{0:<{spc}}{1}'.format(sys_msg, zfs, spc=spc_zfs))
    print('{0:<{spc}}{1}\n'.format(machine_msg, spl, spc=spc_spl))


def print_raw(kstats_dict):
    """Print all available data from the system in a minimally sorted format.
    This can be used as a source to be piped through 'grep'.
    """

    sections = sorted(kstats_dict.keys())

    for section in sections:

        print('\n{0}:'.format(section.upper()))
        lines = sorted(kstats_dict[section])

        for line in lines:
            name, value = cleanup_line(line)
            print(format_raw_line(name, value))

    # Tunables and SPL must be handled separately because they come from a
    # different source and have descriptions the user might request
    print()
    section_spl()
    section_tunables()


def isolate_section(section_name, kstats_dict):
    """From the complete information on all sections, retrieve only those
    for one section.
    """

    try:
        section_data = kstats_dict[section_name]
    except KeyError:
        print('ERROR: Data on {0} not available'.format(section_name))
        sys.exit(1)

    section_dict = dict(cleanup_line(l) for l in section_data)

    return section_dict


# Formatted output helper functions


def prt_1(text, value):
    """Print text and one value, no indent"""
    spc = ' '*(LINE_LENGTH-(len(text)+len(value)))
    print('{0}{spc}{1}'.format(text, value, spc=spc))


def prt_i1(text, value):
    """Print text and one value, with indent"""
    spc = ' '*(LINE_LENGTH-(len(INDENT)+len(text)+len(value)))
    print(INDENT+'{0}{spc}{1}'.format(text, value, spc=spc))


def prt_2(text, value1, value2):
    """Print text and two values, no indent"""
    values = '{0:>9}  {1:>9}'.format(value1, value2)
    spc = ' '*(LINE_LENGTH-(len(text)+len(values)+2))
    print('{0}{spc}  {1}'.format(text, values, spc=spc))


def prt_i2(text, value1, value2):
    """Print text and two values, with indent"""
    values = '{0:>9}  {1:>9}'.format(value1, value2)
    spc = ' '*(LINE_LENGTH-(len(INDENT)+len(text)+len(values)+2))
    print(INDENT+'{0}{spc}  {1}'.format(text, values, spc=spc))


# The section output concentrates on important parameters instead of
# being exhaustive (that is what the --raw parameter is for)


def section_arc(kstats_dict):
    """Give basic information on the ARC, MRU and MFU. This is the first
    and most used section.
    """

    arc_stats = isolate_section('arcstats', kstats_dict)

    throttle = arc_stats['memory_throttle_count']

    if throttle == '0':
        health = 'HEALTHY'
    else:
        health = 'THROTTLED'

    prt_1('ARC status:', health)
    prt_i1('Memory throttle count:', throttle)
    print()

    arc_size = arc_stats['size']
    arc_target_size = arc_stats['c']
    arc_max = arc_stats['c_max']
    arc_min = arc_stats['c_min']
    anon_size = arc_stats['anon_size']
    mfu_size = arc_stats['mfu_size']
    mru_size = arc_stats['mru_size']
    mfug_size = arc_stats['mfu_ghost_size']
    mrug_size = arc_stats['mru_ghost_size']
    unc_size = arc_stats['uncached_size']
    meta_limit = arc_stats['arc_meta_limit']
    meta_size = arc_stats['arc_meta_used']
    dnode_limit = arc_stats['arc_dnode_limit']
    dnode_size = arc_stats['dnode_size']
    target_size_ratio = '{0}:1'.format(int(arc_max) // int(arc_min))

    prt_2('ARC size (current):',
          f_perc(arc_size, arc_max), f_bytes(arc_size))
    prt_i2('Target size (adaptive):',
           f_perc(arc_target_size, arc_max), f_bytes(arc_target_size))
    prt_i2('Min size (hard limit):',
           f_perc(arc_min, arc_max), f_bytes(arc_min))
    prt_i2('Max size (high water):',
           target_size_ratio, f_bytes(arc_max))
    caches_size = int(anon_size)+int(mfu_size)+int(mru_size)+int(unc_size)
    prt_i2('Anonymous data size:',
           f_perc(anon_size, caches_size), f_bytes(anon_size))
    prt_i2('Most Frequently Used (MFU) cache size:',
           f_perc(mfu_size, caches_size), f_bytes(mfu_size))
    prt_i2('Most Recently Used (MRU) cache size:',
           f_perc(mru_size, caches_size), f_bytes(mru_size))
    prt_i1('Most Frequently Used (MFU) ghost size:', f_bytes(mfug_size))
    prt_i1('Most Recently Used (MRU) ghost size:', f_bytes(mrug_size))
    prt_i2('Uncached data size:',
           f_perc(unc_size, caches_size), f_bytes(unc_size))
    prt_i2('Metadata cache size (hard limit):',
           f_perc(meta_limit, arc_max), f_bytes(meta_limit))
    prt_i2('Metadata cache size (current):',
           f_perc(meta_size, meta_limit), f_bytes(meta_size))
    prt_i2('Dnode cache size (hard limit):',
           f_perc(dnode_limit, meta_limit), f_bytes(dnode_limit))
    prt_i2('Dnode cache size (current):',
           f_perc(dnode_size, dnode_limit), f_bytes(dnode_size))
    print()

    print('ARC hash breakdown:')
    prt_i1('Elements max:', f_hits(arc_stats['hash_elements_max']))
    prt_i2('Elements current:',
           f_perc(arc_stats['hash_elements'], arc_stats['hash_elements_max']),
           f_hits(arc_stats['hash_elements']))
    prt_i1('Collisions:', f_hits(arc_stats['hash_collisions']))

    prt_i1('Chain max:', f_hits(arc_stats['hash_chain_max']))
    prt_i1('Chains:', f_hits(arc_stats['hash_chains']))
    print()

    print('ARC misc:')
    prt_i1('Deleted:', f_hits(arc_stats['deleted']))
    prt_i1('Mutex misses:', f_hits(arc_stats['mutex_miss']))
    prt_i1('Eviction skips:', f_hits(arc_stats['evict_skip']))
    prt_i1('Eviction skips due to L2 writes:',
           f_hits(arc_stats['evict_l2_skip']))
    prt_i1('L2 cached evictions:', f_bytes(arc_stats['evict_l2_cached']))
    prt_i1('L2 eligible evictions:', f_bytes(arc_stats['evict_l2_eligible']))
    prt_i2('L2 eligible MFU evictions:',
           f_perc(arc_stats['evict_l2_eligible_mfu'],
           arc_stats['evict_l2_eligible']),
           f_bytes(arc_stats['evict_l2_eligible_mfu']))
    prt_i2('L2 eligible MRU evictions:',
           f_perc(arc_stats['evict_l2_eligible_mru'],
           arc_stats['evict_l2_eligible']),
           f_bytes(arc_stats['evict_l2_eligible_mru']))
    prt_i1('L2 ineligible evictions:',
           f_bytes(arc_stats['evict_l2_ineligible']))
    print()


def section_archits(kstats_dict):
    """Print information on how the caches are accessed ("arc hits").
    """

    arc_stats = isolate_section('arcstats', kstats_dict)
    all_accesses = int(arc_stats['hits'])+int(arc_stats['iohits'])+\
        int(arc_stats['misses'])

    prt_1('ARC total accesses:', f_hits(all_accesses))
    ta_todo = (('Total hits:', arc_stats['hits']),
               ('Total I/O hits:', arc_stats['iohits']),
               ('Total misses:', arc_stats['misses']))
    for title, value in ta_todo:
        prt_i2(title, f_perc(value, all_accesses), f_hits(value))
    print()

    dd_total = int(arc_stats['demand_data_hits']) +\
        int(arc_stats['demand_data_iohits']) +\
        int(arc_stats['demand_data_misses'])
    prt_2('ARC demand data accesses:', f_perc(dd_total, all_accesses),
          f_hits(dd_total))
    dd_todo = (('Demand data hits:', arc_stats['demand_data_hits']),
               ('Demand data I/O hits:', arc_stats['demand_data_iohits']),
               ('Demand data misses:', arc_stats['demand_data_misses']))
    for title, value in dd_todo:
        prt_i2(title, f_perc(value, dd_total), f_hits(value))
    print()

    dm_total = int(arc_stats['demand_metadata_hits']) +\
        int(arc_stats['demand_metadata_iohits']) +\
        int(arc_stats['demand_metadata_misses'])
    prt_2('ARC demand metadata accesses:', f_perc(dm_total, all_accesses),
          f_hits(dm_total))
    dm_todo = (('Demand metadata hits:', arc_stats['demand_metadata_hits']),
               ('Demand metadata I/O hits:',
                arc_stats['demand_metadata_iohits']),
               ('Demand metadata misses:', arc_stats['demand_metadata_misses']))
    for title, value in dm_todo:
        prt_i2(title, f_perc(value, dm_total), f_hits(value))
    print()

    pd_total = int(arc_stats['prefetch_data_hits']) +\
        int(arc_stats['prefetch_data_iohits']) +\
        int(arc_stats['prefetch_data_misses'])
    prt_2('ARC prefetch data accesses:', f_perc(pd_total, all_accesses),
          f_hits(pd_total))
    pd_todo = (('Prefetch data hits:', arc_stats['prefetch_data_hits']),
               ('Prefetch data I/O hits:', arc_stats['prefetch_data_iohits']),
               ('Prefetch data misses:', arc_stats['prefetch_data_misses']))
    for title, value in pd_todo:
        prt_i2(title, f_perc(value, pd_total), f_hits(value))
    print()

    pm_total = int(arc_stats['prefetch_metadata_hits']) +\
        int(arc_stats['prefetch_metadata_iohits']) +\
        int(arc_stats['prefetch_metadata_misses'])
    prt_2('ARC prefetch metadata accesses:', f_perc(pm_total, all_accesses),
          f_hits(pm_total))
    pm_todo = (('Prefetch metadata hits:',
                arc_stats['prefetch_metadata_hits']),
               ('Prefetch metadata I/O hits:',
                arc_stats['prefetch_metadata_iohits']),
               ('Prefetch metadata misses:',
                arc_stats['prefetch_metadata_misses']))
    for title, value in pm_todo:
        prt_i2(title, f_perc(value, pm_total), f_hits(value))
    print()

    all_prefetches = int(arc_stats['predictive_prefetch'])+\
        int(arc_stats['prescient_prefetch'])
    prt_2('ARC predictive prefetches:',
          f_perc(arc_stats['predictive_prefetch'], all_prefetches),
          f_hits(arc_stats['predictive_prefetch']))
    prt_i2('Demand hits after predictive:',
           f_perc(arc_stats['demand_hit_predictive_prefetch'],
                  arc_stats['predictive_prefetch']),
           f_hits(arc_stats['demand_hit_predictive_prefetch']))
    prt_i2('Demand I/O hits after predictive:',
           f_perc(arc_stats['demand_iohit_predictive_prefetch'],
                  arc_stats['predictive_prefetch']),
           f_hits(arc_stats['demand_iohit_predictive_prefetch']))
    never = int(arc_stats['predictive_prefetch']) -\
        int(arc_stats['demand_hit_predictive_prefetch']) -\
        int(arc_stats['demand_iohit_predictive_prefetch'])
    prt_i2('Never demanded after predictive:',
           f_perc(never, arc_stats['predictive_prefetch']),
           f_hits(never))
    print()

    prt_2('ARC prescient prefetches:',
          f_perc(arc_stats['prescient_prefetch'], all_prefetches),
          f_hits(arc_stats['prescient_prefetch']))
    prt_i2('Demand hits after prescient:',
           f_perc(arc_stats['demand_hit_prescient_prefetch'],
                  arc_stats['prescient_prefetch']),
           f_hits(arc_stats['demand_hit_prescient_prefetch']))
    prt_i2('Demand I/O hits after prescient:',
           f_perc(arc_stats['demand_iohit_prescient_prefetch'],
                  arc_stats['prescient_prefetch']),
           f_hits(arc_stats['demand_iohit_prescient_prefetch']))
    never = int(arc_stats['prescient_prefetch'])-\
        int(arc_stats['demand_hit_prescient_prefetch'])-\
        int(arc_stats['demand_iohit_prescient_prefetch'])
    prt_i2('Never demanded after prescient:',
           f_perc(never, arc_stats['prescient_prefetch']),
           f_hits(never))
    print()

    print('ARC states hits of all accesses:')
    cl_todo = (('Most frequently used (MFU):', arc_stats['mfu_hits']),
               ('Most recently used (MRU):', arc_stats['mru_hits']),
               ('Most frequently used (MFU) ghost:',
                arc_stats['mfu_ghost_hits']),
               ('Most recently used (MRU) ghost:',
                arc_stats['mru_ghost_hits']),
               ('Uncached:', arc_stats['uncached_hits']))
    for title, value in cl_todo:
        prt_i2(title, f_perc(value, all_accesses), f_hits(value))
    print()


def section_dmu(kstats_dict):
    """Collect information on the DMU"""

    zfetch_stats = isolate_section('zfetchstats', kstats_dict)

    zfetch_access_total = int(zfetch_stats['hits'])+int(zfetch_stats['misses'])

    prt_1('DMU predictive prefetcher calls:', f_hits(zfetch_access_total))
    prt_i2('Stream hits:',
           f_perc(zfetch_stats['hits'], zfetch_access_total),
           f_hits(zfetch_stats['hits']))
    prt_i2('Stream misses:',
           f_perc(zfetch_stats['misses'], zfetch_access_total),
           f_hits(zfetch_stats['misses']))
    prt_i2('Streams limit reached:',
           f_perc(zfetch_stats['max_streams'], zfetch_stats['misses']),
           f_hits(zfetch_stats['max_streams']))
    prt_i1('Prefetches issued', f_hits(zfetch_stats['io_issued']))
    print()


def section_l2arc(kstats_dict):
    """Collect information on the L2ARC device if present. If not, tell the
    user that we're skipping the section.
    """

    # The L2ARC statistics live in the same section as the normal ARC stuff
    arc_stats = isolate_section('arcstats', kstats_dict)

    if arc_stats['l2_size'] == '0':
        print('L2ARC not detected, skipping section\n')
        return

    l2_errors = int(arc_stats['l2_writes_error']) +\
        int(arc_stats['l2_cksum_bad']) +\
        int(arc_stats['l2_io_error'])

    l2_access_total = int(arc_stats['l2_hits'])+int(arc_stats['l2_misses'])
    health = 'HEALTHY'

    if l2_errors > 0:
        health = 'DEGRADED'

    prt_1('L2ARC status:', health)

    l2_todo = (('Low memory aborts:', 'l2_abort_lowmem'),
               ('Free on write:', 'l2_free_on_write'),
               ('R/W clashes:', 'l2_rw_clash'),
               ('Bad checksums:', 'l2_cksum_bad'),
               ('I/O errors:', 'l2_io_error'))

    for title, value in l2_todo:
        prt_i1(title, f_hits(arc_stats[value]))

    print()
    prt_1('L2ARC size (adaptive):', f_bytes(arc_stats['l2_size']))
    prt_i2('Compressed:', f_perc(arc_stats['l2_asize'], arc_stats['l2_size']),
           f_bytes(arc_stats['l2_asize']))
    prt_i2('Header size:',
           f_perc(arc_stats['l2_hdr_size'], arc_stats['l2_size']),
           f_bytes(arc_stats['l2_hdr_size']))
    prt_i2('MFU allocated size:',
           f_perc(arc_stats['l2_mfu_asize'], arc_stats['l2_asize']),
           f_bytes(arc_stats['l2_mfu_asize']))
    prt_i2('MRU allocated size:',
           f_perc(arc_stats['l2_mru_asize'], arc_stats['l2_asize']),
           f_bytes(arc_stats['l2_mru_asize']))
    prt_i2('Prefetch allocated size:',
           f_perc(arc_stats['l2_prefetch_asize'], arc_stats['l2_asize']),
           f_bytes(arc_stats['l2_prefetch_asize']))
    prt_i2('Data (buffer content) allocated size:',
           f_perc(arc_stats['l2_bufc_data_asize'], arc_stats['l2_asize']),
           f_bytes(arc_stats['l2_bufc_data_asize']))
    prt_i2('Metadata (buffer content) allocated size:',
           f_perc(arc_stats['l2_bufc_metadata_asize'], arc_stats['l2_asize']),
           f_bytes(arc_stats['l2_bufc_metadata_asize']))

    print()
    prt_1('L2ARC breakdown:', f_hits(l2_access_total))
    prt_i2('Hit ratio:',
           f_perc(arc_stats['l2_hits'], l2_access_total),
           f_hits(arc_stats['l2_hits']))
    prt_i2('Miss ratio:',
           f_perc(arc_stats['l2_misses'], l2_access_total),
           f_hits(arc_stats['l2_misses']))
    prt_i1('Feeds:', f_hits(arc_stats['l2_feeds']))

    print()
    print('L2ARC writes:')

    if arc_stats['l2_writes_done'] != arc_stats['l2_writes_sent']:
        prt_i2('Writes sent:', 'FAULTED', f_hits(arc_stats['l2_writes_sent']))
        prt_i2('Done ratio:',
               f_perc(arc_stats['l2_writes_done'],
                      arc_stats['l2_writes_sent']),
               f_hits(arc_stats['l2_writes_done']))
        prt_i2('Error ratio:',
               f_perc(arc_stats['l2_writes_error'],
                      arc_stats['l2_writes_sent']),
               f_hits(arc_stats['l2_writes_error']))
    else:
        prt_i2('Writes sent:', '100 %', f_hits(arc_stats['l2_writes_sent']))

    print()
    print('L2ARC evicts:')
    prt_i1('Lock retries:', f_hits(arc_stats['l2_evict_lock_retry']))
    prt_i1('Upon reading:', f_hits(arc_stats['l2_evict_reading']))
    print()


def section_spl(*_):
    """Print the SPL parameters, if requested with alternative format
    and/or descriptions. This does not use kstats.
    """

    if sys.platform.startswith('freebsd'):
        # No SPL support in FreeBSD
        return

    spls = get_spl_params()
    keylist = sorted(spls.keys())
    print('Solaris Porting Layer (SPL):')

    if ARGS.desc:
        descriptions = get_descriptions('spl')

    for key in keylist:
        value = spls[key]

        if ARGS.desc:
            try:
                print(INDENT+'#', descriptions[key])
            except KeyError:
                print(INDENT+'# (No description found)')  # paranoid

        print(format_raw_line(key, value))

    print()


def section_tunables(*_):
    """Print the tunables, if requested with alternative format and/or
    descriptions. This does not use kstats.
    """

    tunables = get_tunable_params()
    keylist = sorted(tunables.keys())
    print('Tunables:')

    if ARGS.desc:
        descriptions = get_descriptions('zfs')

    for key in keylist:
        value = tunables[key]

        if ARGS.desc:
            try:
                print(INDENT+'#', descriptions[key])
            except KeyError:
                print(INDENT+'# (No description found)')  # paranoid

        print(format_raw_line(key, value))

    print()


def section_vdev(kstats_dict):
    """Collect information on VDEV caches"""

    # Currently [Nov 2017] the VDEV cache is disabled, because it is actually
    # harmful. When this is the case, we just skip the whole entry. See
    # https://github.com/openzfs/zfs/blob/master/module/zfs/vdev_cache.c
    # for details
    tunables = get_vdev_params()

    if tunables[VDEV_CACHE_SIZE] == '0':
        print('VDEV cache disabled, skipping section\n')
        return

    vdev_stats = isolate_section('vdev_cache_stats', kstats_dict)

    vdev_cache_total = int(vdev_stats['hits']) +\
        int(vdev_stats['misses']) +\
        int(vdev_stats['delegations'])

    prt_1('VDEV cache summary:', f_hits(vdev_cache_total))
    prt_i2('Hit ratio:', f_perc(vdev_stats['hits'], vdev_cache_total),
           f_hits(vdev_stats['hits']))
    prt_i2('Miss ratio:', f_perc(vdev_stats['misses'], vdev_cache_total),
           f_hits(vdev_stats['misses']))
    prt_i2('Delegations:', f_perc(vdev_stats['delegations'], vdev_cache_total),
           f_hits(vdev_stats['delegations']))
    print()


def section_zil(kstats_dict):
    """Collect information on the ZFS Intent Log. Some of the information
    taken from https://github.com/openzfs/zfs/blob/master/include/sys/zil.h
    """

    zil_stats = isolate_section('zil', kstats_dict)

    prt_1('ZIL committed transactions:',
          f_hits(zil_stats['zil_itx_count']))
    prt_i1('Commit requests:', f_hits(zil_stats['zil_commit_count']))
    prt_i1('Flushes to stable storage:',
           f_hits(zil_stats['zil_commit_writer_count']))
    prt_i2('Transactions to SLOG storage pool:',
           f_bytes(zil_stats['zil_itx_metaslab_slog_bytes']),
           f_hits(zil_stats['zil_itx_metaslab_slog_count']))
    prt_i2('Transactions to non-SLOG storage pool:',
           f_bytes(zil_stats['zil_itx_metaslab_normal_bytes']),
           f_hits(zil_stats['zil_itx_metaslab_normal_count']))
    print()


section_calls = {'arc': section_arc,
                 'archits': section_archits,
                 'dmu': section_dmu,
                 'l2arc': section_l2arc,
                 'spl': section_spl,
                 'tunables': section_tunables,
                 'vdev': section_vdev,
                 'zil': section_zil}


def main():
    """Run program. The options to draw a graph and to print all data raw are
    treated separately because they come with their own call.
    """

    kstats = get_kstats()

    if ARGS.graph:
        draw_graph(kstats)
        sys.exit(0)

    print_header()

    if ARGS.raw:
        print_raw(kstats)

    elif ARGS.section:

        try:
            section_calls[ARGS.section](kstats)
        except KeyError:
            print('Error: Section "{0}" unknown'.format(ARGS.section))
            sys.exit(1)

    elif ARGS.page:
        print('WARNING: Pages are deprecated, please use "--section"\n')

        pages_to_calls = {1: 'arc',
                          2: 'archits',
                          3: 'l2arc',
                          4: 'dmu',
                          5: 'vdev',
                          6: 'tunables'}

        try:
            call = pages_to_calls[ARGS.page]
        except KeyError:
            print('Error: Page "{0}" not supported'.format(ARGS.page))
            sys.exit(1)
        else:
            section_calls[call](kstats)

    else:
        # If no parameters were given, we print all sections. We might want to
        # change the sequence by hand
        calls = sorted(section_calls.keys())

        for section in calls:
            section_calls[section](kstats)

    sys.exit(0)


if __name__ == '__main__':
    main()