*.swp
*.gcno
*.gcda
+*.pyc
+*.pyo
.deps
.libs
.dirstamp
-dist_bin_SCRIPTS = arc_summary.py arc_summary3.py
+EXTRA_DIST = arc_summary2 arc_summary3
+
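+# Install whichever script matches the Python version chosen at configure
+# time, and rename it to plain "arc_summary" via install-exec-hook.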
+if USING_PYTHON_2
+dist_bin_SCRIPTS = arc_summary2
+install-exec-hook:
+ mv $(DESTDIR)$(bindir)/arc_summary2 $(DESTDIR)$(bindir)/arc_summary
+endif
+
+if USING_PYTHON_3
+dist_bin_SCRIPTS = arc_summary3
+install-exec-hook:
+ mv $(DESTDIR)$(bindir)/arc_summary3 $(DESTDIR)$(bindir)/arc_summary
+endif
+++ /dev/null
-#!/usr/bin/python
-#
-# $Id: arc_summary.pl,v 388:e27800740aa2 2011-07-08 02:53:29Z jhell $
-#
-# Copyright (c) 2008 Ben Rockwood <benr@cuddletech.com>,
-# Copyright (c) 2010 Martin Matuska <mm@FreeBSD.org>,
-# Copyright (c) 2010-2011 Jason J. Hellenthal <jhell@DataIX.net>,
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-# SUCH DAMAGE.
-#
-# If you are having trouble when using this script from cron(8), please try
-# adjusting your PATH before reporting problems.
-#
-# Note that some of this code uses older constructs (e.g. getopt instead of
-# argparse, and subprocess.Popen() instead of subprocess.run()) because we
-# need to support some very old versions of Python.
-"""Print statistics on the ZFS Adjustable Replacement Cache (ARC)
-
-Provides basic information on the ARC, its efficiency, the L2ARC (if present),
-the Data Management Unit (DMU), Virtual Devices (VDEVs), and tunables. See the
-in-source documentation and code at
-https://github.com/zfsonlinux/zfs/blob/master/module/zfs/arc.c for details.
-"""
-
-import getopt
-import os
-import sys
-import time
-import errno
-
-from subprocess import Popen, PIPE
-from decimal import Decimal as D
-
-show_tunable_descriptions = False
-alternate_tunable_layout = False
-
-
-def handle_Exception(ex_cls, ex, tb):
- if ex_cls is IOError:
- if ex.errno == errno.EPIPE:
- sys.exit()
-
- if ex_cls is KeyboardInterrupt:
- sys.exit()
-
-
-sys.excepthook = handle_Exception
-
-
-def get_Kstat():
- """Collect information on the ZFS subsystem from the /proc virtual
- file system. The name "kstat" is a holdover from the Solaris utility
- of the same name.
- """
-
- def load_proc_kstats(fn, namespace):
- """Collect information on a specific subsystem of the ARC"""
-
- kstats = [line.strip() for line in open(fn)]
- del kstats[0:2]
- for kstat in kstats:
- kstat = kstat.strip()
- name, _, value = kstat.split()
- Kstat[namespace + name] = D(value)
-
- Kstat = {}
- load_proc_kstats('/proc/spl/kstat/zfs/arcstats',
- 'kstat.zfs.misc.arcstats.')
- load_proc_kstats('/proc/spl/kstat/zfs/zfetchstats',
- 'kstat.zfs.misc.zfetchstats.')
- load_proc_kstats('/proc/spl/kstat/zfs/vdev_cache_stats',
- 'kstat.zfs.misc.vdev_cache_stats.')
-
- return Kstat
-
-
-def fBytes(b=0):
- """Return human-readable representation of a byte value in
- powers of 2 (e.g. "KiB" for "kibibytes") to two decimal
- places. Values smaller than one KiB are returned without
- decimal places.
- """
-
- prefixes = [
- [2**80, "YiB"], # yobibytes (yotta)
- [2**70, "ZiB"], # zebibytes (zetta)
- [2**60, "EiB"], # exbibytes (exa)
- [2**50, "PiB"], # pebibytes (peta)
- [2**40, "TiB"], # tebibytes (tera)
- [2**30, "GiB"], # gibibytes (giga)
- [2**20, "MiB"], # mebibytes (mega)
- [2**10, "KiB"]] # kibibytes (kilo)
-
- if b >= 2**10:
-
- for limit, unit in prefixes:
-
- if b >= limit:
- value = b / limit
- break
-
- result = "%0.2f\t%s" % (value, unit)
-
- else:
-
- result = "%d\tBytes" % b
-
- return result
-
-
-def fHits(hits=0):
- """Create a human-readable representation of the number of hits.
- The single-letter symbols used are SI to avoid the confusion caused
- by the different "short scale" and "long scale" representations in
- English, which use the same words for different values. See
- https://en.wikipedia.org/wiki/Names_of_large_numbers and
- https://physics.nist.gov/cuu/Units/prefixes.html
- """
-
- numbers = [
- [10**24, 'Y'], # yotta (septillion)
- [10**21, 'Z'], # zetta (sextillion)
- [10**18, 'E'], # exa (quintillion)
- [10**15, 'P'], # peta (quadrillion)
- [10**12, 'T'], # tera (trillion)
- [10**9, 'G'], # giga (billion)
- [10**6, 'M'], # mega (million)
- [10**3, 'k']] # kilo (thousand)
-
- if hits >= 1000:
-
- for limit, symbol in numbers:
-
- if hits >= limit:
- value = hits/limit
- break
-
- result = "%0.2f%s" % (value, symbol)
-
- else:
-
- result = "%d" % hits
-
- return result
-
-
-def fPerc(lVal=0, rVal=0, Decimal=2):
- """Calculate percentage value and return in human-readable format"""
-
- if rVal > 0:
- return str("%0." + str(Decimal) + "f") % (100 * (lVal / rVal)) + "%"
- else:
- return str("%0." + str(Decimal) + "f") % 100 + "%"
-
-
-def get_arc_summary(Kstat):
- """Collect general data on the ARC"""
-
- output = {}
- memory_throttle_count = Kstat[
- "kstat.zfs.misc.arcstats.memory_throttle_count"
- ]
-
- if memory_throttle_count > 0:
- output['health'] = 'THROTTLED'
- else:
- output['health'] = 'HEALTHY'
-
- output['memory_throttle_count'] = fHits(memory_throttle_count)
-
- # ARC Misc.
- deleted = Kstat["kstat.zfs.misc.arcstats.deleted"]
- mutex_miss = Kstat["kstat.zfs.misc.arcstats.mutex_miss"]
- evict_skip = Kstat["kstat.zfs.misc.arcstats.evict_skip"]
-
- # ARC Misc.
- output["arc_misc"] = {}
- output["arc_misc"]["deleted"] = fHits(deleted)
- output["arc_misc"]['mutex_miss'] = fHits(mutex_miss)
- output["arc_misc"]['evict_skips'] = fHits(evict_skip)
-
- # ARC Sizing
- arc_size = Kstat["kstat.zfs.misc.arcstats.size"]
- mru_size = Kstat["kstat.zfs.misc.arcstats.mru_size"]
- mfu_size = Kstat["kstat.zfs.misc.arcstats.mfu_size"]
- meta_limit = Kstat["kstat.zfs.misc.arcstats.arc_meta_limit"]
- meta_size = Kstat["kstat.zfs.misc.arcstats.arc_meta_used"]
- dnode_limit = Kstat["kstat.zfs.misc.arcstats.arc_dnode_limit"]
- dnode_size = Kstat["kstat.zfs.misc.arcstats.dnode_size"]
- target_max_size = Kstat["kstat.zfs.misc.arcstats.c_max"]
- target_min_size = Kstat["kstat.zfs.misc.arcstats.c_min"]
- target_size = Kstat["kstat.zfs.misc.arcstats.c"]
-
- target_size_ratio = (target_max_size / target_min_size)
-
- # ARC Sizing
- output['arc_sizing'] = {}
- output['arc_sizing']['arc_size'] = {
- 'per': fPerc(arc_size, target_max_size),
- 'num': fBytes(arc_size),
- }
- output['arc_sizing']['target_max_size'] = {
- 'ratio': target_size_ratio,
- 'num': fBytes(target_max_size),
- }
- output['arc_sizing']['target_min_size'] = {
- 'per': fPerc(target_min_size, target_max_size),
- 'num': fBytes(target_min_size),
- }
- output['arc_sizing']['target_size'] = {
- 'per': fPerc(target_size, target_max_size),
- 'num': fBytes(target_size),
- }
- output['arc_sizing']['meta_limit'] = {
- 'per': fPerc(meta_limit, target_max_size),
- 'num': fBytes(meta_limit),
- }
- output['arc_sizing']['meta_size'] = {
- 'per': fPerc(meta_size, meta_limit),
- 'num': fBytes(meta_size),
- }
- output['arc_sizing']['dnode_limit'] = {
- 'per': fPerc(dnode_limit, meta_limit),
- 'num': fBytes(dnode_limit),
- }
- output['arc_sizing']['dnode_size'] = {
- 'per': fPerc(dnode_size, dnode_limit),
- 'num': fBytes(dnode_size),
- }
-
- # ARC Hash Breakdown
- output['arc_hash_break'] = {}
- output['arc_hash_break']['hash_chain_max'] = Kstat[
- "kstat.zfs.misc.arcstats.hash_chain_max"
- ]
- output['arc_hash_break']['hash_chains'] = Kstat[
- "kstat.zfs.misc.arcstats.hash_chains"
- ]
- output['arc_hash_break']['hash_collisions'] = Kstat[
- "kstat.zfs.misc.arcstats.hash_collisions"
- ]
- output['arc_hash_break']['hash_elements'] = Kstat[
- "kstat.zfs.misc.arcstats.hash_elements"
- ]
- output['arc_hash_break']['hash_elements_max'] = Kstat[
- "kstat.zfs.misc.arcstats.hash_elements_max"
- ]
-
- output['arc_size_break'] = {}
- output['arc_size_break']['recently_used_cache_size'] = {
- 'per': fPerc(mru_size, mru_size + mfu_size),
- 'num': fBytes(mru_size),
- }
- output['arc_size_break']['frequently_used_cache_size'] = {
- 'per': fPerc(mfu_size, mru_size + mfu_size),
- 'num': fBytes(mfu_size),
- }
-
- # ARC Hash Breakdown
- hash_chain_max = Kstat["kstat.zfs.misc.arcstats.hash_chain_max"]
- hash_chains = Kstat["kstat.zfs.misc.arcstats.hash_chains"]
- hash_collisions = Kstat["kstat.zfs.misc.arcstats.hash_collisions"]
- hash_elements = Kstat["kstat.zfs.misc.arcstats.hash_elements"]
- hash_elements_max = Kstat["kstat.zfs.misc.arcstats.hash_elements_max"]
-
- output['arc_hash_break'] = {}
- output['arc_hash_break']['elements_max'] = fHits(hash_elements_max)
- output['arc_hash_break']['elements_current'] = {
- 'per': fPerc(hash_elements, hash_elements_max),
- 'num': fHits(hash_elements),
- }
- output['arc_hash_break']['collisions'] = fHits(hash_collisions)
- output['arc_hash_break']['chain_max'] = fHits(hash_chain_max)
- output['arc_hash_break']['chains'] = fHits(hash_chains)
-
- return output
-
-
-def _arc_summary(Kstat):
- """Print information on the ARC"""
-
- # ARC Sizing
- arc = get_arc_summary(Kstat)
-
- sys.stdout.write("ARC Summary: (%s)\n" % arc['health'])
-
- sys.stdout.write("\tMemory Throttle Count:\t\t\t%s\n" %
- arc['memory_throttle_count'])
- sys.stdout.write("\n")
-
- # ARC Misc.
- sys.stdout.write("ARC Misc:\n")
- sys.stdout.write("\tDeleted:\t\t\t\t%s\n" % arc['arc_misc']['deleted'])
- sys.stdout.write("\tMutex Misses:\t\t\t\t%s\n" %
- arc['arc_misc']['mutex_miss'])
- sys.stdout.write("\tEvict Skips:\t\t\t\t%s\n" %
- arc['arc_misc']['evict_skips'])
- sys.stdout.write("\n")
-
- # ARC Sizing
- sys.stdout.write("ARC Size:\t\t\t\t%s\t%s\n" % (
- arc['arc_sizing']['arc_size']['per'],
- arc['arc_sizing']['arc_size']['num']
- )
- )
- sys.stdout.write("\tTarget Size: (Adaptive)\t\t%s\t%s\n" % (
- arc['arc_sizing']['target_size']['per'],
- arc['arc_sizing']['target_size']['num'],
- )
- )
-
- sys.stdout.write("\tMin Size (Hard Limit):\t\t%s\t%s\n" % (
- arc['arc_sizing']['target_min_size']['per'],
- arc['arc_sizing']['target_min_size']['num'],
- )
- )
-
- sys.stdout.write("\tMax Size (High Water):\t\t%d:1\t%s\n" % (
- arc['arc_sizing']['target_max_size']['ratio'],
- arc['arc_sizing']['target_max_size']['num'],
- )
- )
-
- sys.stdout.write("\nARC Size Breakdown:\n")
- sys.stdout.write("\tRecently Used Cache Size:\t%s\t%s\n" % (
- arc['arc_size_break']['recently_used_cache_size']['per'],
- arc['arc_size_break']['recently_used_cache_size']['num'],
- )
- )
- sys.stdout.write("\tFrequently Used Cache Size:\t%s\t%s\n" % (
- arc['arc_size_break']['frequently_used_cache_size']['per'],
- arc['arc_size_break']['frequently_used_cache_size']['num'],
- )
- )
- sys.stdout.write("\tMetadata Size (Hard Limit):\t%s\t%s\n" % (
- arc['arc_sizing']['meta_limit']['per'],
- arc['arc_sizing']['meta_limit']['num'],
- )
- )
- sys.stdout.write("\tMetadata Size:\t\t\t%s\t%s\n" % (
- arc['arc_sizing']['meta_size']['per'],
- arc['arc_sizing']['meta_size']['num'],
- )
- )
- sys.stdout.write("\tDnode Size (Hard Limit):\t%s\t%s\n" % (
- arc['arc_sizing']['dnode_limit']['per'],
- arc['arc_sizing']['dnode_limit']['num'],
- )
- )
- sys.stdout.write("\tDnode Size:\t\t\t%s\t%s\n" % (
- arc['arc_sizing']['dnode_size']['per'],
- arc['arc_sizing']['dnode_size']['num'],
- )
- )
-
- sys.stdout.write("\n")
-
- # ARC Hash Breakdown
- sys.stdout.write("ARC Hash Breakdown:\n")
- sys.stdout.write("\tElements Max:\t\t\t\t%s\n" %
- arc['arc_hash_break']['elements_max'])
- sys.stdout.write("\tElements Current:\t\t%s\t%s\n" % (
- arc['arc_hash_break']['elements_current']['per'],
- arc['arc_hash_break']['elements_current']['num'],
- )
- )
- sys.stdout.write("\tCollisions:\t\t\t\t%s\n" %
- arc['arc_hash_break']['collisions'])
- sys.stdout.write("\tChain Max:\t\t\t\t%s\n" %
- arc['arc_hash_break']['chain_max'])
- sys.stdout.write("\tChains:\t\t\t\t\t%s\n" %
- arc['arc_hash_break']['chains'])
-
-
-def get_arc_efficiency(Kstat):
- """Collect information on the efficiency of the ARC"""
-
- output = {}
-
- arc_hits = Kstat["kstat.zfs.misc.arcstats.hits"]
- arc_misses = Kstat["kstat.zfs.misc.arcstats.misses"]
- demand_data_hits = Kstat["kstat.zfs.misc.arcstats.demand_data_hits"]
- demand_data_misses = Kstat["kstat.zfs.misc.arcstats.demand_data_misses"]
- demand_metadata_hits = Kstat[
- "kstat.zfs.misc.arcstats.demand_metadata_hits"
- ]
- demand_metadata_misses = Kstat[
- "kstat.zfs.misc.arcstats.demand_metadata_misses"
- ]
- mfu_ghost_hits = Kstat["kstat.zfs.misc.arcstats.mfu_ghost_hits"]
- mfu_hits = Kstat["kstat.zfs.misc.arcstats.mfu_hits"]
- mru_ghost_hits = Kstat["kstat.zfs.misc.arcstats.mru_ghost_hits"]
- mru_hits = Kstat["kstat.zfs.misc.arcstats.mru_hits"]
- prefetch_data_hits = Kstat["kstat.zfs.misc.arcstats.prefetch_data_hits"]
- prefetch_data_misses = Kstat[
- "kstat.zfs.misc.arcstats.prefetch_data_misses"
- ]
- prefetch_metadata_hits = Kstat[
- "kstat.zfs.misc.arcstats.prefetch_metadata_hits"
- ]
- prefetch_metadata_misses = Kstat[
- "kstat.zfs.misc.arcstats.prefetch_metadata_misses"
- ]
-
- anon_hits = arc_hits - (
- mfu_hits + mru_hits + mfu_ghost_hits + mru_ghost_hits
- )
- arc_accesses_total = (arc_hits + arc_misses)
- demand_data_total = (demand_data_hits + demand_data_misses)
- prefetch_data_total = (prefetch_data_hits + prefetch_data_misses)
- real_hits = (mfu_hits + mru_hits)
-
- output["total_accesses"] = fHits(arc_accesses_total)
- output["cache_hit_ratio"] = {
- 'per': fPerc(arc_hits, arc_accesses_total),
- 'num': fHits(arc_hits),
- }
- output["cache_miss_ratio"] = {
- 'per': fPerc(arc_misses, arc_accesses_total),
- 'num': fHits(arc_misses),
- }
- output["actual_hit_ratio"] = {
- 'per': fPerc(real_hits, arc_accesses_total),
- 'num': fHits(real_hits),
- }
- output["data_demand_efficiency"] = {
- 'per': fPerc(demand_data_hits, demand_data_total),
- 'num': fHits(demand_data_total),
- }
-
- if prefetch_data_total > 0:
- output["data_prefetch_efficiency"] = {
- 'per': fPerc(prefetch_data_hits, prefetch_data_total),
- 'num': fHits(prefetch_data_total),
- }
-
- if anon_hits > 0:
- output["cache_hits_by_cache_list"] = {}
- output["cache_hits_by_cache_list"]["anonymously_used"] = {
- 'per': fPerc(anon_hits, arc_hits),
- 'num': fHits(anon_hits),
- }
-
- output["most_recently_used"] = {
- 'per': fPerc(mru_hits, arc_hits),
- 'num': fHits(mru_hits),
- }
- output["most_frequently_used"] = {
- 'per': fPerc(mfu_hits, arc_hits),
- 'num': fHits(mfu_hits),
- }
- output["most_recently_used_ghost"] = {
- 'per': fPerc(mru_ghost_hits, arc_hits),
- 'num': fHits(mru_ghost_hits),
- }
- output["most_frequently_used_ghost"] = {
- 'per': fPerc(mfu_ghost_hits, arc_hits),
- 'num': fHits(mfu_ghost_hits),
- }
-
- output["cache_hits_by_data_type"] = {}
- output["cache_hits_by_data_type"]["demand_data"] = {
- 'per': fPerc(demand_data_hits, arc_hits),
- 'num': fHits(demand_data_hits),
- }
- output["cache_hits_by_data_type"]["prefetch_data"] = {
- 'per': fPerc(prefetch_data_hits, arc_hits),
- 'num': fHits(prefetch_data_hits),
- }
- output["cache_hits_by_data_type"]["demand_metadata"] = {
- 'per': fPerc(demand_metadata_hits, arc_hits),
- 'num': fHits(demand_metadata_hits),
- }
- output["cache_hits_by_data_type"]["prefetch_metadata"] = {
- 'per': fPerc(prefetch_metadata_hits, arc_hits),
- 'num': fHits(prefetch_metadata_hits),
- }
-
- output["cache_misses_by_data_type"] = {}
- output["cache_misses_by_data_type"]["demand_data"] = {
- 'per': fPerc(demand_data_misses, arc_misses),
- 'num': fHits(demand_data_misses),
- }
- output["cache_misses_by_data_type"]["prefetch_data"] = {
- 'per': fPerc(prefetch_data_misses, arc_misses),
- 'num': fHits(prefetch_data_misses),
- }
- output["cache_misses_by_data_type"]["demand_metadata"] = {
- 'per': fPerc(demand_metadata_misses, arc_misses),
- 'num': fHits(demand_metadata_misses),
- }
- output["cache_misses_by_data_type"]["prefetch_metadata"] = {
- 'per': fPerc(prefetch_metadata_misses, arc_misses),
- 'num': fHits(prefetch_metadata_misses),
- }
-
- return output
-
-
-def _arc_efficiency(Kstat):
- """Print information on the efficiency of the ARC"""
-
- arc = get_arc_efficiency(Kstat)
-
- sys.stdout.write("ARC Total accesses:\t\t\t\t\t%s\n" %
- arc['total_accesses'])
- sys.stdout.write("\tCache Hit Ratio:\t\t%s\t%s\n" % (
- arc['cache_hit_ratio']['per'],
- arc['cache_hit_ratio']['num'],
- )
- )
- sys.stdout.write("\tCache Miss Ratio:\t\t%s\t%s\n" % (
- arc['cache_miss_ratio']['per'],
- arc['cache_miss_ratio']['num'],
- )
- )
-
- sys.stdout.write("\tActual Hit Ratio:\t\t%s\t%s\n" % (
- arc['actual_hit_ratio']['per'],
- arc['actual_hit_ratio']['num'],
- )
- )
-
- sys.stdout.write("\n")
- sys.stdout.write("\tData Demand Efficiency:\t\t%s\t%s\n" % (
- arc['data_demand_efficiency']['per'],
- arc['data_demand_efficiency']['num'],
- )
- )
-
- if 'data_prefetch_efficiency' in arc:
- sys.stdout.write("\tData Prefetch Efficiency:\t%s\t%s\n" % (
- arc['data_prefetch_efficiency']['per'],
- arc['data_prefetch_efficiency']['num'],
- )
- )
- sys.stdout.write("\n")
-
- sys.stdout.write("\tCACHE HITS BY CACHE LIST:\n")
- if 'cache_hits_by_cache_list' in arc:
- sys.stdout.write("\t Anonymously Used:\t\t%s\t%s\n" % (
- arc['cache_hits_by_cache_list']['anonymously_used']['per'],
- arc['cache_hits_by_cache_list']['anonymously_used']['num'],
- )
- )
- sys.stdout.write("\t Most Recently Used:\t\t%s\t%s\n" % (
- arc['most_recently_used']['per'],
- arc['most_recently_used']['num'],
- )
- )
- sys.stdout.write("\t Most Frequently Used:\t\t%s\t%s\n" % (
- arc['most_frequently_used']['per'],
- arc['most_frequently_used']['num'],
- )
- )
- sys.stdout.write("\t Most Recently Used Ghost:\t%s\t%s\n" % (
- arc['most_recently_used_ghost']['per'],
- arc['most_recently_used_ghost']['num'],
- )
- )
- sys.stdout.write("\t Most Frequently Used Ghost:\t%s\t%s\n" % (
- arc['most_frequently_used_ghost']['per'],
- arc['most_frequently_used_ghost']['num'],
- )
- )
-
- sys.stdout.write("\n\tCACHE HITS BY DATA TYPE:\n")
- sys.stdout.write("\t Demand Data:\t\t\t%s\t%s\n" % (
- arc["cache_hits_by_data_type"]['demand_data']['per'],
- arc["cache_hits_by_data_type"]['demand_data']['num'],
- )
- )
- sys.stdout.write("\t Prefetch Data:\t\t%s\t%s\n" % (
- arc["cache_hits_by_data_type"]['prefetch_data']['per'],
- arc["cache_hits_by_data_type"]['prefetch_data']['num'],
- )
- )
- sys.stdout.write("\t Demand Metadata:\t\t%s\t%s\n" % (
- arc["cache_hits_by_data_type"]['demand_metadata']['per'],
- arc["cache_hits_by_data_type"]['demand_metadata']['num'],
- )
- )
- sys.stdout.write("\t Prefetch Metadata:\t\t%s\t%s\n" % (
- arc["cache_hits_by_data_type"]['prefetch_metadata']['per'],
- arc["cache_hits_by_data_type"]['prefetch_metadata']['num'],
- )
- )
-
- sys.stdout.write("\n\tCACHE MISSES BY DATA TYPE:\n")
- sys.stdout.write("\t Demand Data:\t\t\t%s\t%s\n" % (
- arc["cache_misses_by_data_type"]['demand_data']['per'],
- arc["cache_misses_by_data_type"]['demand_data']['num'],
- )
- )
- sys.stdout.write("\t Prefetch Data:\t\t%s\t%s\n" % (
- arc["cache_misses_by_data_type"]['prefetch_data']['per'],
- arc["cache_misses_by_data_type"]['prefetch_data']['num'],
- )
- )
- sys.stdout.write("\t Demand Metadata:\t\t%s\t%s\n" % (
- arc["cache_misses_by_data_type"]['demand_metadata']['per'],
- arc["cache_misses_by_data_type"]['demand_metadata']['num'],
- )
- )
- sys.stdout.write("\t Prefetch Metadata:\t\t%s\t%s\n" % (
- arc["cache_misses_by_data_type"]['prefetch_metadata']['per'],
- arc["cache_misses_by_data_type"]['prefetch_metadata']['num'],
- )
- )
-
-
-def get_l2arc_summary(Kstat):
- """Collection information on the L2ARC"""
-
- output = {}
-
- l2_abort_lowmem = Kstat["kstat.zfs.misc.arcstats.l2_abort_lowmem"]
- l2_cksum_bad = Kstat["kstat.zfs.misc.arcstats.l2_cksum_bad"]
- l2_evict_lock_retry = Kstat["kstat.zfs.misc.arcstats.l2_evict_lock_retry"]
- l2_evict_reading = Kstat["kstat.zfs.misc.arcstats.l2_evict_reading"]
- l2_feeds = Kstat["kstat.zfs.misc.arcstats.l2_feeds"]
- l2_free_on_write = Kstat["kstat.zfs.misc.arcstats.l2_free_on_write"]
- l2_hdr_size = Kstat["kstat.zfs.misc.arcstats.l2_hdr_size"]
- l2_hits = Kstat["kstat.zfs.misc.arcstats.l2_hits"]
- l2_io_error = Kstat["kstat.zfs.misc.arcstats.l2_io_error"]
- l2_misses = Kstat["kstat.zfs.misc.arcstats.l2_misses"]
- l2_rw_clash = Kstat["kstat.zfs.misc.arcstats.l2_rw_clash"]
- l2_size = Kstat["kstat.zfs.misc.arcstats.l2_size"]
- l2_asize = Kstat["kstat.zfs.misc.arcstats.l2_asize"]
- l2_writes_done = Kstat["kstat.zfs.misc.arcstats.l2_writes_done"]
- l2_writes_error = Kstat["kstat.zfs.misc.arcstats.l2_writes_error"]
- l2_writes_sent = Kstat["kstat.zfs.misc.arcstats.l2_writes_sent"]
-
- l2_access_total = (l2_hits + l2_misses)
- output['l2_health_count'] = (l2_writes_error + l2_cksum_bad + l2_io_error)
-
- output['l2_access_total'] = l2_access_total
- output['l2_size'] = l2_size
- output['l2_asize'] = l2_asize
-
- if l2_size > 0 and l2_access_total > 0:
-
- if output['l2_health_count'] > 0:
- output["health"] = "DEGRADED"
- else:
- output["health"] = "HEALTHY"
-
- output["low_memory_aborts"] = fHits(l2_abort_lowmem)
- output["free_on_write"] = fHits(l2_free_on_write)
- output["rw_clashes"] = fHits(l2_rw_clash)
- output["bad_checksums"] = fHits(l2_cksum_bad)
- output["io_errors"] = fHits(l2_io_error)
-
- output["l2_arc_size"] = {}
- output["l2_arc_size"]["adative"] = fBytes(l2_size)
- output["l2_arc_size"]["actual"] = {
- 'per': fPerc(l2_asize, l2_size),
- 'num': fBytes(l2_asize)
- }
- output["l2_arc_size"]["head_size"] = {
- 'per': fPerc(l2_hdr_size, l2_size),
- 'num': fBytes(l2_hdr_size),
- }
-
- output["l2_arc_evicts"] = {}
- output["l2_arc_evicts"]['lock_retries'] = fHits(l2_evict_lock_retry)
- output["l2_arc_evicts"]['reading'] = fHits(l2_evict_reading)
-
- output['l2_arc_breakdown'] = {}
- output['l2_arc_breakdown']['value'] = fHits(l2_access_total)
- output['l2_arc_breakdown']['hit_ratio'] = {
- 'per': fPerc(l2_hits, l2_access_total),
- 'num': fHits(l2_hits),
- }
- output['l2_arc_breakdown']['miss_ratio'] = {
- 'per': fPerc(l2_misses, l2_access_total),
- 'num': fHits(l2_misses),
- }
- output['l2_arc_breakdown']['feeds'] = fHits(l2_feeds)
-
- output['l2_arc_buffer'] = {}
-
- output['l2_arc_writes'] = {}
- output['l2_writes_done'] = l2_writes_done
- output['l2_writes_sent'] = l2_writes_sent
- if l2_writes_done != l2_writes_sent:
- output['l2_arc_writes']['writes_sent'] = {
- 'value': "FAULTED",
- 'num': fHits(l2_writes_sent),
- }
- output['l2_arc_writes']['done_ratio'] = {
- 'per': fPerc(l2_writes_done, l2_writes_sent),
- 'num': fHits(l2_writes_done),
- }
- output['l2_arc_writes']['error_ratio'] = {
- 'per': fPerc(l2_writes_error, l2_writes_sent),
- 'num': fHits(l2_writes_error),
- }
- else:
- output['l2_arc_writes']['writes_sent'] = {
- 'per': fPerc(100),
- 'num': fHits(l2_writes_sent),
- }
-
- return output
-
-
-def _l2arc_summary(Kstat):
- """Print information on the L2ARC"""
-
- arc = get_l2arc_summary(Kstat)
-
- if arc['l2_size'] > 0 and arc['l2_access_total'] > 0:
- sys.stdout.write("L2 ARC Summary: ")
- if arc['l2_health_count'] > 0:
- sys.stdout.write("(DEGRADED)\n")
- else:
- sys.stdout.write("(HEALTHY)\n")
- sys.stdout.write("\tLow Memory Aborts:\t\t\t%s\n" %
- arc['low_memory_aborts'])
- sys.stdout.write("\tFree on Write:\t\t\t\t%s\n" % arc['free_on_write'])
- sys.stdout.write("\tR/W Clashes:\t\t\t\t%s\n" % arc['rw_clashes'])
- sys.stdout.write("\tBad Checksums:\t\t\t\t%s\n" % arc['bad_checksums'])
- sys.stdout.write("\tIO Errors:\t\t\t\t%s\n" % arc['io_errors'])
- sys.stdout.write("\n")
-
- sys.stdout.write("L2 ARC Size: (Adaptive)\t\t\t\t%s\n" %
- arc["l2_arc_size"]["adative"])
- sys.stdout.write("\tCompressed:\t\t\t%s\t%s\n" % (
- arc["l2_arc_size"]["actual"]["per"],
- arc["l2_arc_size"]["actual"]["num"],
- )
- )
- sys.stdout.write("\tHeader Size:\t\t\t%s\t%s\n" % (
- arc["l2_arc_size"]["head_size"]["per"],
- arc["l2_arc_size"]["head_size"]["num"],
- )
- )
- sys.stdout.write("\n")
-
- if arc["l2_arc_evicts"]['lock_retries'] != '0' or \
- arc["l2_arc_evicts"]["reading"] != '0':
- sys.stdout.write("L2 ARC Evicts:\n")
- sys.stdout.write("\tLock Retries:\t\t\t\t%s\n" %
- arc["l2_arc_evicts"]['lock_retries'])
- sys.stdout.write("\tUpon Reading:\t\t\t\t%s\n" %
- arc["l2_arc_evicts"]["reading"])
- sys.stdout.write("\n")
-
- sys.stdout.write("L2 ARC Breakdown:\t\t\t\t%s\n" %
- arc['l2_arc_breakdown']['value'])
- sys.stdout.write("\tHit Ratio:\t\t\t%s\t%s\n" % (
- arc['l2_arc_breakdown']['hit_ratio']['per'],
- arc['l2_arc_breakdown']['hit_ratio']['num'],
- )
- )
-
- sys.stdout.write("\tMiss Ratio:\t\t\t%s\t%s\n" % (
- arc['l2_arc_breakdown']['miss_ratio']['per'],
- arc['l2_arc_breakdown']['miss_ratio']['num'],
- )
- )
-
- sys.stdout.write("\tFeeds:\t\t\t\t\t%s\n" %
- arc['l2_arc_breakdown']['feeds'])
- sys.stdout.write("\n")
-
- sys.stdout.write("L2 ARC Writes:\n")
- if arc['l2_writes_done'] != arc['l2_writes_sent']:
- sys.stdout.write("\tWrites Sent: (%s)\t\t\t\t%s\n" % (
- arc['l2_arc_writes']['writes_sent']['value'],
- arc['l2_arc_writes']['writes_sent']['num'],
- )
- )
- sys.stdout.write("\t Done Ratio:\t\t\t%s\t%s\n" % (
- arc['l2_arc_writes']['done_ratio']['per'],
- arc['l2_arc_writes']['done_ratio']['num'],
- )
- )
- sys.stdout.write("\t Error Ratio:\t\t\t%s\t%s\n" % (
- arc['l2_arc_writes']['error_ratio']['per'],
- arc['l2_arc_writes']['error_ratio']['num'],
- )
- )
- else:
- sys.stdout.write("\tWrites Sent:\t\t\t%s\t%s\n" % (
- arc['l2_arc_writes']['writes_sent']['per'],
- arc['l2_arc_writes']['writes_sent']['num'],
- )
- )
-
-
-def get_dmu_summary(Kstat):
- """Collect information on the DMU"""
-
- output = {}
-
- zfetch_hits = Kstat["kstat.zfs.misc.zfetchstats.hits"]
- zfetch_misses = Kstat["kstat.zfs.misc.zfetchstats.misses"]
-
- zfetch_access_total = (zfetch_hits + zfetch_misses)
- output['zfetch_access_total'] = zfetch_access_total
-
- if zfetch_access_total > 0:
- output['dmu'] = {}
- output['dmu']['efficiency'] = {}
- output['dmu']['efficiency']['value'] = fHits(zfetch_access_total)
- output['dmu']['efficiency']['hit_ratio'] = {
- 'per': fPerc(zfetch_hits, zfetch_access_total),
- 'num': fHits(zfetch_hits),
- }
- output['dmu']['efficiency']['miss_ratio'] = {
- 'per': fPerc(zfetch_misses, zfetch_access_total),
- 'num': fHits(zfetch_misses),
- }
-
- return output
-
-
-def _dmu_summary(Kstat):
- """Print information on the DMU"""
-
- arc = get_dmu_summary(Kstat)
-
- if arc['zfetch_access_total'] > 0:
- sys.stdout.write("DMU Prefetch Efficiency:\t\t\t\t\t%s\n" %
- arc['dmu']['efficiency']['value'])
- sys.stdout.write("\tHit Ratio:\t\t\t%s\t%s\n" % (
- arc['dmu']['efficiency']['hit_ratio']['per'],
- arc['dmu']['efficiency']['hit_ratio']['num'],
- )
- )
- sys.stdout.write("\tMiss Ratio:\t\t\t%s\t%s\n" % (
- arc['dmu']['efficiency']['miss_ratio']['per'],
- arc['dmu']['efficiency']['miss_ratio']['num'],
- )
- )
-
- sys.stdout.write("\n")
-
-
-def get_vdev_summary(Kstat):
- """Collect information on the VDEVs"""
-
- output = {}
-
- vdev_cache_delegations = \
- Kstat["kstat.zfs.misc.vdev_cache_stats.delegations"]
- vdev_cache_misses = Kstat["kstat.zfs.misc.vdev_cache_stats.misses"]
- vdev_cache_hits = Kstat["kstat.zfs.misc.vdev_cache_stats.hits"]
- vdev_cache_total = (vdev_cache_misses + vdev_cache_hits +
- vdev_cache_delegations)
-
- output['vdev_cache_total'] = vdev_cache_total
-
- if vdev_cache_total > 0:
- output['summary'] = fHits(vdev_cache_total)
- output['hit_ratio'] = {
- 'per': fPerc(vdev_cache_hits, vdev_cache_total),
- 'num': fHits(vdev_cache_hits),
- }
- output['miss_ratio'] = {
- 'per': fPerc(vdev_cache_misses, vdev_cache_total),
- 'num': fHits(vdev_cache_misses),
- }
- output['delegations'] = {
- 'per': fPerc(vdev_cache_delegations, vdev_cache_total),
- 'num': fHits(vdev_cache_delegations),
- }
-
- return output
-
-
-def _vdev_summary(Kstat):
- """Print information on the VDEVs"""
-
- arc = get_vdev_summary(Kstat)
-
- if arc['vdev_cache_total'] > 0:
- sys.stdout.write("VDEV Cache Summary:\t\t\t\t%s\n" % arc['summary'])
- sys.stdout.write("\tHit Ratio:\t\t\t%s\t%s\n" % (
- arc['hit_ratio']['per'],
- arc['hit_ratio']['num'],
- ))
- sys.stdout.write("\tMiss Ratio:\t\t\t%s\t%s\n" % (
- arc['miss_ratio']['per'],
- arc['miss_ratio']['num'],
- ))
- sys.stdout.write("\tDelegations:\t\t\t%s\t%s\n" % (
- arc['delegations']['per'],
- arc['delegations']['num'],
- ))
-
-
-def _tunable_summary(Kstat):
- """Print information on tunables, including descriptions if requested"""
-
- global show_tunable_descriptions
- global alternate_tunable_layout
-
- names = os.listdir("/sys/module/zfs/parameters/")
-
- values = {}
- for name in names:
- with open("/sys/module/zfs/parameters/" + name) as f:
- value = f.read()
- values[name] = value.strip()
-
- descriptions = {}
-
- if show_tunable_descriptions:
-
- command = ["/sbin/modinfo", "zfs", "-0"]
-
- try:
- p = Popen(command, stdin=PIPE, stdout=PIPE,
- stderr=PIPE, shell=False, close_fds=True)
- p.wait()
-
- # By default, Python 2 returns a string as the first element of the
- # tuple from p.communicate(), while Python 3 returns bytes which
- # must be decoded first. The better way to do this would be with
- # subprocess.run() or at least .check_output(), but this fails on
- # CentOS 6 because of its old version of Python 2
- desc = bytes.decode(p.communicate()[0])
- description_list = desc.strip().split('\0')
-
- if p.returncode == 0:
- for tunable in description_list:
- if tunable[0:5] == 'parm:':
- tunable = tunable[5:].strip()
- name, description = tunable.split(':', 1)
- if not description:
- description = "Description unavailable"
- descriptions[name] = description
- else:
- sys.stderr.write("%s: '%s' exited with code %i\n" %
- (sys.argv[0], command[0], p.returncode))
- sys.stderr.write("Tunable descriptions will be disabled.\n")
- except OSError as e:
- sys.stderr.write("%s: Cannot run '%s': %s\n" %
- (sys.argv[0], command[0], e.strerror))
- sys.stderr.write("Tunable descriptions will be disabled.\n")
-
- sys.stdout.write("ZFS Tunables:\n")
- names.sort()
-
- if alternate_tunable_layout:
- fmt = "\t%s=%s\n"
- else:
- fmt = "\t%-50s%s\n"
-
- for name in names:
-
- if not name:
- continue
-
- if show_tunable_descriptions and name in descriptions:
- sys.stdout.write("\t# %s\n" % descriptions[name])
-
- sys.stdout.write(fmt % (name, values[name]))
-
-
-unSub = [
- _arc_summary,
- _arc_efficiency,
- _l2arc_summary,
- _dmu_summary,
- _vdev_summary,
- _tunable_summary
-]
-
-
-def zfs_header():
- """Print title string with date"""
-
- daydate = time.strftime('%a %b %d %H:%M:%S %Y')
-
- sys.stdout.write('\n'+'-'*72+'\n')
- sys.stdout.write('ZFS Subsystem Report\t\t\t\t%s' % daydate)
- sys.stdout.write('\n')
-
-
-def usage():
- """Print usage information"""
-
- sys.stdout.write("Usage: arc_summary.py [-h] [-a] [-d] [-p PAGE]\n\n")
- sys.stdout.write("\t -h, --help : "
- "Print this help message and exit\n")
- sys.stdout.write("\t -a, --alternate : "
- "Show an alternate sysctl layout\n")
- sys.stdout.write("\t -d, --description : "
- "Show the sysctl descriptions\n")
- sys.stdout.write("\t -p PAGE, --page=PAGE : "
- "Select a single output page to display,\n")
- sys.stdout.write("\t "
- "should be an integer between 1 and " +
- str(len(unSub)) + "\n\n")
- sys.stdout.write("Examples:\n")
- sys.stdout.write("\tarc_summary.py -a\n")
- sys.stdout.write("\tarc_summary.py -p 4\n")
- sys.stdout.write("\tarc_summary.py -ad\n")
- sys.stdout.write("\tarc_summary.py --page=2\n")
-
-
-def main():
- """Main function"""
-
- global show_tunable_descriptions
- global alternate_tunable_layout
-
- try:
- opts, args = getopt.getopt(
- sys.argv[1:],
- "adp:h", ["alternate", "description", "page=", "help"]
- )
- except getopt.error as e:
- sys.stderr.write("Error: %s\n" % e.msg)
- usage()
- sys.exit(1)
-
- args = {}
- for opt, arg in opts:
- if opt in ('-a', '--alternate'):
- args['a'] = True
- if opt in ('-d', '--description'):
- args['d'] = True
- if opt in ('-p', '--page'):
- args['p'] = arg
- if opt in ('-h', '--help'):
- usage()
- sys.exit(0)
-
- Kstat = get_Kstat()
-
- alternate_tunable_layout = 'a' in args
- show_tunable_descriptions = 'd' in args
-
- pages = []
-
- if 'p' in args:
- try:
- pages.append(unSub[int(args['p']) - 1])
- except IndexError:
- sys.stderr.write('the argument to -p must be between 1 and ' +
- str(len(unSub)) + '\n')
- sys.exit(1)
- else:
- pages = unSub
-
- zfs_header()
- for page in pages:
- page(Kstat)
- sys.stdout.write("\n")
-
-
-if __name__ == '__main__':
- main()
--- /dev/null
+#!/usr/bin/python2
+#
+# $Id: arc_summary.pl,v 388:e27800740aa2 2011-07-08 02:53:29Z jhell $
+#
+# Copyright (c) 2008 Ben Rockwood <benr@cuddletech.com>,
+# Copyright (c) 2010 Martin Matuska <mm@FreeBSD.org>,
+# Copyright (c) 2010-2011 Jason J. Hellenthal <jhell@DataIX.net>,
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# If you are having trouble when using this script from cron(8), please try
+# adjusting your PATH before reporting problems.
+#
+# Note that some of this code uses older constructs (e.g. getopt instead of
+# argparse, and subprocess.Popen() instead of subprocess.run()) because we
+# need to support some very old versions of Python.
+#
+
+"""Print statistics on the ZFS Adjustable Replacement Cache (ARC)
+
+Provides basic information on the ARC, its efficiency, the L2ARC (if present),
+the Data Management Unit (DMU), Virtual Devices (VDEVs), and tunables. See the
+in-source documentation and code at
+https://github.com/zfsonlinux/zfs/blob/master/module/zfs/arc.c for details.
+"""
+
+import getopt
+import os
+import sys
+import time
+import errno
+
+from subprocess import Popen, PIPE
+from decimal import Decimal as D
+
+show_tunable_descriptions = False
+alternate_tunable_layout = False
+
+
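+# Custom excepthook: exit quietly on a broken pipe (e.g. when output is
+# piped to head(1)) or on a keyboard interrupt, instead of printing a
+# traceback.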
+def handle_Exception(ex_cls, ex, tb):
+ if ex_cls is IOError:
+ if ex.errno == errno.EPIPE:
+ sys.exit()
+
+ if ex_cls is KeyboardInterrupt:
+ sys.exit()
+
+
+sys.excepthook = handle_Exception
+
+
+def get_Kstat():
+ """Collect information on the ZFS subsystem from the /proc virtual
+ file system. The name "kstat" is a holdover from the Solaris utility
+ of the same name.
+ """
+
+ def load_proc_kstats(fn, namespace):
+ """Collect information on a specific subsystem of the ARC"""
+
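+ # Drop the two header lines; each remaining line is "name type data",
+ # of which only the name and the numeric data columns are kept.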
+ kstats = [line.strip() for line in open(fn)]
+ del kstats[0:2]
+ for kstat in kstats:
+ kstat = kstat.strip()
+ name, _, value = kstat.split()
+ Kstat[namespace + name] = D(value)
+
+ Kstat = {}
+ load_proc_kstats('/proc/spl/kstat/zfs/arcstats',
+ 'kstat.zfs.misc.arcstats.')
+ load_proc_kstats('/proc/spl/kstat/zfs/zfetchstats',
+ 'kstat.zfs.misc.zfetchstats.')
+ load_proc_kstats('/proc/spl/kstat/zfs/vdev_cache_stats',
+ 'kstat.zfs.misc.vdev_cache_stats.')
+
+ return Kstat
+
+
+def fBytes(b=0):
+ """Return human-readable representation of a byte value in
+ powers of 2 (e.g. "KiB" for "kibibytes") to two decimal
+ places. Values smaller than one KiB are returned without
+ decimal places.
+ """
+
+ prefixes = [
+ [2**80, "YiB"], # yobibytes (yotta)
+ [2**70, "ZiB"], # zebibytes (zetta)
+ [2**60, "EiB"], # exbibytes (exa)
+ [2**50, "PiB"], # pebibytes (peta)
+ [2**40, "TiB"], # tebibytes (tera)
+ [2**30, "GiB"], # gibibytes (giga)
+ [2**20, "MiB"], # mebibytes (mega)
+ [2**10, "KiB"]] # kibibytes (kilo)
+
+ if b >= 2**10:
+
+ for limit, unit in prefixes:
+
+ if b >= limit:
+ value = b / limit
+ break
+
+ result = "%0.2f\t%s" % (value, unit)
+
+ else:
+
+ result = "%d\tBytes" % b
+
+ return result
+
+
+def fHits(hits=0):
+ """Create a human-readable representation of the number of hits.
+ The single-letter symbols used are SI to avoid the confusion caused
+ by the different "short scale" and "long scale" representations in
+ English, which use the same words for different values. See
+ https://en.wikipedia.org/wiki/Names_of_large_numbers and
+ https://physics.nist.gov/cuu/Units/prefixes.html
+ """
+
+ numbers = [
+ [10**24, 'Y'], # yotta (septillion)
+ [10**21, 'Z'], # zetta (sextillion)
+ [10**18, 'E'], # exa (quintillion)
+ [10**15, 'P'], # peta (quadrillion)
+ [10**12, 'T'], # tera (trillion)
+ [10**9, 'G'], # giga (billion)
+ [10**6, 'M'], # mega (million)
+ [10**3, 'k']] # kilo (thousand)
+
+ if hits >= 1000:
+
+ for limit, symbol in numbers:
+
+ if hits >= limit:
+ value = hits/limit
+ break
+
+ result = "%0.2f%s" % (value, symbol)
+
+ else:
+
+ result = "%d" % hits
+
+ return result
+
+
+def fPerc(lVal=0, rVal=0, Decimal=2):
+ """Calculate percentage value and return in human-readable format"""
+
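+ # A zero denominator is reported as 100% rather than raising a
+ # division-by-zero error.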
+ if rVal > 0:
+ return str("%0." + str(Decimal) + "f") % (100 * (lVal / rVal)) + "%"
+ else:
+ return str("%0." + str(Decimal) + "f") % 100 + "%"
+
+
+def get_arc_summary(Kstat):
+ """Collect general data on the ARC"""
+
+ output = {}
+ memory_throttle_count = Kstat[
+ "kstat.zfs.misc.arcstats.memory_throttle_count"
+ ]
+
+ if memory_throttle_count > 0:
+ output['health'] = 'THROTTLED'
+ else:
+ output['health'] = 'HEALTHY'
+
+ output['memory_throttle_count'] = fHits(memory_throttle_count)
+
+ # ARC Misc.
+ deleted = Kstat["kstat.zfs.misc.arcstats.deleted"]
+ mutex_miss = Kstat["kstat.zfs.misc.arcstats.mutex_miss"]
+ evict_skip = Kstat["kstat.zfs.misc.arcstats.evict_skip"]
+
+ # ARC Misc.
+ output["arc_misc"] = {}
+ output["arc_misc"]["deleted"] = fHits(deleted)
+ output["arc_misc"]['mutex_miss'] = fHits(mutex_miss)
+ output["arc_misc"]['evict_skips'] = fHits(evict_skip)
+
+ # ARC Sizing
+ arc_size = Kstat["kstat.zfs.misc.arcstats.size"]
+ mru_size = Kstat["kstat.zfs.misc.arcstats.mru_size"]
+ mfu_size = Kstat["kstat.zfs.misc.arcstats.mfu_size"]
+ meta_limit = Kstat["kstat.zfs.misc.arcstats.arc_meta_limit"]
+ meta_size = Kstat["kstat.zfs.misc.arcstats.arc_meta_used"]
+ dnode_limit = Kstat["kstat.zfs.misc.arcstats.arc_dnode_limit"]
+ dnode_size = Kstat["kstat.zfs.misc.arcstats.dnode_size"]
+ target_max_size = Kstat["kstat.zfs.misc.arcstats.c_max"]
+ target_min_size = Kstat["kstat.zfs.misc.arcstats.c_min"]
+ target_size = Kstat["kstat.zfs.misc.arcstats.c"]
+
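+ # Ratio of maximum to minimum target size, reported as "N:1" in the
+ # summary output.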
+ target_size_ratio = (target_max_size / target_min_size)
+
+ # ARC Sizing
+ output['arc_sizing'] = {}
+ output['arc_sizing']['arc_size'] = {
+ 'per': fPerc(arc_size, target_max_size),
+ 'num': fBytes(arc_size),
+ }
+ output['arc_sizing']['target_max_size'] = {
+ 'ratio': target_size_ratio,
+ 'num': fBytes(target_max_size),
+ }
+ output['arc_sizing']['target_min_size'] = {
+ 'per': fPerc(target_min_size, target_max_size),
+ 'num': fBytes(target_min_size),
+ }
+ output['arc_sizing']['target_size'] = {
+ 'per': fPerc(target_size, target_max_size),
+ 'num': fBytes(target_size),
+ }
+ output['arc_sizing']['meta_limit'] = {
+ 'per': fPerc(meta_limit, target_max_size),
+ 'num': fBytes(meta_limit),
+ }
+ output['arc_sizing']['meta_size'] = {
+ 'per': fPerc(meta_size, meta_limit),
+ 'num': fBytes(meta_size),
+ }
+ output['arc_sizing']['dnode_limit'] = {
+ 'per': fPerc(dnode_limit, meta_limit),
+ 'num': fBytes(dnode_limit),
+ }
+ output['arc_sizing']['dnode_size'] = {
+ 'per': fPerc(dnode_size, dnode_limit),
+ 'num': fBytes(dnode_size),
+ }
+
+ # ARC Hash Breakdown
+ output['arc_hash_break'] = {}
+ output['arc_hash_break']['hash_chain_max'] = Kstat[
+ "kstat.zfs.misc.arcstats.hash_chain_max"
+ ]
+ output['arc_hash_break']['hash_chains'] = Kstat[
+ "kstat.zfs.misc.arcstats.hash_chains"
+ ]
+ output['arc_hash_break']['hash_collisions'] = Kstat[
+ "kstat.zfs.misc.arcstats.hash_collisions"
+ ]
+ output['arc_hash_break']['hash_elements'] = Kstat[
+ "kstat.zfs.misc.arcstats.hash_elements"
+ ]
+ output['arc_hash_break']['hash_elements_max'] = Kstat[
+ "kstat.zfs.misc.arcstats.hash_elements_max"
+ ]
+
+ output['arc_size_break'] = {}
+ output['arc_size_break']['recently_used_cache_size'] = {
+ 'per': fPerc(mru_size, mru_size + mfu_size),
+ 'num': fBytes(mru_size),
+ }
+ output['arc_size_break']['frequently_used_cache_size'] = {
+ 'per': fPerc(mfu_size, mru_size + mfu_size),
+ 'num': fBytes(mfu_size),
+ }
+
+ # ARC Hash Breakdown
+ hash_chain_max = Kstat["kstat.zfs.misc.arcstats.hash_chain_max"]
+ hash_chains = Kstat["kstat.zfs.misc.arcstats.hash_chains"]
+ hash_collisions = Kstat["kstat.zfs.misc.arcstats.hash_collisions"]
+ hash_elements = Kstat["kstat.zfs.misc.arcstats.hash_elements"]
+ hash_elements_max = Kstat["kstat.zfs.misc.arcstats.hash_elements_max"]
+
+ output['arc_hash_break'] = {}
+ output['arc_hash_break']['elements_max'] = fHits(hash_elements_max)
+ output['arc_hash_break']['elements_current'] = {
+ 'per': fPerc(hash_elements, hash_elements_max),
+ 'num': fHits(hash_elements),
+ }
+ output['arc_hash_break']['collisions'] = fHits(hash_collisions)
+ output['arc_hash_break']['chain_max'] = fHits(hash_chain_max)
+ output['arc_hash_break']['chains'] = fHits(hash_chains)
+
+ return output
+
+
+def _arc_summary(Kstat):
+ """Print information on the ARC"""
+
+ # ARC Sizing
+ arc = get_arc_summary(Kstat)
+
+ sys.stdout.write("ARC Summary: (%s)\n" % arc['health'])
+
+ sys.stdout.write("\tMemory Throttle Count:\t\t\t%s\n" %
+ arc['memory_throttle_count'])
+ sys.stdout.write("\n")
+
+ # ARC Misc.
+ sys.stdout.write("ARC Misc:\n")
+ sys.stdout.write("\tDeleted:\t\t\t\t%s\n" % arc['arc_misc']['deleted'])
+ sys.stdout.write("\tMutex Misses:\t\t\t\t%s\n" %
+ arc['arc_misc']['mutex_miss'])
+ sys.stdout.write("\tEvict Skips:\t\t\t\t%s\n" %
+ arc['arc_misc']['evict_skips'])
+ sys.stdout.write("\n")
+
+ # ARC Sizing
+ sys.stdout.write("ARC Size:\t\t\t\t%s\t%s\n" % (
+ arc['arc_sizing']['arc_size']['per'],
+ arc['arc_sizing']['arc_size']['num']
+ )
+ )
+ sys.stdout.write("\tTarget Size: (Adaptive)\t\t%s\t%s\n" % (
+ arc['arc_sizing']['target_size']['per'],
+ arc['arc_sizing']['target_size']['num'],
+ )
+ )
+
+ sys.stdout.write("\tMin Size (Hard Limit):\t\t%s\t%s\n" % (
+ arc['arc_sizing']['target_min_size']['per'],
+ arc['arc_sizing']['target_min_size']['num'],
+ )
+ )
+
+ sys.stdout.write("\tMax Size (High Water):\t\t%d:1\t%s\n" % (
+ arc['arc_sizing']['target_max_size']['ratio'],
+ arc['arc_sizing']['target_max_size']['num'],
+ )
+ )
+
+ sys.stdout.write("\nARC Size Breakdown:\n")
+ sys.stdout.write("\tRecently Used Cache Size:\t%s\t%s\n" % (
+ arc['arc_size_break']['recently_used_cache_size']['per'],
+ arc['arc_size_break']['recently_used_cache_size']['num'],
+ )
+ )
+ sys.stdout.write("\tFrequently Used Cache Size:\t%s\t%s\n" % (
+ arc['arc_size_break']['frequently_used_cache_size']['per'],
+ arc['arc_size_break']['frequently_used_cache_size']['num'],
+ )
+ )
+ sys.stdout.write("\tMetadata Size (Hard Limit):\t%s\t%s\n" % (
+ arc['arc_sizing']['meta_limit']['per'],
+ arc['arc_sizing']['meta_limit']['num'],
+ )
+ )
+ sys.stdout.write("\tMetadata Size:\t\t\t%s\t%s\n" % (
+ arc['arc_sizing']['meta_size']['per'],
+ arc['arc_sizing']['meta_size']['num'],
+ )
+ )
+ sys.stdout.write("\tDnode Size (Hard Limit):\t%s\t%s\n" % (
+ arc['arc_sizing']['dnode_limit']['per'],
+ arc['arc_sizing']['dnode_limit']['num'],
+ )
+ )
+ sys.stdout.write("\tDnode Size:\t\t\t%s\t%s\n" % (
+ arc['arc_sizing']['dnode_size']['per'],
+ arc['arc_sizing']['dnode_size']['num'],
+ )
+ )
+
+ sys.stdout.write("\n")
+
+ # ARC Hash Breakdown
+ sys.stdout.write("ARC Hash Breakdown:\n")
+ sys.stdout.write("\tElements Max:\t\t\t\t%s\n" %
+ arc['arc_hash_break']['elements_max'])
+ sys.stdout.write("\tElements Current:\t\t%s\t%s\n" % (
+ arc['arc_hash_break']['elements_current']['per'],
+ arc['arc_hash_break']['elements_current']['num'],
+ )
+ )
+ sys.stdout.write("\tCollisions:\t\t\t\t%s\n" %
+ arc['arc_hash_break']['collisions'])
+ sys.stdout.write("\tChain Max:\t\t\t\t%s\n" %
+ arc['arc_hash_break']['chain_max'])
+ sys.stdout.write("\tChains:\t\t\t\t\t%s\n" %
+ arc['arc_hash_break']['chains'])
+
+
+def get_arc_efficiency(Kstat):
+ """Collect information on the efficiency of the ARC"""
+
+ output = {}
+
+ arc_hits = Kstat["kstat.zfs.misc.arcstats.hits"]
+ arc_misses = Kstat["kstat.zfs.misc.arcstats.misses"]
+ demand_data_hits = Kstat["kstat.zfs.misc.arcstats.demand_data_hits"]
+ demand_data_misses = Kstat["kstat.zfs.misc.arcstats.demand_data_misses"]
+ demand_metadata_hits = Kstat[
+ "kstat.zfs.misc.arcstats.demand_metadata_hits"
+ ]
+ demand_metadata_misses = Kstat[
+ "kstat.zfs.misc.arcstats.demand_metadata_misses"
+ ]
+ mfu_ghost_hits = Kstat["kstat.zfs.misc.arcstats.mfu_ghost_hits"]
+ mfu_hits = Kstat["kstat.zfs.misc.arcstats.mfu_hits"]
+ mru_ghost_hits = Kstat["kstat.zfs.misc.arcstats.mru_ghost_hits"]
+ mru_hits = Kstat["kstat.zfs.misc.arcstats.mru_hits"]
+ prefetch_data_hits = Kstat["kstat.zfs.misc.arcstats.prefetch_data_hits"]
+ prefetch_data_misses = Kstat[
+ "kstat.zfs.misc.arcstats.prefetch_data_misses"
+ ]
+ prefetch_metadata_hits = Kstat[
+ "kstat.zfs.misc.arcstats.prefetch_metadata_hits"
+ ]
+ prefetch_metadata_misses = Kstat[
+ "kstat.zfs.misc.arcstats.prefetch_metadata_misses"
+ ]
+
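+ # Hits not attributed to the MRU/MFU lists or their ghost lists are
+ # counted as anonymous.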
+ anon_hits = arc_hits - (
+ mfu_hits + mru_hits + mfu_ghost_hits + mru_ghost_hits
+ )
+ arc_accesses_total = (arc_hits + arc_misses)
+ demand_data_total = (demand_data_hits + demand_data_misses)
+ prefetch_data_total = (prefetch_data_hits + prefetch_data_misses)
+ real_hits = (mfu_hits + mru_hits)
+
+ output["total_accesses"] = fHits(arc_accesses_total)
+ output["cache_hit_ratio"] = {
+ 'per': fPerc(arc_hits, arc_accesses_total),
+ 'num': fHits(arc_hits),
+ }
+ output["cache_miss_ratio"] = {
+ 'per': fPerc(arc_misses, arc_accesses_total),
+ 'num': fHits(arc_misses),
+ }
+ output["actual_hit_ratio"] = {
+ 'per': fPerc(real_hits, arc_accesses_total),
+ 'num': fHits(real_hits),
+ }
+ output["data_demand_efficiency"] = {
+ 'per': fPerc(demand_data_hits, demand_data_total),
+ 'num': fHits(demand_data_total),
+ }
+
+ if prefetch_data_total > 0:
+ output["data_prefetch_efficiency"] = {
+ 'per': fPerc(prefetch_data_hits, prefetch_data_total),
+ 'num': fHits(prefetch_data_total),
+ }
+
+ if anon_hits > 0:
+ output["cache_hits_by_cache_list"] = {}
+ output["cache_hits_by_cache_list"]["anonymously_used"] = {
+ 'per': fPerc(anon_hits, arc_hits),
+ 'num': fHits(anon_hits),
+ }
+
+ output["most_recently_used"] = {
+ 'per': fPerc(mru_hits, arc_hits),
+ 'num': fHits(mru_hits),
+ }
+ output["most_frequently_used"] = {
+ 'per': fPerc(mfu_hits, arc_hits),
+ 'num': fHits(mfu_hits),
+ }
+ output["most_recently_used_ghost"] = {
+ 'per': fPerc(mru_ghost_hits, arc_hits),
+ 'num': fHits(mru_ghost_hits),
+ }
+ output["most_frequently_used_ghost"] = {
+ 'per': fPerc(mfu_ghost_hits, arc_hits),
+ 'num': fHits(mfu_ghost_hits),
+ }
+
+ output["cache_hits_by_data_type"] = {}
+ output["cache_hits_by_data_type"]["demand_data"] = {
+ 'per': fPerc(demand_data_hits, arc_hits),
+ 'num': fHits(demand_data_hits),
+ }
+ output["cache_hits_by_data_type"]["prefetch_data"] = {
+ 'per': fPerc(prefetch_data_hits, arc_hits),
+ 'num': fHits(prefetch_data_hits),
+ }
+ output["cache_hits_by_data_type"]["demand_metadata"] = {
+ 'per': fPerc(demand_metadata_hits, arc_hits),
+ 'num': fHits(demand_metadata_hits),
+ }
+ output["cache_hits_by_data_type"]["prefetch_metadata"] = {
+ 'per': fPerc(prefetch_metadata_hits, arc_hits),
+ 'num': fHits(prefetch_metadata_hits),
+ }
+
+ output["cache_misses_by_data_type"] = {}
+ output["cache_misses_by_data_type"]["demand_data"] = {
+ 'per': fPerc(demand_data_misses, arc_misses),
+ 'num': fHits(demand_data_misses),
+ }
+ output["cache_misses_by_data_type"]["prefetch_data"] = {
+ 'per': fPerc(prefetch_data_misses, arc_misses),
+ 'num': fHits(prefetch_data_misses),
+ }
+ output["cache_misses_by_data_type"]["demand_metadata"] = {
+ 'per': fPerc(demand_metadata_misses, arc_misses),
+ 'num': fHits(demand_metadata_misses),
+ }
+ output["cache_misses_by_data_type"]["prefetch_metadata"] = {
+ 'per': fPerc(prefetch_metadata_misses, arc_misses),
+ 'num': fHits(prefetch_metadata_misses),
+ }
+
+ return output
+
+
+def _arc_efficiency(Kstat):
+ """Print information on the efficiency of the ARC"""
+
+ arc = get_arc_efficiency(Kstat)
+
+ sys.stdout.write("ARC Total accesses:\t\t\t\t\t%s\n" %
+ arc['total_accesses'])
+ sys.stdout.write("\tCache Hit Ratio:\t\t%s\t%s\n" % (
+ arc['cache_hit_ratio']['per'],
+ arc['cache_hit_ratio']['num'],
+ )
+ )
+ sys.stdout.write("\tCache Miss Ratio:\t\t%s\t%s\n" % (
+ arc['cache_miss_ratio']['per'],
+ arc['cache_miss_ratio']['num'],
+ )
+ )
+
+ sys.stdout.write("\tActual Hit Ratio:\t\t%s\t%s\n" % (
+ arc['actual_hit_ratio']['per'],
+ arc['actual_hit_ratio']['num'],
+ )
+ )
+
+ sys.stdout.write("\n")
+ sys.stdout.write("\tData Demand Efficiency:\t\t%s\t%s\n" % (
+ arc['data_demand_efficiency']['per'],
+ arc['data_demand_efficiency']['num'],
+ )
+ )
+
+ if 'data_prefetch_efficiency' in arc:
+ sys.stdout.write("\tData Prefetch Efficiency:\t%s\t%s\n" % (
+ arc['data_prefetch_efficiency']['per'],
+ arc['data_prefetch_efficiency']['num'],
+ )
+ )
+ sys.stdout.write("\n")
+
+ sys.stdout.write("\tCACHE HITS BY CACHE LIST:\n")
+ if 'cache_hits_by_cache_list' in arc:
+ sys.stdout.write("\t Anonymously Used:\t\t%s\t%s\n" % (
+ arc['cache_hits_by_cache_list']['anonymously_used']['per'],
+ arc['cache_hits_by_cache_list']['anonymously_used']['num'],
+ )
+ )
+ sys.stdout.write("\t Most Recently Used:\t\t%s\t%s\n" % (
+ arc['most_recently_used']['per'],
+ arc['most_recently_used']['num'],
+ )
+ )
+ sys.stdout.write("\t Most Frequently Used:\t\t%s\t%s\n" % (
+ arc['most_frequently_used']['per'],
+ arc['most_frequently_used']['num'],
+ )
+ )
+ sys.stdout.write("\t Most Recently Used Ghost:\t%s\t%s\n" % (
+ arc['most_recently_used_ghost']['per'],
+ arc['most_recently_used_ghost']['num'],
+ )
+ )
+ sys.stdout.write("\t Most Frequently Used Ghost:\t%s\t%s\n" % (
+ arc['most_frequently_used_ghost']['per'],
+ arc['most_frequently_used_ghost']['num'],
+ )
+ )
+
+ sys.stdout.write("\n\tCACHE HITS BY DATA TYPE:\n")
+ sys.stdout.write("\t Demand Data:\t\t\t%s\t%s\n" % (
+ arc["cache_hits_by_data_type"]['demand_data']['per'],
+ arc["cache_hits_by_data_type"]['demand_data']['num'],
+ )
+ )
+ sys.stdout.write("\t Prefetch Data:\t\t%s\t%s\n" % (
+ arc["cache_hits_by_data_type"]['prefetch_data']['per'],
+ arc["cache_hits_by_data_type"]['prefetch_data']['num'],
+ )
+ )
+ sys.stdout.write("\t Demand Metadata:\t\t%s\t%s\n" % (
+ arc["cache_hits_by_data_type"]['demand_metadata']['per'],
+ arc["cache_hits_by_data_type"]['demand_metadata']['num'],
+ )
+ )
+ sys.stdout.write("\t Prefetch Metadata:\t\t%s\t%s\n" % (
+ arc["cache_hits_by_data_type"]['prefetch_metadata']['per'],
+ arc["cache_hits_by_data_type"]['prefetch_metadata']['num'],
+ )
+ )
+
+ sys.stdout.write("\n\tCACHE MISSES BY DATA TYPE:\n")
+ sys.stdout.write("\t Demand Data:\t\t\t%s\t%s\n" % (
+ arc["cache_misses_by_data_type"]['demand_data']['per'],
+ arc["cache_misses_by_data_type"]['demand_data']['num'],
+ )
+ )
+ sys.stdout.write("\t Prefetch Data:\t\t%s\t%s\n" % (
+ arc["cache_misses_by_data_type"]['prefetch_data']['per'],
+ arc["cache_misses_by_data_type"]['prefetch_data']['num'],
+ )
+ )
+ sys.stdout.write("\t Demand Metadata:\t\t%s\t%s\n" % (
+ arc["cache_misses_by_data_type"]['demand_metadata']['per'],
+ arc["cache_misses_by_data_type"]['demand_metadata']['num'],
+ )
+ )
+ sys.stdout.write("\t Prefetch Metadata:\t\t%s\t%s\n" % (
+ arc["cache_misses_by_data_type"]['prefetch_metadata']['per'],
+ arc["cache_misses_by_data_type"]['prefetch_metadata']['num'],
+ )
+ )
+
+
+def get_l2arc_summary(Kstat):
+ """Collection information on the L2ARC"""
+
+ output = {}
+
+ l2_abort_lowmem = Kstat["kstat.zfs.misc.arcstats.l2_abort_lowmem"]
+ l2_cksum_bad = Kstat["kstat.zfs.misc.arcstats.l2_cksum_bad"]
+ l2_evict_lock_retry = Kstat["kstat.zfs.misc.arcstats.l2_evict_lock_retry"]
+ l2_evict_reading = Kstat["kstat.zfs.misc.arcstats.l2_evict_reading"]
+ l2_feeds = Kstat["kstat.zfs.misc.arcstats.l2_feeds"]
+ l2_free_on_write = Kstat["kstat.zfs.misc.arcstats.l2_free_on_write"]
+ l2_hdr_size = Kstat["kstat.zfs.misc.arcstats.l2_hdr_size"]
+ l2_hits = Kstat["kstat.zfs.misc.arcstats.l2_hits"]
+ l2_io_error = Kstat["kstat.zfs.misc.arcstats.l2_io_error"]
+ l2_misses = Kstat["kstat.zfs.misc.arcstats.l2_misses"]
+ l2_rw_clash = Kstat["kstat.zfs.misc.arcstats.l2_rw_clash"]
+ l2_size = Kstat["kstat.zfs.misc.arcstats.l2_size"]
+ l2_asize = Kstat["kstat.zfs.misc.arcstats.l2_asize"]
+ l2_writes_done = Kstat["kstat.zfs.misc.arcstats.l2_writes_done"]
+ l2_writes_error = Kstat["kstat.zfs.misc.arcstats.l2_writes_error"]
+ l2_writes_sent = Kstat["kstat.zfs.misc.arcstats.l2_writes_sent"]
+
+ l2_access_total = (l2_hits + l2_misses)
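+ # Any write, checksum, or I/O error marks the L2ARC as DEGRADED below.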
+ output['l2_health_count'] = (l2_writes_error + l2_cksum_bad + l2_io_error)
+
+ output['l2_access_total'] = l2_access_total
+ output['l2_size'] = l2_size
+ output['l2_asize'] = l2_asize
+
+ if l2_size > 0 and l2_access_total > 0:
+
+ if output['l2_health_count'] > 0:
+ output["health"] = "DEGRADED"
+ else:
+ output["health"] = "HEALTHY"
+
+ output["low_memory_aborts"] = fHits(l2_abort_lowmem)
+ output["free_on_write"] = fHits(l2_free_on_write)
+ output["rw_clashes"] = fHits(l2_rw_clash)
+ output["bad_checksums"] = fHits(l2_cksum_bad)
+ output["io_errors"] = fHits(l2_io_error)
+
+ output["l2_arc_size"] = {}
+ output["l2_arc_size"]["adative"] = fBytes(l2_size)
+ output["l2_arc_size"]["actual"] = {
+ 'per': fPerc(l2_asize, l2_size),
+ 'num': fBytes(l2_asize)
+ }
+ output["l2_arc_size"]["head_size"] = {
+ 'per': fPerc(l2_hdr_size, l2_size),
+ 'num': fBytes(l2_hdr_size),
+ }
+
+ output["l2_arc_evicts"] = {}
+ output["l2_arc_evicts"]['lock_retries'] = fHits(l2_evict_lock_retry)
+ output["l2_arc_evicts"]['reading'] = fHits(l2_evict_reading)
+
+ output['l2_arc_breakdown'] = {}
+ output['l2_arc_breakdown']['value'] = fHits(l2_access_total)
+ output['l2_arc_breakdown']['hit_ratio'] = {
+ 'per': fPerc(l2_hits, l2_access_total),
+ 'num': fHits(l2_hits),
+ }
+ output['l2_arc_breakdown']['miss_ratio'] = {
+ 'per': fPerc(l2_misses, l2_access_total),
+ 'num': fHits(l2_misses),
+ }
+ output['l2_arc_breakdown']['feeds'] = fHits(l2_feeds)
+
+ output['l2_arc_buffer'] = {}
+
+ output['l2_arc_writes'] = {}
+ output['l2_writes_done'] = l2_writes_done
+ output['l2_writes_sent'] = l2_writes_sent
+ if l2_writes_done != l2_writes_sent:
+ output['l2_arc_writes']['writes_sent'] = {
+ 'value': "FAULTED",
+ 'num': fHits(l2_writes_sent),
+ }
+ output['l2_arc_writes']['done_ratio'] = {
+ 'per': fPerc(l2_writes_done, l2_writes_sent),
+ 'num': fHits(l2_writes_done),
+ }
+ output['l2_arc_writes']['error_ratio'] = {
+ 'per': fPerc(l2_writes_error, l2_writes_sent),
+ 'num': fHits(l2_writes_error),
+ }
+ else:
+ output['l2_arc_writes']['writes_sent'] = {
+ 'per': fPerc(100),
+ 'num': fHits(l2_writes_sent),
+ }
+
+ return output
+
+
+def _l2arc_summary(Kstat):
+ """Print information on the L2ARC"""
+
+ arc = get_l2arc_summary(Kstat)
+
+ if arc['l2_size'] > 0 and arc['l2_access_total'] > 0:
+ sys.stdout.write("L2 ARC Summary: ")
+ if arc['l2_health_count'] > 0:
+ sys.stdout.write("(DEGRADED)\n")
+ else:
+ sys.stdout.write("(HEALTHY)\n")
+ sys.stdout.write("\tLow Memory Aborts:\t\t\t%s\n" %
+ arc['low_memory_aborts'])
+ sys.stdout.write("\tFree on Write:\t\t\t\t%s\n" % arc['free_on_write'])
+ sys.stdout.write("\tR/W Clashes:\t\t\t\t%s\n" % arc['rw_clashes'])
+ sys.stdout.write("\tBad Checksums:\t\t\t\t%s\n" % arc['bad_checksums'])
+ sys.stdout.write("\tIO Errors:\t\t\t\t%s\n" % arc['io_errors'])
+ sys.stdout.write("\n")
+
+ sys.stdout.write("L2 ARC Size: (Adaptive)\t\t\t\t%s\n" %
+ arc["l2_arc_size"]["adative"])
+ sys.stdout.write("\tCompressed:\t\t\t%s\t%s\n" % (
+ arc["l2_arc_size"]["actual"]["per"],
+ arc["l2_arc_size"]["actual"]["num"],
+ )
+ )
+ sys.stdout.write("\tHeader Size:\t\t\t%s\t%s\n" % (
+ arc["l2_arc_size"]["head_size"]["per"],
+ arc["l2_arc_size"]["head_size"]["num"],
+ )
+ )
+ sys.stdout.write("\n")
+
+ if arc["l2_arc_evicts"]['lock_retries'] != '0' or \
+ arc["l2_arc_evicts"]["reading"] != '0':
+ sys.stdout.write("L2 ARC Evicts:\n")
+ sys.stdout.write("\tLock Retries:\t\t\t\t%s\n" %
+ arc["l2_arc_evicts"]['lock_retries'])
+ sys.stdout.write("\tUpon Reading:\t\t\t\t%s\n" %
+ arc["l2_arc_evicts"]["reading"])
+ sys.stdout.write("\n")
+
+ sys.stdout.write("L2 ARC Breakdown:\t\t\t\t%s\n" %
+ arc['l2_arc_breakdown']['value'])
+ sys.stdout.write("\tHit Ratio:\t\t\t%s\t%s\n" % (
+ arc['l2_arc_breakdown']['hit_ratio']['per'],
+ arc['l2_arc_breakdown']['hit_ratio']['num'],
+ )
+ )
+
+ sys.stdout.write("\tMiss Ratio:\t\t\t%s\t%s\n" % (
+ arc['l2_arc_breakdown']['miss_ratio']['per'],
+ arc['l2_arc_breakdown']['miss_ratio']['num'],
+ )
+ )
+
+ sys.stdout.write("\tFeeds:\t\t\t\t\t%s\n" %
+ arc['l2_arc_breakdown']['feeds'])
+ sys.stdout.write("\n")
+
+ sys.stdout.write("L2 ARC Writes:\n")
+ if arc['l2_writes_done'] != arc['l2_writes_sent']:
+ sys.stdout.write("\tWrites Sent: (%s)\t\t\t\t%s\n" % (
+ arc['l2_arc_writes']['writes_sent']['value'],
+ arc['l2_arc_writes']['writes_sent']['num'],
+ )
+ )
+ sys.stdout.write("\t Done Ratio:\t\t\t%s\t%s\n" % (
+ arc['l2_arc_writes']['done_ratio']['per'],
+ arc['l2_arc_writes']['done_ratio']['num'],
+ )
+ )
+ sys.stdout.write("\t Error Ratio:\t\t\t%s\t%s\n" % (
+ arc['l2_arc_writes']['error_ratio']['per'],
+ arc['l2_arc_writes']['error_ratio']['num'],
+ )
+ )
+ else:
+ sys.stdout.write("\tWrites Sent:\t\t\t%s\t%s\n" % (
+ arc['l2_arc_writes']['writes_sent']['per'],
+ arc['l2_arc_writes']['writes_sent']['num'],
+ )
+ )
+
+
+def get_dmu_summary(Kstat):
+ """Collect information on the DMU"""
+
+ output = {}
+
+ zfetch_hits = Kstat["kstat.zfs.misc.zfetchstats.hits"]
+ zfetch_misses = Kstat["kstat.zfs.misc.zfetchstats.misses"]
+
+ zfetch_access_total = (zfetch_hits + zfetch_misses)
+ output['zfetch_access_total'] = zfetch_access_total
+
+ if zfetch_access_total > 0:
+ output['dmu'] = {}
+ output['dmu']['efficiency'] = {}
+ output['dmu']['efficiency']['value'] = fHits(zfetch_access_total)
+ output['dmu']['efficiency']['hit_ratio'] = {
+ 'per': fPerc(zfetch_hits, zfetch_access_total),
+ 'num': fHits(zfetch_hits),
+ }
+ output['dmu']['efficiency']['miss_ratio'] = {
+ 'per': fPerc(zfetch_misses, zfetch_access_total),
+ 'num': fHits(zfetch_misses),
+ }
+
+ return output
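+
+# Illustrative: with 900 zfetch hits and 100 misses, the efficiency block
+# would report a 90% hit ratio and a 10% miss ratio.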
+
+
+def _dmu_summary(Kstat):
+ """Print information on the DMU"""
+
+ arc = get_dmu_summary(Kstat)
+
+ if arc['zfetch_access_total'] > 0:
+ sys.stdout.write("DMU Prefetch Efficiency:\t\t\t\t\t%s\n" %
+ arc['dmu']['efficiency']['value'])
+ sys.stdout.write("\tHit Ratio:\t\t\t%s\t%s\n" % (
+ arc['dmu']['efficiency']['hit_ratio']['per'],
+ arc['dmu']['efficiency']['hit_ratio']['num'],
+ )
+ )
+ sys.stdout.write("\tMiss Ratio:\t\t\t%s\t%s\n" % (
+ arc['dmu']['efficiency']['miss_ratio']['per'],
+ arc['dmu']['efficiency']['miss_ratio']['num'],
+ )
+ )
+
+ sys.stdout.write("\n")
+
+
+def get_vdev_summary(Kstat):
+ """Collect information on the VDEVs"""
+
+ output = {}
+
+ vdev_cache_delegations = \
+ Kstat["kstat.zfs.misc.vdev_cache_stats.delegations"]
+ vdev_cache_misses = Kstat["kstat.zfs.misc.vdev_cache_stats.misses"]
+ vdev_cache_hits = Kstat["kstat.zfs.misc.vdev_cache_stats.hits"]
+ vdev_cache_total = (vdev_cache_misses + vdev_cache_hits +
+ vdev_cache_delegations)
+
+ output['vdev_cache_total'] = vdev_cache_total
+
+ if vdev_cache_total > 0:
+ output['summary'] = fHits(vdev_cache_total)
+ output['hit_ratio'] = {
+ 'per': fPerc(vdev_cache_hits, vdev_cache_total),
+ 'num': fHits(vdev_cache_hits),
+ }
+ output['miss_ratio'] = {
+ 'per': fPerc(vdev_cache_misses, vdev_cache_total),
+ 'num': fHits(vdev_cache_misses),
+ }
+ output['delegations'] = {
+ 'per': fPerc(vdev_cache_delegations, vdev_cache_total),
+ 'num': fHits(vdev_cache_delegations),
+ }
+
+ return output
+
+
+def _vdev_summary(Kstat):
+ """Print information on the VDEVs"""
+
+ arc = get_vdev_summary(Kstat)
+
+ if arc['vdev_cache_total'] > 0:
+ sys.stdout.write("VDEV Cache Summary:\t\t\t\t%s\n" % arc['summary'])
+ sys.stdout.write("\tHit Ratio:\t\t\t%s\t%s\n" % (
+ arc['hit_ratio']['per'],
+ arc['hit_ratio']['num'],
+ ))
+ sys.stdout.write("\tMiss Ratio:\t\t\t%s\t%s\n" % (
+ arc['miss_ratio']['per'],
+ arc['miss_ratio']['num'],
+ ))
+ sys.stdout.write("\tDelegations:\t\t\t%s\t%s\n" % (
+ arc['delegations']['per'],
+ arc['delegations']['num'],
+ ))
+
+
+def _tunable_summary(Kstat):
+ """Print information on tunables, including descriptions if requested"""
+
+ global show_tunable_descriptions
+ global alternate_tunable_layout
+
+ names = os.listdir("/sys/module/zfs/parameters/")
+
+ values = {}
+ for name in names:
+ with open("/sys/module/zfs/parameters/" + name) as f:
+ value = f.read()
+ values[name] = value.strip()
+
+ descriptions = {}
+
+ if show_tunable_descriptions:
+
+ command = ["/sbin/modinfo", "zfs", "-0"]
+
+ try:
+ p = Popen(command, stdin=PIPE, stdout=PIPE,
+ stderr=PIPE, shell=False, close_fds=True)
+ p.wait()
+
+ # By default, Python 2 returns a string as the first element of the
+ # tuple from p.communicate(), while Python 3 returns bytes which
+ # must be decoded first. The better way to do this would be with
+ # subprocess.run() or at least .check_output(), but this fails on
+ # CentOS 6 because of its old version of Python 2
+ desc = bytes.decode(p.communicate()[0])
+ description_list = desc.strip().split('\0')
+
+ if p.returncode == 0:
+ for tunable in description_list:
+ if tunable[0:5] == 'parm:':
+ tunable = tunable[5:].strip()
+ name, description = tunable.split(':', 1)
+ if not description:
+ description = "Description unavailable"
+ descriptions[name] = description
+ else:
+ sys.stderr.write("%s: '%s' exited with code %i\n" %
+ (sys.argv[0], command[0], p.returncode))
+ sys.stderr.write("Tunable descriptions will be disabled.\n")
+ except OSError as e:
+ sys.stderr.write("%s: Cannot run '%s': %s\n" %
+ (sys.argv[0], command[0], e.strerror))
+ sys.stderr.write("Tunable descriptions will be disabled.\n")
+
+ sys.stdout.write("ZFS Tunables:\n")
+ names.sort()
+
+ if alternate_tunable_layout:
+ fmt = "\t%s=%s\n"
+ else:
+ fmt = "\t%-50s%s\n"
+
+ for name in names:
+
+ if not name:
+ continue
+
+ if show_tunable_descriptions and name in descriptions:
+ sys.stdout.write("\t# %s\n" % descriptions[name])
+
+ sys.stdout.write(fmt % (name, values[name]))
+
+
+unSub = [
+ _arc_summary,
+ _arc_efficiency,
+ _l2arc_summary,
+ _dmu_summary,
+ _vdev_summary,
+ _tunable_summary
+]
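+
+# The -p/--page option selects one of these report functions by its 1-based
+# position in this list.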
+
+
+def zfs_header():
+ """Print title string with date"""
+
+ daydate = time.strftime('%a %b %d %H:%M:%S %Y')
+
+ sys.stdout.write('\n'+'-'*72+'\n')
+ sys.stdout.write('ZFS Subsystem Report\t\t\t\t%s' % daydate)
+ sys.stdout.write('\n')
+
+
+def usage():
+ """Print usage information"""
+
+ sys.stdout.write("Usage: arc_summary [-h] [-a] [-d] [-p PAGE]\n\n")
+ sys.stdout.write("\t -h, --help : "
+ "Print this help message and exit\n")
+ sys.stdout.write("\t -a, --alternate : "
+ "Show an alternate sysctl layout\n")
+ sys.stdout.write("\t -d, --description : "
+ "Show the sysctl descriptions\n")
+ sys.stdout.write("\t -p PAGE, --page=PAGE : "
+ "Select a single output page to display,\n")
+ sys.stdout.write("\t "
+ "should be an integer between 1 and " +
+ str(len(unSub)) + "\n\n")
+ sys.stdout.write("Examples:\n")
+ sys.stdout.write("\tarc_summary -a\n")
+ sys.stdout.write("\tarc_summary -p 4\n")
+ sys.stdout.write("\tarc_summary -ad\n")
+ sys.stdout.write("\tarc_summary --page=2\n")
+
+
+def main():
+ """Main function"""
+
+ global show_tunable_descriptions
+ global alternate_tunable_layout
+
+ try:
+ opts, args = getopt.getopt(
+ sys.argv[1:],
+ "adp:h", ["alternate", "description", "page=", "help"]
+ )
+ except getopt.error as e:
+ sys.stderr.write("Error: %s\n" % e.msg)
+ usage()
+ sys.exit(1)
+
+ args = {}
+ for opt, arg in opts:
+ if opt in ('-a', '--alternate'):
+ args['a'] = True
+ if opt in ('-d', '--description'):
+ args['d'] = True
+ if opt in ('-p', '--page'):
+ args['p'] = arg
+ if opt in ('-h', '--help'):
+ usage()
+ sys.exit(0)
+
+ Kstat = get_Kstat()
+
+ alternate_tunable_layout = 'a' in args
+ show_tunable_descriptions = 'd' in args
+
+ pages = []
+
+ if 'p' in args:
+ try:
+ pages.append(unSub[int(args['p']) - 1])
+ except (IndexError, ValueError):
+ sys.stderr.write('the argument to -p must be between 1 and ' +
+ str(len(unSub)) + '\n')
+ sys.exit(1)
+ else:
+ pages = unSub
+
+ zfs_header()
+ for page in pages:
+ page(Kstat)
+ sys.stdout.write("\n")
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+#!/usr/bin/python3
+#
+# Copyright (c) 2008 Ben Rockwood <benr@cuddletech.com>,
+# Copyright (c) 2010 Martin Matuska <mm@FreeBSD.org>,
+# Copyright (c) 2010-2011 Jason J. Hellenthal <jhell@DataIX.net>,
+# Copyright (c) 2017 Scot W. Stevenson <scot.stevenson@gmail.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+"""Print statistics on the ZFS ARC Cache and other information
+
+Provides basic information on the ARC, its efficiency, the L2ARC (if present),
+the Data Management Unit (DMU), Virtual Devices (VDEVs), and tunables. See
+the in-source documentation and code at
+https://github.com/zfsonlinux/zfs/blob/master/module/zfs/arc.c for details.
+The original introduction to arc_summary can be found at
+http://cuddletech.com/?p=454
+"""
+
+import argparse
+import os
+import subprocess
+import sys
+import time
+
+DESCRIPTION = 'Print ARC and other statistics for ZFS on Linux'
+INDENT = ' '*8
+LINE_LENGTH = 72
+PROC_PATH = '/proc/spl/kstat/zfs/'
+SPL_PATH = '/sys/module/spl/parameters/'
+TUNABLES_PATH = '/sys/module/zfs/parameters/'
+DATE_FORMAT = '%a %b %d %H:%M:%S %Y'
+TITLE = 'ZFS Subsystem Report'
+
+SECTIONS = 'arc archits dmu l2arc spl tunables vdev zil'.split()
+SECTION_HELP = 'print info from one section ('+' '.join(SECTIONS)+')'
+
+# Tunables and SPL are handled separately because they come from
+# different sources
+SECTION_PATHS = {'arc': 'arcstats',
+ 'dmu': 'dmu_tx',
+ 'l2arc': 'arcstats', # L2ARC stuff lives in arcstats
+ 'vdev': 'vdev_cache_stats',
+ 'xuio': 'xuio_stats',
+ 'zfetch': 'zfetchstats',
+ 'zil': 'zil'}
+
+parser = argparse.ArgumentParser(description=DESCRIPTION)
+parser.add_argument('-a', '--alternate', action='store_true', default=False,
+ help='use alternate formatting for tunables and SPL',
+ dest='alt')
+parser.add_argument('-d', '--description', action='store_true', default=False,
+ help='print descriptions with tunables and SPL',
+ dest='desc')
+parser.add_argument('-g', '--graph', action='store_true', default=False,
+ help='print graph on ARC use and exit', dest='graph')
+parser.add_argument('-p', '--page', type=int, dest='page',
+ help='print page by number (DEPRECATED, use "-s")')
+parser.add_argument('-r', '--raw', action='store_true', default=False,
+ help='dump all available data with minimal formatting',
+ dest='raw')
+parser.add_argument('-s', '--section', dest='section', help=SECTION_HELP)
+ARGS = parser.parse_args()
+
+
+def cleanup_line(single_line):
+ """Format a raw line of data from /proc and isolate the name value
+ part, returning a tuple with each. Currently, this gets rid of the
+ middle '4'. For example "arc_no_grow 4 0" returns the tuple
+ ("arc_no_grow", "0").
+ """
+ name, _, value = single_line.split()
+
+ return name, value
+
+
+def draw_graph(kstats_dict):
+ """Draw a primitive graph representing the basic information on the
+ ARC -- its size and the proportion used by MFU and MRU -- and quit.
+ We use max size of the ARC to calculate how full it is. This is a
+ very rough representation.
+ """
+
+ arc_stats = isolate_section('arcstats', kstats_dict)
+
+ GRAPH_INDENT = ' '*4
+ GRAPH_WIDTH = 60
+ arc_size = f_bytes(arc_stats['size'])
+ arc_perc = f_perc(arc_stats['size'], arc_stats['c_max'])
+ mfu_size = f_bytes(arc_stats['mfu_size'])
+ mru_size = f_bytes(arc_stats['mru_size'])
+ meta_limit = f_bytes(arc_stats['arc_meta_limit'])
+ meta_size = f_bytes(arc_stats['arc_meta_used'])
+ dnode_limit = f_bytes(arc_stats['arc_dnode_limit'])
+ dnode_size = f_bytes(arc_stats['dnode_size'])
+
+ info_form = ('ARC: {0} ({1}) MFU: {2} MRU: {3} META: {4} ({5}) '
+ 'DNODE {6} ({7})')
+ info_line = info_form.format(arc_size, arc_perc, mfu_size, mru_size,
+ meta_size, meta_limit, dnode_size,
+ dnode_limit)
+ info_spc = ' '*int((GRAPH_WIDTH-len(info_line))/2)
+ info_line = GRAPH_INDENT+info_spc+info_line
+
+ graph_line = GRAPH_INDENT+'+'+('-'*(GRAPH_WIDTH-2))+'+'
+
+ mfu_perc = float(int(arc_stats['mfu_size'])/int(arc_stats['c_max']))
+ mru_perc = float(int(arc_stats['mru_size'])/int(arc_stats['c_max']))
+ arc_perc = float(int(arc_stats['size'])/int(arc_stats['c_max']))
+ total_ticks = float(arc_perc)*GRAPH_WIDTH
+ mfu_ticks = mfu_perc*GRAPH_WIDTH
+ mru_ticks = mru_perc*GRAPH_WIDTH
+ other_ticks = total_ticks-(mfu_ticks+mru_ticks)
+
+ core_form = 'F'*int(mfu_ticks)+'R'*int(mru_ticks)+'O'*int(other_ticks)
+ core_spc = ' '*(GRAPH_WIDTH-(2+len(core_form)))
+ core_line = GRAPH_INDENT+'|'+core_form+core_spc+'|'
+
+ for line in ('', info_line, graph_line, core_line, graph_line, ''):
+ print(line)
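+
+# The printed graph is a legend line followed by a framed bar in which 'F',
+# 'R' and 'O' ticks stand for the MFU, MRU and remaining portions of the ARC.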
+
+
+def f_bytes(byte_string):
+ """Return human-readable representation of a byte value in
+ powers of 2 (eg "KiB" for "kibibytes", etc) to two decimal
+ points. Values smaller than one KiB are returned without
+ decimal points. Note "bytes" is a reserved keyword.
+ """
+
+ prefixes = ([2**80, "YiB"], # yobibytes (yotta)
+ [2**70, "ZiB"], # zebibytes (zetta)
+ [2**60, "EiB"], # exbibytes (exa)
+ [2**50, "PiB"], # pebibytes (peta)
+ [2**40, "TiB"], # tebibytes (tera)
+ [2**30, "GiB"], # gibibytes (giga)
+ [2**20, "MiB"], # mebibytes (mega)
+ [2**10, "KiB"]) # kibibytes (kilo)
+
+ bites = int(byte_string)
+
+ if bites >= 2**10:
+ for limit, unit in prefixes:
+
+ if bites >= limit:
+ value = bites / limit
+ break
+
+ result = '{0:.1f} {1}'.format(value, unit)
+ else:
+ result = '{0} Bytes'.format(bites)
+
+ return result
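+
+# Illustrative conversions (hypothetical inputs, not taken from kstats):
+#   f_bytes('1572864') -> '1.5 MiB'
+#   f_bytes('512')     -> '512 Bytes'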
+
+
+def f_hits(hits_string):
+ """Create a human-readable representation of the number of hits.
+ The single-letter symbols used are SI to avoid the confusion caused
+ by the different "short scale" and "long scale" representations in
+ English, which use the same words for different values. See
+ https://en.wikipedia.org/wiki/Names_of_large_numbers and:
+ https://physics.nist.gov/cuu/Units/prefixes.html
+ """
+
+ numbers = ([10**24, 'Y'], # yotta (septillion)
+ [10**21, 'Z'], # zetta (sextillion)
+ [10**18, 'E'], # exa (quintrillion)
+ [10**15, 'P'], # peta (quadrillion)
+ [10**12, 'T'], # tera (trillion)
+ [10**9, 'G'], # giga (billion)
+ [10**6, 'M'], # mega (million)
+ [10**3, 'k']) # kilo (thousand)
+
+ hits = int(hits_string)
+
+ if hits >= 1000:
+ for limit, symbol in numbers:
+
+ if hits >= limit:
+ value = hits/limit
+ break
+
+ result = "%0.1f%s" % (value, symbol)
+ else:
+ result = "%d" % hits
+
+ return result
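+
+# Illustrative conversions (hypothetical inputs):
+#   f_hits('1500000') -> '1.5M'
+#   f_hits('999')     -> '999'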
+
+
+def f_perc(value1, value2):
+ """Calculate percentage and return in human-readable form. If
+ rounding produces the result '0.0' though the first number is
+ not zero, include a 'less-than' symbol to avoid confusion.
+ Division by zero is handled by returning 'n/a'; no error
+ is called.
+ """
+
+ v1 = float(value1)
+ v2 = float(value2)
+
+ try:
+ perc = 100 * v1/v2
+ except ZeroDivisionError:
+ result = 'n/a'
+ else:
+ result = '{0:0.1f} %'.format(perc)
+
+ if result == '0.0 %' and v1 > 0:
+ result = '< 0.1 %'
+
+ return result
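+
+# Illustrative results (hypothetical inputs):
+#   f_perc(3, 4)    -> '75.0 %'
+#   f_perc(1, 2000) -> '< 0.1 %'  (rounds to zero, but the numerator is not zero)
+#   f_perc(1, 0)    -> 'n/a'      (division by zero)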
+
+
+def format_raw_line(name, value):
+ """For the --raw option for the tunable and SPL outputs, decide on the
+ correct formatting based on the --alternate flag.
+ """
+
+ if ARGS.alt:
+ result = '{0}{1}={2}'.format(INDENT, name, value)
+ else:
+ spc = LINE_LENGTH-(len(INDENT)+len(value))
+ result = '{0}{1:<{spc}}{2}'.format(INDENT, name, value, spc=spc)
+
+ return result
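+
+# With --alternate the line is printed as (illustrative) '<INDENT>name=value';
+# otherwise the name is left-justified so that the value ends at column
+# LINE_LENGTH (assuming the name fits within the padding).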
+
+
+def get_kstats():
+ """Collect information on the ZFS subsystem from the /proc Linux virtual
+ file system. The step does not perform any further processing, giving us
+ the option to only work on what is actually needed. The name "kstat" is a
+ holdover from the Solaris utility of the same name.
+ """
+
+ result = {}
+ secs = SECTION_PATHS.values()
+
+ for section in secs:
+
+ with open(PROC_PATH+section, 'r') as proc_location:
+ lines = [line for line in proc_location]
+
+ del lines[0:2] # Get rid of header
+ result[section] = lines
+
+ return result
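+
+# The result maps each kstat file name to its raw lines, e.g. (illustrative):
+#   {'arcstats': ['hits                     4    123456\n', ...], 'zil': [...]}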
+
+
+def get_spl_tunables(PATH):
+ """Collect information on the Solaris Porting Layer (SPL) or the
+ tunables, depending on the PATH given. Does not check if PATH is
+ legal.
+ """
+
+ result = {}
+ parameters = os.listdir(PATH)
+
+ for name in parameters:
+
+ with open(PATH+name, 'r') as para_file:
+ value = para_file.read()
+ result[name] = value.strip()
+
+ return result
+
+
+def get_descriptions(request):
+ """Get the decriptions of the Solaris Porting Layer (SPL) or the
+ tunables, return with minimal formatting.
+ """
+
+ if request not in ('spl', 'zfs'):
+ print('ERROR: description of "{0}" requested'.format(request))
+ sys.exit(1)
+
+ descs = {}
+ target_prefix = 'parm:'
+
+ # We would prefer to do this with /sys/modules -- see the discussion at
+ # get_version() -- but there isn't a way to get the descriptions from
+ # there, so we fall back on modinfo
+ command = ["/sbin/modinfo", request, "-0"]
+
+ # The recommended way to do this is with subprocess.run(). However,
+ # some installed versions of Python are < 3.5, so we offer them
+ # the option of doing it the old way (for now)
+ info = ''
+
+ try:
+
+ if 'run' in dir(subprocess):
+ info = subprocess.run(command, stdout=subprocess.PIPE,
+ universal_newlines=True)
+ raw_output = info.stdout.split('\0')
+ else:
+ info = subprocess.check_output(command, universal_newlines=True)
+ raw_output = info.split('\0')
+
+ except subprocess.CalledProcessError:
+ print("Error: Descriptions not available (can't access kernel module)")
+ sys.exit(1)
+
+ for line in raw_output:
+
+ if not line.startswith(target_prefix):
+ continue
+
+ line = line[len(target_prefix):].strip()
+ name, raw_desc = line.split(':', 1)
+ desc = raw_desc.rsplit('(', 1)[0]
+
+ if desc == '':
+ desc = '(No description found)'
+
+ descs[name.strip()] = desc.strip()
+
+ return descs
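+
+# Illustrative parsing of a single modinfo field (hypothetical content):
+#   'parm:zfs_arc_max:Maximum ARC size (ulong)'
+#     -> descs['zfs_arc_max'] = 'Maximum ARC size'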
+
+
+def get_version(request):
+ """Get the version number of ZFS or SPL on this machine for header.
+ Returns an error string, but does not raise an error, if we can't
+ get the ZFS/SPL version via modinfo.
+ """
+
+ if request not in ('spl', 'zfs'):
+ error_msg = '(ERROR: "{0}" requested)'.format(request)
+ return error_msg
+
+ # The original arc_summary called /sbin/modinfo/{spl,zfs} to get
+ # the version information. We switch to /sys/module/{spl,zfs}/version
+ # to make sure we get what is really loaded in the kernel
+ command = ["cat", "/sys/module/{0}/version".format(request)]
+ req = request.upper()
+ version = "(Can't get {0} version)".format(req)
+
+ # The recommended way to do this is with subprocess.run(). However,
+ # some installed versions of Python are < 3.5, so we offer them
+ # the option of doing it the old way (for now)
+ info = ''
+ if 'run' in dir(subprocess):
+ info = subprocess.run(command, stdout=subprocess.PIPE,
+ universal_newlines=True)
+ version = info.stdout.strip()
+ else:
+ info = subprocess.check_output(command, universal_newlines=True)
+ version = info.strip()
+
+ return version
+
+
+def print_header():
+ """Print the initial heading with date and time as well as info on the
+ Linux and ZFS versions. This is not called for the graph.
+ """
+
+ # datetime is now recommended over time but we keep the exact formatting
+ # from the older version of arc_summary in case there are scripts
+ # that expect it in this way
+ daydate = time.strftime(DATE_FORMAT)
+ spc_date = LINE_LENGTH-len(daydate)
+ sys_version = os.uname()
+
+ sys_msg = sys_version.sysname+' '+sys_version.release
+ zfs = get_version('zfs')
+ spc_zfs = LINE_LENGTH-len(zfs)
+
+ machine_msg = 'Machine: '+sys_version.nodename+' ('+sys_version.machine+')'
+ spl = get_version('spl')
+ spc_spl = LINE_LENGTH-len(spl)
+
+ print('\n'+('-'*LINE_LENGTH))
+ print('{0:<{spc}}{1}'.format(TITLE, daydate, spc=spc_date))
+ print('{0:<{spc}}{1}'.format(sys_msg, zfs, spc=spc_zfs))
+ print('{0:<{spc}}{1}\n'.format(machine_msg, spl, spc=spc_spl))
+
+
+def print_raw(kstats_dict):
+ """Print all available data from the system in a minimally sorted format.
+ This can be used as a source to be piped through 'grep'.
+ """
+
+ sections = sorted(kstats_dict.keys())
+
+ for section in sections:
+
+ print('\n{0}:'.format(section.upper()))
+ lines = sorted(kstats_dict[section])
+
+ for line in lines:
+ name, value = cleanup_line(line)
+ print(format_raw_line(name, value))
+
+ # Tunables and SPL must be handled separately because they come from a
+ # different source and have descriptions the user might request
+ print()
+ section_spl()
+ section_tunables()
+
+
+def isolate_section(section_name, kstats_dict):
+ """From the complete information on all sections, retrieve only those
+ for one section.
+ """
+
+ try:
+ section_data = kstats_dict[section_name]
+ except KeyError:
+ print('ERROR: Data on {0} not available'.format(section_name))
+ sys.exit(1)
+
+ section_dict = dict(cleanup_line(l) for l in section_data)
+
+ return section_dict
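+
+# Illustrative: isolate_section('zil', kstats_dict) turns the raw 'zil' lines
+# into a flat {name: value} dictionary of strings.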
+
+
+# Formatted output helper functions
+
+
+def prt_1(text, value):
+ """Print text and one value, no indent"""
+ spc = ' '*(LINE_LENGTH-(len(text)+len(value)))
+ print('{0}{spc}{1}'.format(text, value, spc=spc))
+
+
+def prt_i1(text, value):
+ """Print text and one value, with indent"""
+ spc = ' '*(LINE_LENGTH-(len(INDENT)+len(text)+len(value)))
+ print(INDENT+'{0}{spc}{1}'.format(text, value, spc=spc))
+
+
+def prt_2(text, value1, value2):
+ """Print text and two values, no indent"""
+ values = '{0:>9} {1:>9}'.format(value1, value2)
+ spc = ' '*(LINE_LENGTH-(len(text)+len(values)+2))
+ print('{0}{spc} {1}'.format(text, values, spc=spc))
+
+
+def prt_i2(text, value1, value2):
+ """Print text and two values, with indent"""
+ values = '{0:>9} {1:>9}'.format(value1, value2)
+ spc = ' '*(LINE_LENGTH-(len(INDENT)+len(text)+len(values)+2))
+ print(INDENT+'{0}{spc} {1}'.format(text, values, spc=spc))
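+
+# The two-value helpers right-justify each value in a nine-character field so
+# the columns line up and the row ends near column LINE_LENGTH, e.g.
+# (illustrative) prt_i2('Hit ratio:', '99.9 %', '1.2M').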
+
+
+# The section output concentrates on important parameters instead of
+# being exhaustive (that is what the --raw parameter is for)
+
+
+def section_arc(kstats_dict):
+ """Give basic information on the ARC, MRU and MFU. This is the first
+ and most used section.
+ """
+
+ arc_stats = isolate_section('arcstats', kstats_dict)
+
+ throttle = arc_stats['memory_throttle_count']
+
+ if throttle == '0':
+ health = 'HEALTHY'
+ else:
+ health = 'THROTTLED'
+
+ prt_1('ARC status:', health)
+ prt_i1('Memory throttle count:', throttle)
+ print()
+
+ arc_size = arc_stats['size']
+ arc_target_size = arc_stats['c']
+ arc_max = arc_stats['c_max']
+ arc_min = arc_stats['c_min']
+ mfu_size = arc_stats['mfu_size']
+ mru_size = arc_stats['mru_size']
+ meta_limit = arc_stats['arc_meta_limit']
+ meta_size = arc_stats['arc_meta_used']
+ dnode_limit = arc_stats['arc_dnode_limit']
+ dnode_size = arc_stats['dnode_size']
+ target_size_ratio = '{0}:1'.format(int(arc_max) // int(arc_min))
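+ # Illustrative: a c_max of 8 GiB and a c_min of 1 GiB give a ratio of '8:1'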
+
+ prt_2('ARC size (current):',
+ f_perc(arc_size, arc_max), f_bytes(arc_size))
+ prt_i2('Target size (adaptive):',
+ f_perc(arc_target_size, arc_max), f_bytes(arc_target_size))
+ prt_i2('Min size (hard limit):',
+ f_perc(arc_min, arc_max), f_bytes(arc_min))
+ prt_i2('Max size (high water):',
+ target_size_ratio, f_bytes(arc_max))
+ caches_size = int(mfu_size)+int(mru_size)
+ prt_i2('Most Frequently Used (MFU) cache size:',
+ f_perc(mfu_size, caches_size), f_bytes(mfu_size))
+ prt_i2('Most Recently Used (MRU) cache size:',
+ f_perc(mru_size, caches_size), f_bytes(mru_size))
+ prt_i2('Metadata cache size (hard limit):',
+ f_perc(meta_limit, arc_max), f_bytes(meta_limit))
+ prt_i2('Metadata cache size (current):',
+ f_perc(meta_size, meta_limit), f_bytes(meta_size))
+ prt_i2('Dnode cache size (hard limit):',
+ f_perc(dnode_limit, meta_limit), f_bytes(dnode_limit))
+ prt_i2('Dnode cache size (current):',
+ f_perc(dnode_size, dnode_limit), f_bytes(dnode_size))
+ print()
+
+ print('ARC hash breakdown:')
+ prt_i1('Elements max:', f_hits(arc_stats['hash_elements_max']))
+ prt_i2('Elements current:',
+ f_perc(arc_stats['hash_elements'], arc_stats['hash_elements_max']),
+ f_hits(arc_stats['hash_elements']))
+ prt_i1('Collisions:', f_hits(arc_stats['hash_collisions']))
+
+ prt_i1('Chain max:', f_hits(arc_stats['hash_chain_max']))
+ prt_i1('Chains:', f_hits(arc_stats['hash_chains']))
+ print()
+
+ print('ARC misc:')
+ prt_i1('Deleted:', f_hits(arc_stats['deleted']))
+ prt_i1('Mutex misses:', f_hits(arc_stats['mutex_miss']))
+ prt_i1('Eviction skips:', f_hits(arc_stats['evict_skip']))
+ print()
+
+
+def section_archits(kstats_dict):
+ """Print information on how the caches are accessed ("arc hits").
+ """
+
+ arc_stats = isolate_section('arcstats', kstats_dict)
+ all_accesses = int(arc_stats['hits'])+int(arc_stats['misses'])
+ actual_hits = int(arc_stats['mfu_hits'])+int(arc_stats['mru_hits'])
+
+ prt_1('ARC total accesses (hits + misses):', f_hits(all_accesses))
+ ta_todo = (('Cache hit ratio:', arc_stats['hits']),
+ ('Cache miss ratio:', arc_stats['misses']),
+ ('Actual hit ratio (MFU + MRU hits):', actual_hits))
+
+ for title, value in ta_todo:
+ prt_i2(title, f_perc(value, all_accesses), f_hits(value))
+
+ dd_total = int(arc_stats['demand_data_hits']) +\
+ int(arc_stats['demand_data_misses'])
+ prt_i2('Data demand efficiency:',
+ f_perc(arc_stats['demand_data_hits'], dd_total),
+ f_hits(dd_total))
+
+ dp_total = int(arc_stats['prefetch_data_hits']) +\
+ int(arc_stats['prefetch_data_misses'])
+ prt_i2('Data prefetch efficiency:',
+ f_perc(arc_stats['prefetch_data_hits'], dp_total),
+ f_hits(dp_total))
+
+ known_hits = int(arc_stats['mfu_hits']) +\
+ int(arc_stats['mru_hits']) +\
+ int(arc_stats['mfu_ghost_hits']) +\
+ int(arc_stats['mru_ghost_hits'])
+
+ anon_hits = int(arc_stats['hits'])-known_hits
+
+ print()
+ print('Cache hits by cache type:')
+ cl_todo = (('Most frequently used (MFU):', arc_stats['mfu_hits']),
+ ('Most recently used (MRU):', arc_stats['mru_hits']),
+ ('Most frequently used (MFU) ghost:',
+ arc_stats['mfu_ghost_hits']),
+ ('Most recently used (MRU) ghost:',
+ arc_stats['mru_ghost_hits']))
+
+ for title, value in cl_todo:
+ prt_i2(title, f_perc(value, arc_stats['hits']), f_hits(value))
+
+ # For some reason, anon_hits can turn negative, which is weird. Until we
+ # have figured out why this happens, we just hide the problem, following
+ # the behavior of the original arc_summary.
+ if anon_hits >= 0:
+ prt_i2('Anonymously used:',
+ f_perc(anon_hits, arc_stats['hits']), f_hits(anon_hits))
+
+ print()
+ print('Cache hits by data type:')
+ dt_todo = (('Demand data:', arc_stats['demand_data_hits']),
+ ('Demand prefetch data:', arc_stats['prefetch_data_hits']),
+ ('Demand metadata:', arc_stats['demand_metadata_hits']),
+ ('Demand prefetch metadata:',
+ arc_stats['prefetch_metadata_hits']))
+
+ for title, value in dt_todo:
+ prt_i2(title, f_perc(value, arc_stats['hits']), f_hits(value))
+
+ print()
+ print('Cache misses by data type:')
+ dm_todo = (('Demand data:', arc_stats['demand_data_misses']),
+ ('Demand prefetch data:',
+ arc_stats['prefetch_data_misses']),
+ ('Demand metadata:', arc_stats['demand_metadata_misses']),
+ ('Demand prefetch metadata:',
+ arc_stats['prefetch_metadata_misses']))
+
+ for title, value in dm_todo:
+ prt_i2(title, f_perc(value, arc_stats['misses']), f_hits(value))
+
+ print()
+
+
+def section_dmu(kstats_dict):
+ """Collect information on the DMU"""
+
+ zfetch_stats = isolate_section('zfetchstats', kstats_dict)
+
+ zfetch_access_total = int(zfetch_stats['hits'])+int(zfetch_stats['misses'])
+
+ prt_1('DMU prefetch efficiency:', f_hits(zfetch_access_total))
+ prt_i2('Hit ratio:', f_perc(zfetch_stats['hits'], zfetch_access_total),
+ f_hits(zfetch_stats['hits']))
+ prt_i2('Miss ratio:', f_perc(zfetch_stats['misses'], zfetch_access_total),
+ f_hits(zfetch_stats['misses']))
+ print()
+
+
+def section_l2arc(kstats_dict):
+ """Collect information on L2ARC device if present. If not, tell user
+ that we're skipping the section.
+ """
+
+ # The L2ARC statistics live in the same section as the normal ARC stuff
+ arc_stats = isolate_section('arcstats', kstats_dict)
+
+ if arc_stats['l2_size'] == '0':
+ print('L2ARC not detected, skipping section\n')
+ return
+
+ l2_errors = int(arc_stats['l2_writes_error']) +\
+ int(arc_stats['l2_cksum_bad']) +\
+ int(arc_stats['l2_io_error'])
+
+ l2_access_total = int(arc_stats['l2_hits'])+int(arc_stats['l2_misses'])
+ health = 'HEALTHY'
+
+ if l2_errors > 0:
+ health = 'DEGRADED'
+
+ prt_1('L2ARC status:', health)
+
+ l2_todo = (('Low memory aborts:', 'l2_abort_lowmem'),
+ ('Free on write:', 'l2_free_on_write'),
+ ('R/W clashes:', 'l2_rw_clash'),
+ ('Bad checksums:', 'l2_cksum_bad'),
+ ('I/O errors:', 'l2_io_error'))
+
+ for title, value in l2_todo:
+ prt_i1(title, f_hits(arc_stats[value]))
+
+ print()
+ prt_1('L2ARC size (adaptive):', f_bytes(arc_stats['l2_size']))
+ prt_i2('Compressed:', f_perc(arc_stats['l2_asize'], arc_stats['l2_size']),
+ f_bytes(arc_stats['l2_asize']))
+ prt_i2('Header size:',
+ f_perc(arc_stats['l2_hdr_size'], arc_stats['l2_size']),
+ f_bytes(arc_stats['l2_hdr_size']))
+
+ print()
+ prt_1('L2ARC breakdown:', f_hits(l2_access_total))
+ prt_i2('Hit ratio:',
+ f_perc(arc_stats['l2_hits'], l2_access_total),
+ f_hits(arc_stats['l2_hits']))
+ prt_i2('Miss ratio:',
+ f_perc(arc_stats['l2_misses'], l2_access_total),
+ f_hits(arc_stats['l2_misses']))
+ prt_i1('Feeds:', f_hits(arc_stats['l2_feeds']))
+
+ print()
+ print('L2ARC writes:')
+
+ if arc_stats['l2_writes_done'] != arc_stats['l2_writes_sent']:
+ prt_i2('Writes sent:', 'FAULTED', f_hits(arc_stats['l2_writes_sent']))
+ prt_i2('Done ratio:',
+ f_perc(arc_stats['l2_writes_done'],
+ arc_stats['l2_writes_sent']),
+ f_hits(arc_stats['l2_writes_done']))
+ prt_i2('Error ratio:',
+ f_perc(arc_stats['l2_writes_error'],
+ arc_stats['l2_writes_sent']),
+ f_hits(arc_stats['l2_writes_error']))
+ else:
+ prt_i2('Writes sent:', '100 %', f_hits(arc_stats['l2_writes_sent']))
+
+ print()
+ print('L2ARC evicts:')
+ prt_i1('Lock retries:', f_hits(arc_stats['l2_evict_lock_retry']))
+ prt_i1('Upon reading:', f_hits(arc_stats['l2_evict_reading']))
+ print()
+
+
+def section_spl(*_):
+ """Print the SPL parameters, if requested with alternative format
+ and/or descriptions. This does not use kstats.
+ """
+
+ spls = get_spl_tunables(SPL_PATH)
+ keylist = sorted(spls.keys())
+ print('Solaris Porting Layer (SPL):')
+
+ if ARGS.desc:
+ descriptions = get_descriptions('spl')
+
+ for key in keylist:
+ value = spls[key]
+
+ if ARGS.desc:
+ try:
+ print(INDENT+'#', descriptions[key])
+ except KeyError:
+ print(INDENT+'# (No description found)') # paranoid
+
+ print(format_raw_line(key, value))
+
+ print()
+
+
+def section_tunables(*_):
+ """Print the tunables, if requested with alternative format and/or
+ descriptions. This does not use kstats.
+ """
+
+ tunables = get_spl_tunables(TUNABLES_PATH)
+ keylist = sorted(tunables.keys())
+ print('Tunables:')
+
+ if ARGS.desc:
+ descriptions = get_descriptions('zfs')
+
+ for key in keylist:
+ value = tunables[key]
+
+ if ARGS.desc:
+ try:
+ print(INDENT+'#', descriptions[key])
+ except KeyError:
+ print(INDENT+'# (No description found)') # paranoid
+
+ print(format_raw_line(key, value))
+
+ print()
+
+
+def section_vdev(kstats_dict):
+ """Collect information on VDEV caches"""
+
+ # Currently [Nov 2017] the VDEV cache is disabled, because it is actually
+ # harmful. When this is the case, we just skip the whole entry. See
+ # https://github.com/zfsonlinux/zfs/blob/master/module/zfs/vdev_cache.c
+ # for details
+ tunables = get_spl_tunables(TUNABLES_PATH)
+
+ if tunables['zfs_vdev_cache_size'] == '0':
+ print('VDEV cache disabled, skipping section\n')
+ return
+
+ vdev_stats = isolate_section('vdev_cache_stats', kstats_dict)
+
+ vdev_cache_total = int(vdev_stats['hits']) +\
+ int(vdev_stats['misses']) +\
+ int(vdev_stats['delegations'])
+
+ prt_1('VDEV cache summary:', f_hits(vdev_cache_total))
+ prt_i2('Hit ratio:', f_perc(vdev_stats['hits'], vdev_cache_total),
+ f_hits(vdev_stats['hits']))
+ prt_i2('Miss ratio:', f_perc(vdev_stats['misses'], vdev_cache_total),
+ f_hits(vdev_stats['misses']))
+ prt_i2('Delegations:', f_perc(vdev_stats['delegations'], vdev_cache_total),
+ f_hits(vdev_stats['delegations']))
+ print()
+
+
+def section_zil(kstats_dict):
+ """Collect information on the ZFS Intent Log. Some of the information
+ taken from https://github.com/zfsonlinux/zfs/blob/master/include/sys/zil.h
+ """
+
+ zil_stats = isolate_section('zil', kstats_dict)
+
+ prt_1('ZIL committed transactions:',
+ f_hits(zil_stats['zil_itx_count']))
+ prt_i1('Commit requests:', f_hits(zil_stats['zil_commit_count']))
+ prt_i1('Flushes to stable storage:',
+ f_hits(zil_stats['zil_commit_writer_count']))
+ prt_i2('Transactions to SLOG storage pool:',
+ f_bytes(zil_stats['zil_itx_metaslab_slog_bytes']),
+ f_hits(zil_stats['zil_itx_metaslab_slog_count']))
+ prt_i2('Transactions to non-SLOG storage pool:',
+ f_bytes(zil_stats['zil_itx_metaslab_normal_bytes']),
+ f_hits(zil_stats['zil_itx_metaslab_normal_count']))
+ print()
+
+
+section_calls = {'arc': section_arc,
+ 'archits': section_archits,
+ 'dmu': section_dmu,
+ 'l2arc': section_l2arc,
+ 'spl': section_spl,
+ 'tunables': section_tunables,
+ 'vdev': section_vdev,
+ 'zil': section_zil}
+
+
+def main():
+ """Run program. The options to draw a graph and to print all data raw are
+ treated separately because they come with their own call.
+ """
+
+ kstats = get_kstats()
+
+ if ARGS.graph:
+ draw_graph(kstats)
+ sys.exit(0)
+
+ print_header()
+
+ if ARGS.raw:
+ print_raw(kstats)
+
+ elif ARGS.section:
+
+ try:
+ section_calls[ARGS.section](kstats)
+ except KeyError:
+ print('Error: Section "{0}" unknown'.format(ARGS.section))
+ sys.exit(1)
+
+ elif ARGS.page:
+ print('WARNING: Pages are deprecated, please use "--section"\n')
+
+ pages_to_calls = {1: 'arc',
+ 2: 'archits',
+ 3: 'l2arc',
+ 4: 'dmu',
+ 5: 'vdev',
+ 6: 'tunables'}
+
+ try:
+ call = pages_to_calls[ARGS.page]
+ except KeyError:
+ print('Error: Page "{0}" not supported'.format(ARGS.page))
+ sys.exit(1)
+ else:
+ section_calls[call](kstats)
+
+ else:
+ # If no parameters were given, we print all sections. We might want to
+ # change the sequence by hand
+ calls = sorted(section_calls.keys())
+
+ for section in calls:
+ section_calls[section](kstats)
+
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main()
+++ /dev/null
-#!/usr/bin/python3
-#
-# Copyright (c) 2008 Ben Rockwood <benr@cuddletech.com>,
-# Copyright (c) 2010 Martin Matuska <mm@FreeBSD.org>,
-# Copyright (c) 2010-2011 Jason J. Hellenthal <jhell@DataIX.net>,
-# Copyright (c) 2017 Scot W. Stevenson <scot.stevenson@gmail.com>
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-# SUCH DAMAGE.
-"""Print statistics on the ZFS ARC Cache and other information
-
-Provides basic information on the ARC, its efficiency, the L2ARC (if present),
-the Data Management Unit (DMU), Virtual Devices (VDEVs), and tunables. See
-the in-source documentation and code at
-https://github.com/zfsonlinux/zfs/blob/master/module/zfs/arc.c for details.
-The original introduction to arc_summary can be found at
-http://cuddletech.com/?p=454
-"""
-
-import argparse
-import os
-import subprocess
-import sys
-import time
-
-DECRIPTION = 'Print ARC and other statistics for ZFS on Linux'
-INDENT = ' '*8
-LINE_LENGTH = 72
-PROC_PATH = '/proc/spl/kstat/zfs/'
-SPL_PATH = '/sys/module/spl/parameters/'
-TUNABLES_PATH = '/sys/module/zfs/parameters/'
-DATE_FORMAT = '%a %b %d %H:%M:%S %Y'
-TITLE = 'ZFS Subsystem Report'
-
-SECTIONS = 'arc archits dmu l2arc spl tunables vdev zil'.split()
-SECTION_HELP = 'print info from one section ('+' '.join(SECTIONS)+')'
-
-# Tunables and SPL are handled separately because they come from
-# different sources
-SECTION_PATHS = {'arc': 'arcstats',
- 'dmu': 'dmu_tx',
- 'l2arc': 'arcstats', # L2ARC stuff lives in arcstats
- 'vdev': 'vdev_cache_stats',
- 'xuio': 'xuio_stats',
- 'zfetch': 'zfetchstats',
- 'zil': 'zil'}
-
-parser = argparse.ArgumentParser(description=DECRIPTION)
-parser.add_argument('-a', '--alternate', action='store_true', default=False,
- help='use alternate formatting for tunables and SPL',
- dest='alt')
-parser.add_argument('-d', '--description', action='store_true', default=False,
- help='print descriptions with tunables and SPL',
- dest='desc')
-parser.add_argument('-g', '--graph', action='store_true', default=False,
- help='print graph on ARC use and exit', dest='graph')
-parser.add_argument('-p', '--page', type=int, dest='page',
- help='print page by number (DEPRECATED, use "-s")')
-parser.add_argument('-r', '--raw', action='store_true', default=False,
- help='dump all available data with minimal formatting',
- dest='raw')
-parser.add_argument('-s', '--section', dest='section', help=SECTION_HELP)
-ARGS = parser.parse_args()
-
-
-def cleanup_line(single_line):
- """Format a raw line of data from /proc and isolate the name value
- part, returning a tuple with each. Currently, this gets rid of the
- middle '4'. For example "arc_no_grow 4 0" returns the tuple
- ("arc_no_grow", "0").
- """
- name, _, value = single_line.split()
-
- return name, value
-
-
-def draw_graph(kstats_dict):
- """Draw a primitive graph representing the basic information on the
- ARC -- its size and the proportion used by MFU and MRU -- and quit.
- We use max size of the ARC to calculate how full it is. This is a
- very rough representation.
- """
-
- arc_stats = isolate_section('arcstats', kstats_dict)
-
- GRAPH_INDENT = ' '*4
- GRAPH_WIDTH = 60
- arc_size = f_bytes(arc_stats['size'])
- arc_perc = f_perc(arc_stats['size'], arc_stats['c_max'])
- mfu_size = f_bytes(arc_stats['mfu_size'])
- mru_size = f_bytes(arc_stats['mru_size'])
- meta_limit = f_bytes(arc_stats['arc_meta_limit'])
- meta_size = f_bytes(arc_stats['arc_meta_used'])
- dnode_limit = f_bytes(arc_stats['arc_dnode_limit'])
- dnode_size = f_bytes(arc_stats['dnode_size'])
-
- info_form = ('ARC: {0} ({1}) MFU: {2} MRU: {3} META: {4} ({5}) '
- 'DNODE {6} ({7})')
- info_line = info_form.format(arc_size, arc_perc, mfu_size, mru_size,
- meta_size, meta_limit, dnode_size,
- dnode_limit)
- info_spc = ' '*int((GRAPH_WIDTH-len(info_line))/2)
- info_line = GRAPH_INDENT+info_spc+info_line
-
- graph_line = GRAPH_INDENT+'+'+('-'*(GRAPH_WIDTH-2))+'+'
-
- mfu_perc = float(int(arc_stats['mfu_size'])/int(arc_stats['c_max']))
- mru_perc = float(int(arc_stats['mru_size'])/int(arc_stats['c_max']))
- arc_perc = float(int(arc_stats['size'])/int(arc_stats['c_max']))
- total_ticks = float(arc_perc)*GRAPH_WIDTH
- mfu_ticks = mfu_perc*GRAPH_WIDTH
- mru_ticks = mru_perc*GRAPH_WIDTH
- other_ticks = total_ticks-(mfu_ticks+mru_ticks)
-
- core_form = 'F'*int(mfu_ticks)+'R'*int(mru_ticks)+'O'*int(other_ticks)
- core_spc = ' '*(GRAPH_WIDTH-(2+len(core_form)))
- core_line = GRAPH_INDENT+'|'+core_form+core_spc+'|'
-
- for line in ('', info_line, graph_line, core_line, graph_line, ''):
- print(line)
-
-
-def f_bytes(byte_string):
- """Return human-readable representation of a byte value in
- powers of 2 (eg "KiB" for "kibibytes", etc) to two decimal
- points. Values smaller than one KiB are returned without
- decimal points. Note "bytes" is a reserved keyword.
- """
-
- prefixes = ([2**80, "YiB"], # yobibytes (yotta)
- [2**70, "ZiB"], # zebibytes (zetta)
- [2**60, "EiB"], # exbibytes (exa)
- [2**50, "PiB"], # pebibytes (peta)
- [2**40, "TiB"], # tebibytes (tera)
- [2**30, "GiB"], # gibibytes (giga)
- [2**20, "MiB"], # mebibytes (mega)
- [2**10, "KiB"]) # kibibytes (kilo)
-
- bites = int(byte_string)
-
- if bites >= 2**10:
- for limit, unit in prefixes:
-
- if bites >= limit:
- value = bites / limit
- break
-
- result = '{0:.1f} {1}'.format(value, unit)
- else:
- result = '{0} Bytes'.format(bites)
-
- return result
-
-
-def f_hits(hits_string):
- """Create a human-readable representation of the number of hits.
- The single-letter symbols used are SI to avoid the confusion caused
- by the different "short scale" and "long scale" representations in
- English, which use the same words for different values. See
- https://en.wikipedia.org/wiki/Names_of_large_numbers and:
- https://physics.nist.gov/cuu/Units/prefixes.html
- """
-
- numbers = ([10**24, 'Y'], # yotta (septillion)
- [10**21, 'Z'], # zetta (sextillion)
- [10**18, 'E'], # exa (quintrillion)
- [10**15, 'P'], # peta (quadrillion)
- [10**12, 'T'], # tera (trillion)
- [10**9, 'G'], # giga (billion)
- [10**6, 'M'], # mega (million)
- [10**3, 'k']) # kilo (thousand)
-
- hits = int(hits_string)
-
- if hits >= 1000:
- for limit, symbol in numbers:
-
- if hits >= limit:
- value = hits/limit
- break
-
- result = "%0.1f%s" % (value, symbol)
- else:
- result = "%d" % hits
-
- return result
-
-
-def f_perc(value1, value2):
- """Calculate percentage and return in human-readable form. If
- rounding produces the result '0.0' though the first number is
- not zero, include a 'less-than' symbol to avoid confusion.
- Division by zero is handled by returning 'n/a'; no error
- is called.
- """
-
- v1 = float(value1)
- v2 = float(value2)
-
- try:
- perc = 100 * v1/v2
- except ZeroDivisionError:
- result = 'n/a'
- else:
- result = '{0:0.1f} %'.format(perc)
-
- if result == '0.0 %' and v1 > 0:
- result = '< 0.1 %'
-
- return result
-
-
-def format_raw_line(name, value):
- """For the --raw option for the tunable and SPL outputs, decide on the
- correct formatting based on the --alternate flag.
- """
-
- if ARGS.alt:
- result = '{0}{1}={2}'.format(INDENT, name, value)
- else:
- spc = LINE_LENGTH-(len(INDENT)+len(value))
- result = '{0}{1:<{spc}}{2}'.format(INDENT, name, value, spc=spc)
-
- return result
-
-
-def get_kstats():
- """Collect information on the ZFS subsystem from the /proc Linux virtual
- file system. The step does not perform any further processing, giving us
- the option to only work on what is actually needed. The name "kstat" is a
- holdover from the Solaris utility of the same name.
- """
-
- result = {}
- secs = SECTION_PATHS.values()
-
- for section in secs:
-
- with open(PROC_PATH+section, 'r') as proc_location:
- lines = [line for line in proc_location]
-
- del lines[0:2] # Get rid of header
- result[section] = lines
-
- return result
-
-
-def get_spl_tunables(PATH):
- """Collect information on the Solaris Porting Layer (SPL) or the
- tunables, depending on the PATH given. Does not check if PATH is
- legal.
- """
-
- result = {}
- parameters = os.listdir(PATH)
-
- for name in parameters:
-
- with open(PATH+name, 'r') as para_file:
- value = para_file.read()
- result[name] = value.strip()
-
- return result
-
-
-def get_descriptions(request):
- """Get the decriptions of the Solaris Porting Layer (SPL) or the
- tunables, return with minimal formatting.
- """
-
- if request not in ('spl', 'zfs'):
- print('ERROR: description of "{0}" requested)'.format(request))
- sys.exit(1)
-
- descs = {}
- target_prefix = 'parm:'
-
- # We would prefer to do this with /sys/modules -- see the discussion at
- # get_version() -- but there isn't a way to get the descriptions from
- # there, so we fall back on modinfo
- command = ["/sbin/modinfo", request, "-0"]
-
- # The recommended way to do this is with subprocess.run(). However,
- # some installed versions of Python are < 3.5, so we offer them
- # the option of doing it the old way (for now)
- info = ''
-
- try:
-
- if 'run' in dir(subprocess):
- info = subprocess.run(command, stdout=subprocess.PIPE,
- universal_newlines=True)
- raw_output = info.stdout.split('\0')
- else:
- info = subprocess.check_output(command, universal_newlines=True)
- raw_output = info.split('\0')
-
- except subprocess.CalledProcessError:
- print("Error: Descriptions not available (can't access kernel module)")
- sys.exit(1)
-
- for line in raw_output:
-
- if not line.startswith(target_prefix):
- continue
-
- line = line[len(target_prefix):].strip()
- name, raw_desc = line.split(':', 1)
- desc = raw_desc.rsplit('(', 1)[0]
-
- if desc == '':
- desc = '(No description found)'
-
- descs[name.strip()] = desc.strip()
-
- return descs
-
-
-def get_version(request):
- """Get the version number of ZFS or SPL on this machine for header.
- Returns an error string, but does not raise an error, if we can't
- get the ZFS/SPL version via modinfo.
- """
-
- if request not in ('spl', 'zfs'):
- error_msg = '(ERROR: "{0}" requested)'.format(request)
- return error_msg
-
- # The original arc_summary.py called /sbin/modinfo/{spl,zfs} to get
- # the version information. We switch to /sys/module/{spl,zfs}/version
- # to make sure we get what is really loaded in the kernel
- command = ["cat", "/sys/module/{0}/version".format(request)]
- req = request.upper()
- version = "(Can't get {0} version)".format(req)
-
- # The recommended way to do this is with subprocess.run(). However,
- # some installed versions of Python are < 3.5, so we offer them
- # the option of doing it the old way (for now)
- info = ''
- if 'run' in dir(subprocess):
- info = subprocess.run(command, stdout=subprocess.PIPE,
- universal_newlines=True)
- version = info.stdout.strip()
- else:
- info = subprocess.check_output(command, universal_newlines=True)
- version = info.strip()
-
- return version
-
-
-def print_header():
- """Print the initial heading with date and time as well as info on the
- Linux and ZFS versions. This is not called for the graph.
- """
-
- # datetime is now recommended over time but we keep the exact formatting
- # from the older version of arc_summary.py in case there are scripts
- # that expect it in this way
- daydate = time.strftime(DATE_FORMAT)
- spc_date = LINE_LENGTH-len(daydate)
- sys_version = os.uname()
-
- sys_msg = sys_version.sysname+' '+sys_version.release
- zfs = get_version('zfs')
- spc_zfs = LINE_LENGTH-len(zfs)
-
- machine_msg = 'Machine: '+sys_version.nodename+' ('+sys_version.machine+')'
- spl = get_version('spl')
- spc_spl = LINE_LENGTH-len(spl)
-
- print('\n'+('-'*LINE_LENGTH))
- print('{0:<{spc}}{1}'.format(TITLE, daydate, spc=spc_date))
- print('{0:<{spc}}{1}'.format(sys_msg, zfs, spc=spc_zfs))
- print('{0:<{spc}}{1}\n'.format(machine_msg, spl, spc=spc_spl))
-
-
-def print_raw(kstats_dict):
- """Print all available data from the system in a minimally sorted format.
- This can be used as a source to be piped through 'grep'.
- """
-
- sections = sorted(kstats_dict.keys())
-
- for section in sections:
-
- print('\n{0}:'.format(section.upper()))
- lines = sorted(kstats_dict[section])
-
- for line in lines:
- name, value = cleanup_line(line)
- print(format_raw_line(name, value))
-
- # Tunables and SPL must be handled separately because they come from a
- # different source and have descriptions the user might request
- print()
- section_spl()
- section_tunables()
-
-
-def isolate_section(section_name, kstats_dict):
- """From the complete information on all sections, retrieve only those
- for one section.
- """
-
- try:
- section_data = kstats_dict[section_name]
- except KeyError:
- print('ERROR: Data on {0} not available'.format(section_data))
- sys.exit(1)
-
- section_dict = dict(cleanup_line(l) for l in section_data)
-
- return section_dict
-
-
-# Formatted output helper functions
-
-
-def prt_1(text, value):
- """Print text and one value, no indent"""
- spc = ' '*(LINE_LENGTH-(len(text)+len(value)))
- print('{0}{spc}{1}'.format(text, value, spc=spc))
-
-
-def prt_i1(text, value):
- """Print text and one value, with indent"""
- spc = ' '*(LINE_LENGTH-(len(INDENT)+len(text)+len(value)))
- print(INDENT+'{0}{spc}{1}'.format(text, value, spc=spc))
-
-
-def prt_2(text, value1, value2):
- """Print text and two values, no indent"""
- values = '{0:>9} {1:>9}'.format(value1, value2)
- spc = ' '*(LINE_LENGTH-(len(text)+len(values)+2))
- print('{0}{spc} {1}'.format(text, values, spc=spc))
-
-
-def prt_i2(text, value1, value2):
- """Print text and two values, with indent"""
- values = '{0:>9} {1:>9}'.format(value1, value2)
- spc = ' '*(LINE_LENGTH-(len(INDENT)+len(text)+len(values)+2))
- print(INDENT+'{0}{spc} {1}'.format(text, values, spc=spc))
-
-
-# The section output concentrates on important parameters instead of
-# being exhaustive (that is what the --raw parameter is for)
-
-
-def section_arc(kstats_dict):
- """Give basic information on the ARC, MRU and MFU. This is the first
- and most used section.
- """
-
- arc_stats = isolate_section('arcstats', kstats_dict)
-
- throttle = arc_stats['memory_throttle_count']
-
- if throttle == '0':
- health = 'HEALTHY'
- else:
- health = 'THROTTLED'
-
- prt_1('ARC status:', health)
- prt_i1('Memory throttle count:', throttle)
- print()
-
- arc_size = arc_stats['size']
- arc_target_size = arc_stats['c']
- arc_max = arc_stats['c_max']
- arc_min = arc_stats['c_min']
- mfu_size = arc_stats['mfu_size']
- mru_size = arc_stats['mru_size']
- meta_limit = arc_stats['arc_meta_limit']
- meta_size = arc_stats['arc_meta_used']
- dnode_limit = arc_stats['arc_dnode_limit']
- dnode_size = arc_stats['dnode_size']
- target_size_ratio = '{0}:1'.format(int(arc_max) // int(arc_min))
-
- prt_2('ARC size (current):',
- f_perc(arc_size, arc_max), f_bytes(arc_size))
- prt_i2('Target size (adaptive):',
- f_perc(arc_target_size, arc_max), f_bytes(arc_target_size))
- prt_i2('Min size (hard limit):',
- f_perc(arc_min, arc_max), f_bytes(arc_min))
- prt_i2('Max size (high water):',
- target_size_ratio, f_bytes(arc_max))
- caches_size = int(mfu_size)+int(mru_size)
- prt_i2('Most Frequently Used (MFU) cache size:',
- f_perc(mfu_size, caches_size), f_bytes(mfu_size))
- prt_i2('Most Recently Used (MRU) cache size:',
- f_perc(mru_size, caches_size), f_bytes(mru_size))
- prt_i2('Metadata cache size (hard limit):',
- f_perc(meta_limit, arc_max), f_bytes(meta_limit))
- prt_i2('Metadata cache size (current):',
- f_perc(meta_size, meta_limit), f_bytes(meta_size))
- prt_i2('Dnode cache size (hard limit):',
- f_perc(dnode_limit, meta_limit), f_bytes(dnode_limit))
- prt_i2('Dnode cache size (current):',
- f_perc(dnode_size, dnode_limit), f_bytes(dnode_size))
- print()
-
- print('ARC hash breakdown:')
- prt_i1('Elements max:', f_hits(arc_stats['hash_elements_max']))
- prt_i2('Elements current:',
- f_perc(arc_stats['hash_elements'], arc_stats['hash_elements_max']),
- f_hits(arc_stats['hash_elements']))
- prt_i1('Collisions:', f_hits(arc_stats['hash_collisions']))
-
- prt_i1('Chain max:', f_hits(arc_stats['hash_chain_max']))
- prt_i1('Chains:', f_hits(arc_stats['hash_chains']))
- print()
-
- print('ARC misc:')
- prt_i1('Deleted:', f_hits(arc_stats['deleted']))
- prt_i1('Mutex misses:', f_hits(arc_stats['mutex_miss']))
- prt_i1('Eviction skips:', f_hits(arc_stats['evict_skip']))
- print()
-
-
-def section_archits(kstats_dict):
- """Print information on how the caches are accessed ("arc hits").
- """
-
- arc_stats = isolate_section('arcstats', kstats_dict)
- all_accesses = int(arc_stats['hits'])+int(arc_stats['misses'])
- actual_hits = int(arc_stats['mfu_hits'])+int(arc_stats['mru_hits'])
-
- prt_1('ARC total accesses (hits + misses):', f_hits(all_accesses))
- ta_todo = (('Cache hit ratio:', arc_stats['hits']),
- ('Cache miss ratio:', arc_stats['misses']),
- ('Actual hit ratio (MFU + MRU hits):', actual_hits))
-
- for title, value in ta_todo:
- prt_i2(title, f_perc(value, all_accesses), f_hits(value))
-
- dd_total = int(arc_stats['demand_data_hits']) +\
- int(arc_stats['demand_data_misses'])
- prt_i2('Data demand efficiency:',
- f_perc(arc_stats['demand_data_hits'], dd_total),
- f_hits(dd_total))
-
- dp_total = int(arc_stats['prefetch_data_hits']) +\
- int(arc_stats['prefetch_data_misses'])
- prt_i2('Data prefetch efficiency:',
- f_perc(arc_stats['prefetch_data_hits'], dp_total),
- f_hits(dp_total))
-
- known_hits = int(arc_stats['mfu_hits']) +\
- int(arc_stats['mru_hits']) +\
- int(arc_stats['mfu_ghost_hits']) +\
- int(arc_stats['mru_ghost_hits'])
-
- anon_hits = int(arc_stats['hits'])-known_hits
-
- print()
- print('Cache hits by cache type:')
- cl_todo = (('Most frequently used (MFU):', arc_stats['mfu_hits']),
- ('Most recently used (MRU):', arc_stats['mru_hits']),
- ('Most frequently used (MFU) ghost:',
- arc_stats['mfu_ghost_hits']),
- ('Most recently used (MRU) ghost:',
- arc_stats['mru_ghost_hits']))
-
- for title, value in cl_todo:
- prt_i2(title, f_perc(value, arc_stats['hits']), f_hits(value))
-
- # For some reason, anon_hits can turn negative, which is weird. Until we
- # have figured out why this happens, we just hide the problem, following
- # the behavior of the original arc_summary.py
- if anon_hits >= 0:
- prt_i2('Anonymously used:',
- f_perc(anon_hits, arc_stats['hits']), f_hits(anon_hits))
-
- print()
- print('Cache hits by data type:')
- dt_todo = (('Demand data:', arc_stats['demand_data_hits']),
- ('Demand perfetch data:', arc_stats['prefetch_data_hits']),
- ('Demand metadata:', arc_stats['demand_metadata_hits']),
- ('Demand prefetch metadata:',
- arc_stats['prefetch_metadata_hits']))
-
- for title, value in dt_todo:
- prt_i2(title, f_perc(value, arc_stats['hits']), f_hits(value))
-
- print()
- print('Cache misses by data type:')
- dm_todo = (('Demand data:', arc_stats['demand_data_misses']),
- ('Demand prefetch data:',
- arc_stats['prefetch_data_misses']),
- ('Demand metadata:', arc_stats['demand_metadata_misses']),
- ('Demand prefetch metadata:',
- arc_stats['prefetch_metadata_misses']))
-
- for title, value in dm_todo:
- prt_i2(title, f_perc(value, arc_stats['misses']), f_hits(value))
-
- print()
-
-
-def section_dmu(kstats_dict):
- """Collect information on the DMU"""
-
- zfetch_stats = isolate_section('zfetchstats', kstats_dict)
-
- zfetch_access_total = int(zfetch_stats['hits'])+int(zfetch_stats['misses'])
-
- prt_1('DMU prefetch efficiency:', f_hits(zfetch_access_total))
- prt_i2('Hit ratio:', f_perc(zfetch_stats['hits'], zfetch_access_total),
- f_hits(zfetch_stats['hits']))
- prt_i2('Miss ratio:', f_perc(zfetch_stats['misses'], zfetch_access_total),
- f_hits(zfetch_stats['misses']))
- print()
-
-
-def section_l2arc(kstats_dict):
- """Collect information on L2ARC device if present. If not, tell user
- that we're skipping the section.
- """
-
- # The L2ARC statistics live in the same section as the normal ARC stuff
- arc_stats = isolate_section('arcstats', kstats_dict)
-
- if arc_stats['l2_size'] == '0':
- print('L2ARC not detected, skipping section\n')
- return
-
- l2_errors = int(arc_stats['l2_writes_error']) +\
- int(arc_stats['l2_cksum_bad']) +\
- int(arc_stats['l2_io_error'])
-
- l2_access_total = int(arc_stats['l2_hits'])+int(arc_stats['l2_misses'])
- health = 'HEALTHY'
-
- if l2_errors > 0:
- health = 'DEGRADED'
-
- prt_1('L2ARC status:', health)
-
- l2_todo = (('Low memory aborts:', 'l2_abort_lowmem'),
- ('Free on write:', 'l2_free_on_write'),
- ('R/W clashes:', 'l2_rw_clash'),
- ('Bad checksums:', 'l2_cksum_bad'),
- ('I/O errors:', 'l2_io_error'))
-
- for title, value in l2_todo:
- prt_i1(title, f_hits(arc_stats[value]))
-
- print()
- prt_1('L2ARC size (adaptive):', f_bytes(arc_stats['l2_size']))
- prt_i2('Compressed:', f_perc(arc_stats['l2_asize'], arc_stats['l2_size']),
- f_bytes(arc_stats['l2_asize']))
- prt_i2('Header size:',
- f_perc(arc_stats['l2_hdr_size'], arc_stats['l2_size']),
- f_bytes(arc_stats['l2_hdr_size']))
-
- print()
- prt_1('L2ARC breakdown:', f_hits(l2_access_total))
- prt_i2('Hit ratio:',
- f_perc(arc_stats['l2_hits'], l2_access_total),
- f_bytes(arc_stats['l2_hits']))
- prt_i2('Miss ratio:',
- f_perc(arc_stats['l2_misses'], l2_access_total),
- f_bytes(arc_stats['l2_misses']))
- prt_i1('Feeds:', f_hits(arc_stats['l2_feeds']))
-
- print()
- print('L2ARC writes:')
-
- if arc_stats['l2_writes_done'] != arc_stats['l2_writes_sent']:
- prt_i2('Writes sent:', 'FAULTED', f_hits(arc_stats['l2_writes_sent']))
- prt_i2('Done ratio:',
- f_perc(arc_stats['l2_writes_done'],
- arc_stats['l2_writes_sent']),
- f_bytes(arc_stats['l2_writes_done']))
- prt_i2('Error ratio:',
- f_perc(arc_stats['l2_writes_error'],
- arc_stats['l2_writes_sent']),
- f_bytes(arc_stats['l2_writes_error']))
- else:
- prt_i2('Writes sent:', '100 %', f_bytes(arc_stats['l2_writes_sent']))
-
- print()
- print('L2ARC evicts:')
- prt_i1('Lock retries:', f_hits(arc_stats['l2_evict_lock_retry']))
- prt_i1('Upon reading:', f_hits(arc_stats['l2_evict_reading']))
- print()
-
-
-def section_spl(*_):
- """Print the SPL parameters, if requested with alternative format
- and/or decriptions. This does not use kstats.
- """
-
- spls = get_spl_tunables(SPL_PATH)
- keylist = sorted(spls.keys())
- print('Solaris Porting Layer (SPL):')
-
- if ARGS.desc:
- descriptions = get_descriptions('spl')
-
- for key in keylist:
- value = spls[key]
-
- if ARGS.desc:
- try:
- print(INDENT+'#', descriptions[key])
- except KeyError:
- print(INDENT+'# (No decription found)') # paranoid
-
- print(format_raw_line(key, value))
-
- print()
-
-
-def section_tunables(*_):
- """Print the tunables, if requested with alternative format and/or
- decriptions. This does not use kstasts.
- """
-
- tunables = get_spl_tunables(TUNABLES_PATH)
- keylist = sorted(tunables.keys())
- print('Tunables:')
-
- if ARGS.desc:
- descriptions = get_descriptions('zfs')
-
- for key in keylist:
- value = tunables[key]
-
- if ARGS.desc:
- try:
- print(INDENT+'#', descriptions[key])
- except KeyError:
- print(INDENT+'# (No decription found)') # paranoid
-
- print(format_raw_line(key, value))
-
- print()
-
-
-def section_vdev(kstats_dict):
- """Collect information on VDEV caches"""
-
- # Currently [Nov 2017] the VDEV cache is disabled, because it is actually
- # harmful. When this is the case, we just skip the whole entry. See
- # https://github.com/zfsonlinux/zfs/blob/master/module/zfs/vdev_cache.c
- # for details
- tunables = get_spl_tunables(TUNABLES_PATH)
-
- if tunables['zfs_vdev_cache_size'] == '0':
- print('VDEV cache disabled, skipping section\n')
- return
-
- vdev_stats = isolate_section('vdev_cache_stats', kstats_dict)
-
- vdev_cache_total = int(vdev_stats['hits']) +\
- int(vdev_stats['misses']) +\
- int(vdev_stats['delegations'])
-
- prt_1('VDEV cache summary:', f_hits(vdev_cache_total))
- prt_i2('Hit ratio:', f_perc(vdev_stats['hits'], vdev_cache_total),
- f_hits(vdev_stats['hits']))
- prt_i2('Miss ratio:', f_perc(vdev_stats['misses'], vdev_cache_total),
- f_hits(vdev_stats['misses']))
- prt_i2('Delegations:', f_perc(vdev_stats['delegations'], vdev_cache_total),
- f_hits(vdev_stats['delegations']))
- print()
-
-
-def section_zil(kstats_dict):
- """Collect information on the ZFS Intent Log. Some of the information
- taken from https://github.com/zfsonlinux/zfs/blob/master/include/sys/zil.h
- """
-
- zil_stats = isolate_section('zil', kstats_dict)
-
- prt_1('ZIL committed transactions:',
- f_hits(zil_stats['zil_itx_count']))
- prt_i1('Commit requests:', f_hits(zil_stats['zil_commit_count']))
- prt_i1('Flushes to stable storage:',
- f_hits(zil_stats['zil_commit_writer_count']))
- prt_i2('Transactions to SLOG storage pool:',
- f_bytes(zil_stats['zil_itx_metaslab_slog_bytes']),
- f_hits(zil_stats['zil_itx_metaslab_slog_count']))
- prt_i2('Transactions to non-SLOG storage pool:',
- f_bytes(zil_stats['zil_itx_metaslab_normal_bytes']),
- f_hits(zil_stats['zil_itx_metaslab_normal_count']))
- print()
-
-
-section_calls = {'arc': section_arc,
- 'archits': section_archits,
- 'dmu': section_dmu,
- 'l2arc': section_l2arc,
- 'spl': section_spl,
- 'tunables': section_tunables,
- 'vdev': section_vdev,
- 'zil': section_zil}
-
-
-def main():
- """Run program. The options to draw a graph and to print all data raw are
- treated separately because they come with their own call.
- """
-
- kstats = get_kstats()
-
- if ARGS.graph:
- draw_graph(kstats)
- sys.exit(0)
-
- print_header()
-
- if ARGS.raw:
- print_raw(kstats)
-
- elif ARGS.section:
-
- try:
- section_calls[ARGS.section](kstats)
- except KeyError:
- print('Error: Section "{0}" unknown'.format(ARGS.section))
- sys.exit(1)
-
- elif ARGS.page:
- print('WARNING: Pages are deprecated, please use "--section"\n')
-
- pages_to_calls = {1: 'arc',
- 2: 'archits',
- 3: 'l2arc',
- 4: 'dmu',
- 5: 'vdev',
- 6: 'tunables'}
-
- try:
- call = pages_to_calls[ARGS.page]
- except KeyError:
- print('Error: Page "{0}" not supported'.format(ARGS.page))
- sys.exit(1)
- else:
- section_calls[call](kstats)
-
- else:
- # If no parameters were given, we print all sections. We might want to
- # change the sequence by hand
- calls = sorted(section_calls.keys())
-
- for section in calls:
- section_calls[section](kstats)
-
- sys.exit(0)
-
-
-if __name__ == '__main__':
- main()
-dist_bin_SCRIPTS = arcstat.py
+dist_bin_SCRIPTS = arcstat
+
+#
+# The arcstat script is compatible with both Python 2.6 and 3.4.
+# As such, the Python 3 shebang can be replaced at install time when
+# targeting a Python 2 system. This allows us to maintain a single
+# version of the source.
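+# For example, on a Python 2 only system the installed copy of arcstat
+# will begin with "#!/usr/bin/python2" rather than "#!/usr/bin/python3".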
+#
+if USING_PYTHON_2
+install-exec-hook:
+ sed --in-place 's|^#!/usr/bin/python3|#!/usr/bin/python2|' \
+ $(DESTDIR)$(bindir)/arcstat
+endif
--- /dev/null
+#!/usr/bin/python3
+#
+# Print out ZFS ARC Statistics exported via kstat(1)
+# For a definition of fields, or usage, use arcstat -v
+#
+# This script is a fork of the original arcstat.pl (0.1) by
+# Neelakanth Nadgir, originally published on his Sun blog on
+# 09/18/2007
+# http://blogs.sun.com/realneel/entry/zfs_arc_statistics
+#
+# This version aims to improve upon the original by adding features
+# and fixing bugs as needed. This version is maintained by
+# Mike Harsch and is hosted in a public open source repository:
+# http://github.com/mharsch/arcstat
+#
+# Comments, Questions, or Suggestions are always welcome.
+# Contact the maintainer at ( mike at harschsystems dot com )
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Fields have a fixed width. Every interval, we fill the "v"
+# hash with its corresponding value (v[field]=value) using calculate().
+# @hdr is the array of fields that needs to be printed, so we
+# just iterate over this array and print the values using our pretty printer.
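+# For example, the "hits" column is declared as cols["hits"] = [4, 1000, ...];
+# every interval calculate() sets v["hits"] = d["hits"] / sint and
+# print_values() renders it with prettynum(4, 1000, v["hits"]).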
+#
+# This script must remain compatible with Python 2.6+ and Python 3.4+.
+#
+
+import sys
+import time
+import getopt
+import re
+import copy
+
+from decimal import Decimal
+from signal import signal, SIGINT, SIGWINCH, SIG_DFL
+
+cols = {
+ # HDR: [Size, Scale, Description]
+ "time": [8, -1, "Time"],
+ "hits": [4, 1000, "ARC reads per second"],
+ "miss": [4, 1000, "ARC misses per second"],
+ "read": [4, 1000, "Total ARC accesses per second"],
+ "hit%": [4, 100, "ARC Hit percentage"],
+ "miss%": [5, 100, "ARC miss percentage"],
+ "dhit": [4, 1000, "Demand hits per second"],
+ "dmis": [4, 1000, "Demand misses per second"],
+ "dh%": [3, 100, "Demand hit percentage"],
+ "dm%": [3, 100, "Demand miss percentage"],
+ "phit": [4, 1000, "Prefetch hits per second"],
+ "pmis": [4, 1000, "Prefetch misses per second"],
+ "ph%": [3, 100, "Prefetch hits percentage"],
+ "pm%": [3, 100, "Prefetch miss percentage"],
+ "mhit": [4, 1000, "Metadata hits per second"],
+ "mmis": [4, 1000, "Metadata misses per second"],
+ "mread": [5, 1000, "Metadata accesses per second"],
+ "mh%": [3, 100, "Metadata hit percentage"],
+ "mm%": [3, 100, "Metadata miss percentage"],
+ "arcsz": [5, 1024, "ARC Size"],
+ "c": [4, 1024, "ARC Target Size"],
+ "mfu": [4, 1000, "MFU List hits per second"],
+ "mru": [4, 1000, "MRU List hits per second"],
+ "mfug": [4, 1000, "MFU Ghost List hits per second"],
+ "mrug": [4, 1000, "MRU Ghost List hits per second"],
+ "eskip": [5, 1000, "evict_skip per second"],
+ "mtxmis": [6, 1000, "mutex_miss per second"],
+ "dread": [5, 1000, "Demand accesses per second"],
+ "pread": [5, 1000, "Prefetch accesses per second"],
+ "l2hits": [6, 1000, "L2ARC hits per second"],
+ "l2miss": [6, 1000, "L2ARC misses per second"],
+ "l2read": [6, 1000, "Total L2ARC accesses per second"],
+ "l2hit%": [6, 100, "L2ARC access hit percentage"],
+ "l2miss%": [7, 100, "L2ARC access miss percentage"],
+ "l2asize": [7, 1024, "Actual (compressed) size of the L2ARC"],
+ "l2size": [6, 1024, "Size of the L2ARC"],
+ "l2bytes": [7, 1024, "bytes read per second from the L2ARC"],
+ "grow": [4, 1000, "ARC Grow disabled"],
+ "need": [4, 1024, "ARC Reclaim need"],
+ "free": [4, 1024, "ARC Free memory"],
+}
+
+v = {}
+hdr = ["time", "read", "miss", "miss%", "dmis", "dm%", "pmis", "pm%", "mmis",
+ "mm%", "arcsz", "c"]
+xhdr = ["time", "mfu", "mru", "mfug", "mrug", "eskip", "mtxmis", "dread",
+ "pread", "read"]
+sint = 1 # Default interval is 1 second
+count = 1 # Default count is 1
+hdr_intr = 20 # Print header every 20 lines of output
+opfile = None
+sep = " " # Default separator is 2 spaces
+version = "0.4"
+l2exist = False
+cmd = ("Usage: arcstat [-hvx] [-f fields] [-o file] [-s string] [interval "
+ "[count]]\n")
+cur = {}
+d = {}
+out = None
+kstat = None
+
+
+def detailed_usage():
+ sys.stderr.write("%s\n" % cmd)
+ sys.stderr.write("Field definitions are as follows:\n")
+ for key in cols:
+ sys.stderr.write("%11s : %s\n" % (key, cols[key][2]))
+ sys.stderr.write("\n")
+
+ sys.exit(0)
+
+
+def usage():
+ sys.stderr.write("%s\n" % cmd)
+ sys.stderr.write("\t -h : Print this help message\n")
+ sys.stderr.write("\t -v : List all possible field headers and definitions"
+ "\n")
+ sys.stderr.write("\t -x : Print extended stats\n")
+ sys.stderr.write("\t -f : Specify specific fields to print (see -v)\n")
+ sys.stderr.write("\t -o : Redirect output to the specified file\n")
+ sys.stderr.write("\t -s : Override default field separator with custom "
+ "character or string\n")
+ sys.stderr.write("\nExamples:\n")
+ sys.stderr.write("\tarcstat -o /tmp/a.log 2 10\n")
+ sys.stderr.write("\tarcstat -s \",\" -o /tmp/a.log 2 10\n")
+ sys.stderr.write("\tarcstat -v\n")
+ sys.stderr.write("\tarcstat -f time,hit%,dh%,ph%,mh% 1\n")
+ sys.stderr.write("\n")
+
+ sys.exit(1)
+
+
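+# Re-read /proc/spl/kstat/zfs/arcstats into the global "kstat" dictionary,
+# dropping the two header lines and storing each name/value pair as a Decimal.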
+def kstat_update():
+ global kstat
+
+ k = [line.strip() for line in open('/proc/spl/kstat/zfs/arcstats')]
+
+ if not k:
+ sys.exit(1)
+
+ del k[0:2]
+ kstat = {}
+
+ for s in k:
+ if not s:
+ continue
+
+ name, unused, value = s.split()
+ kstat[name] = Decimal(value)
+
+
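+# Take a fresh kstat snapshot and record per-key deltas against the previous
+# snapshot in the global "d" dictionary; absolute values stay in "cur".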
+def snap_stats():
+ global cur
+ global kstat
+
+ prev = copy.deepcopy(cur)
+ kstat_update()
+
+ cur = kstat
+ for key in cur:
+        if key == "class":
+ continue
+ if key in prev:
+ d[key] = cur[key] - prev[key]
+ else:
+ d[key] = cur[key]
+
+
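+# Format "num" into a column "sz" characters wide, dividing by "scale"
+# (1000 or 1024) and appending a K/M/G/... suffix as needed; a scale of -1
+# (the time column) is passed through unformatted.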
+def prettynum(sz, scale, num=0):
+ suffix = [' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']
+ index = 0
+ save = 0
+
+ # Special case for date field
+ if scale == -1:
+ return "%s" % num
+
+ # Rounding error, return 0
+ elif 0 < num < 1:
+ num = 0
+
+ while num > scale and index < 5:
+ save = num
+ num = num / scale
+ index += 1
+
+ if index == 0:
+ return "%*d" % (sz, num)
+
+ if (save / scale) < 10:
+ return "%*.1f%s" % (sz - 1, num, suffix[index])
+ else:
+ return "%*d%s" % (sz - 1, num, suffix[index])
+
+
+def print_values():
+ global hdr
+ global sep
+ global v
+
+ for col in hdr:
+ sys.stdout.write("%s%s" % (
+ prettynum(cols[col][0], cols[col][1], v[col]),
+ sep
+ ))
+ sys.stdout.write("\n")
+ sys.stdout.flush()
+
+
+def print_header():
+ global hdr
+ global sep
+
+ for col in hdr:
+ sys.stdout.write("%*s%s" % (cols[col][0], col, sep))
+ sys.stdout.write("\n")
+
+
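+# Return the number of rows of the controlling terminal (via the TIOCGWINSZ
+# ioctl), or None if stdout is not a terminal.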
+def get_terminal_lines():
+ try:
+ import fcntl
+ import termios
+ import struct
+ data = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, '1234')
+ sz = struct.unpack('hh', data)
+ return sz[0]
+ except Exception:
+ pass
+
+
+def update_hdr_intr():
+ global hdr_intr
+
+ lines = get_terminal_lines()
+ if lines and lines > 3:
+ hdr_intr = lines - 3
+
+
+def resize_handler(signum, frame):
+ update_hdr_intr()
+
+
+def init():
+ global sint
+ global count
+ global hdr
+ global xhdr
+ global opfile
+ global sep
+ global out
+ global l2exist
+
+ desired_cols = None
+ xflag = False
+ hflag = False
+ vflag = False
+ i = 1
+
+ try:
+ opts, args = getopt.getopt(
+ sys.argv[1:],
+ "xo:hvs:f:",
+ [
+ "extended",
+ "outfile",
+ "help",
+ "verbose",
+ "separator",
+ "columns"
+ ]
+ )
+ except getopt.error as msg:
+ sys.stderr.write("Error: %s\n" % str(msg))
+ usage()
+ opts = None
+
+ for opt, arg in opts:
+ if opt in ('-x', '--extended'):
+ xflag = True
+ if opt in ('-o', '--outfile'):
+ opfile = arg
+ i += 1
+ if opt in ('-h', '--help'):
+ hflag = True
+ if opt in ('-v', '--verbose'):
+ vflag = True
+ if opt in ('-s', '--separator'):
+ sep = arg
+ i += 1
+ if opt in ('-f', '--columns'):
+ desired_cols = arg
+ i += 1
+ i += 1
+
+ argv = sys.argv[i:]
+ sint = Decimal(argv[0]) if argv else sint
+ count = int(argv[1]) if len(argv) > 1 else count
+
+ if len(argv) > 1:
+ sint = Decimal(argv[0])
+ count = int(argv[1])
+
+ elif len(argv) > 0:
+ sint = Decimal(argv[0])
+ count = 0
+
+ if hflag or (xflag and desired_cols):
+ usage()
+
+ if vflag:
+ detailed_usage()
+
+ if xflag:
+ hdr = xhdr
+
+ update_hdr_intr()
+
+ # check if L2ARC exists
+ snap_stats()
+ l2_size = cur.get("l2_size")
+ if l2_size:
+ l2exist = True
+
+ if desired_cols:
+ hdr = desired_cols.split(",")
+
+ invalid = []
+ incompat = []
+ for ele in hdr:
+ if ele not in cols:
+ invalid.append(ele)
+ elif not l2exist and ele.startswith("l2"):
+ sys.stdout.write("No L2ARC Here\n%s\n" % ele)
+ incompat.append(ele)
+
+ if len(invalid) > 0:
+ sys.stderr.write("Invalid column definition! -- %s\n" % invalid)
+ usage()
+
+ if len(incompat) > 0:
+ sys.stderr.write("Incompatible field specified! -- %s\n" %
+ incompat)
+ usage()
+
+ if opfile:
+ try:
+ out = open(opfile, "w")
+ sys.stdout = out
+
+ except IOError:
+ sys.stderr.write("Cannot open %s for writing\n" % opfile)
+ sys.exit(1)
+
+
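+# Turn the per-interval deltas in "d" into the values that get printed:
+# rates are delta / sint, the hit/miss percentages are derived from those
+# rates, and point-in-time values (arcsz, c, l2size, ...) come from "cur".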
+def calculate():
+ global d
+ global v
+ global l2exist
+
+ v = dict()
+ v["time"] = time.strftime("%H:%M:%S", time.localtime())
+ v["hits"] = d["hits"] / sint
+ v["miss"] = d["misses"] / sint
+ v["read"] = v["hits"] + v["miss"]
+ v["hit%"] = 100 * v["hits"] / v["read"] if v["read"] > 0 else 0
+ v["miss%"] = 100 - v["hit%"] if v["read"] > 0 else 0
+
+ v["dhit"] = (d["demand_data_hits"] + d["demand_metadata_hits"]) / sint
+ v["dmis"] = (d["demand_data_misses"] + d["demand_metadata_misses"]) / sint
+
+ v["dread"] = v["dhit"] + v["dmis"]
+ v["dh%"] = 100 * v["dhit"] / v["dread"] if v["dread"] > 0 else 0
+ v["dm%"] = 100 - v["dh%"] if v["dread"] > 0 else 0
+
+ v["phit"] = (d["prefetch_data_hits"] + d["prefetch_metadata_hits"]) / sint
+ v["pmis"] = (d["prefetch_data_misses"] +
+ d["prefetch_metadata_misses"]) / sint
+
+ v["pread"] = v["phit"] + v["pmis"]
+ v["ph%"] = 100 * v["phit"] / v["pread"] if v["pread"] > 0 else 0
+ v["pm%"] = 100 - v["ph%"] if v["pread"] > 0 else 0
+
+ v["mhit"] = (d["prefetch_metadata_hits"] +
+ d["demand_metadata_hits"]) / sint
+ v["mmis"] = (d["prefetch_metadata_misses"] +
+ d["demand_metadata_misses"]) / sint
+
+ v["mread"] = v["mhit"] + v["mmis"]
+ v["mh%"] = 100 * v["mhit"] / v["mread"] if v["mread"] > 0 else 0
+ v["mm%"] = 100 - v["mh%"] if v["mread"] > 0 else 0
+
+ v["arcsz"] = cur["size"]
+ v["c"] = cur["c"]
+ v["mfu"] = d["mfu_hits"] / sint
+ v["mru"] = d["mru_hits"] / sint
+ v["mrug"] = d["mru_ghost_hits"] / sint
+ v["mfug"] = d["mfu_ghost_hits"] / sint
+ v["eskip"] = d["evict_skip"] / sint
+ v["mtxmis"] = d["mutex_miss"] / sint
+
+ if l2exist:
+ v["l2hits"] = d["l2_hits"] / sint
+ v["l2miss"] = d["l2_misses"] / sint
+ v["l2read"] = v["l2hits"] + v["l2miss"]
+ v["l2hit%"] = 100 * v["l2hits"] / v["l2read"] if v["l2read"] > 0 else 0
+
+ v["l2miss%"] = 100 - v["l2hit%"] if v["l2read"] > 0 else 0
+ v["l2asize"] = cur["l2_asize"]
+ v["l2size"] = cur["l2_size"]
+ v["l2bytes"] = d["l2_read_bytes"] / sint
+
+ v["grow"] = 0 if cur["arc_no_grow"] else 1
+ v["need"] = cur["arc_need_free"]
+ v["free"] = cur["arc_sys_free"]
+
+
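+# Sampling loop: print a header, then one line of statistics per interval,
+# repeating the header every hdr_intr lines and stopping after "count"
+# samples (a count of 0 runs until interrupted).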
+def main():
+ global sint
+ global count
+ global hdr_intr
+
+ i = 0
+ count_flag = 0
+
+ init()
+ if count > 0:
+ count_flag = 1
+
+ signal(SIGINT, SIG_DFL)
+ signal(SIGWINCH, resize_handler)
+ while True:
+ if i == 0:
+ print_header()
+
+ snap_stats()
+ calculate()
+ print_values()
+
+ if count_flag == 1:
+ if count <= 1:
+ break
+ count -= 1
+
+ i = 0 if i >= hdr_intr else i + 1
+ time.sleep(sint)
+
+ if out:
+ out.close()
+
+
+if __name__ == '__main__':
+ main()
+++ /dev/null
-#!/usr/bin/python
-#
-# Print out ZFS ARC Statistics exported via kstat(1)
-# For a definition of fields, or usage, use arctstat.pl -v
-#
-# This script is a fork of the original arcstat.pl (0.1) by
-# Neelakanth Nadgir, originally published on his Sun blog on
-# 09/18/2007
-# http://blogs.sun.com/realneel/entry/zfs_arc_statistics
-#
-# This version aims to improve upon the original by adding features
-# and fixing bugs as needed. This version is maintained by
-# Mike Harsch and is hosted in a public open source repository:
-# http://github.com/mharsch/arcstat
-#
-# Comments, Questions, or Suggestions are always welcome.
-# Contact the maintainer at ( mike at harschsystems dot com )
-#
-# CDDL HEADER START
-#
-# The contents of this file are subject to the terms of the
-# Common Development and Distribution License, Version 1.0 only
-# (the "License"). You may not use this file except in compliance
-# with the License.
-#
-# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-# or http://www.opensolaris.org/os/licensing.
-# See the License for the specific language governing permissions
-# and limitations under the License.
-#
-# When distributing Covered Code, include this CDDL HEADER in each
-# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-# If applicable, add the following below this CDDL HEADER, with the
-# fields enclosed by brackets "[]" replaced with your own identifying
-# information: Portions Copyright [yyyy] [name of copyright owner]
-#
-# CDDL HEADER END
-#
-#
-# Fields have a fixed width. Every interval, we fill the "v"
-# hash with its corresponding value (v[field]=value) using calculate().
-# @hdr is the array of fields that needs to be printed, so we
-# just iterate over this array and print the values using our pretty printer.
-#
-
-
-import sys
-import time
-import getopt
-import re
-import copy
-
-from decimal import Decimal
-from signal import signal, SIGINT, SIGWINCH, SIG_DFL
-
-cols = {
- # HDR: [Size, Scale, Description]
- "time": [8, -1, "Time"],
- "hits": [4, 1000, "ARC reads per second"],
- "miss": [4, 1000, "ARC misses per second"],
- "read": [4, 1000, "Total ARC accesses per second"],
- "hit%": [4, 100, "ARC Hit percentage"],
- "miss%": [5, 100, "ARC miss percentage"],
- "dhit": [4, 1000, "Demand hits per second"],
- "dmis": [4, 1000, "Demand misses per second"],
- "dh%": [3, 100, "Demand hit percentage"],
- "dm%": [3, 100, "Demand miss percentage"],
- "phit": [4, 1000, "Prefetch hits per second"],
- "pmis": [4, 1000, "Prefetch misses per second"],
- "ph%": [3, 100, "Prefetch hits percentage"],
- "pm%": [3, 100, "Prefetch miss percentage"],
- "mhit": [4, 1000, "Metadata hits per second"],
- "mmis": [4, 1000, "Metadata misses per second"],
- "mread": [5, 1000, "Metadata accesses per second"],
- "mh%": [3, 100, "Metadata hit percentage"],
- "mm%": [3, 100, "Metadata miss percentage"],
- "arcsz": [5, 1024, "ARC Size"],
- "c": [4, 1024, "ARC Target Size"],
- "mfu": [4, 1000, "MFU List hits per second"],
- "mru": [4, 1000, "MRU List hits per second"],
- "mfug": [4, 1000, "MFU Ghost List hits per second"],
- "mrug": [4, 1000, "MRU Ghost List hits per second"],
- "eskip": [5, 1000, "evict_skip per second"],
- "mtxmis": [6, 1000, "mutex_miss per second"],
- "dread": [5, 1000, "Demand accesses per second"],
- "pread": [5, 1000, "Prefetch accesses per second"],
- "l2hits": [6, 1000, "L2ARC hits per second"],
- "l2miss": [6, 1000, "L2ARC misses per second"],
- "l2read": [6, 1000, "Total L2ARC accesses per second"],
- "l2hit%": [6, 100, "L2ARC access hit percentage"],
- "l2miss%": [7, 100, "L2ARC access miss percentage"],
- "l2asize": [7, 1024, "Actual (compressed) size of the L2ARC"],
- "l2size": [6, 1024, "Size of the L2ARC"],
- "l2bytes": [7, 1024, "bytes read per second from the L2ARC"],
- "grow": [4, 1000, "ARC Grow disabled"],
- "need": [4, 1024, "ARC Reclaim need"],
- "free": [4, 1024, "ARC Free memory"],
-}
-
-v = {}
-hdr = ["time", "read", "miss", "miss%", "dmis", "dm%", "pmis", "pm%", "mmis",
- "mm%", "arcsz", "c"]
-xhdr = ["time", "mfu", "mru", "mfug", "mrug", "eskip", "mtxmis", "dread",
- "pread", "read"]
-sint = 1 # Default interval is 1 second
-count = 1 # Default count is 1
-hdr_intr = 20 # Print header every 20 lines of output
-opfile = None
-sep = " " # Default separator is 2 spaces
-version = "0.4"
-l2exist = False
-cmd = ("Usage: arcstat.py [-hvx] [-f fields] [-o file] [-s string] [interval "
- "[count]]\n")
-cur = {}
-d = {}
-out = None
-kstat = None
-
-
-def detailed_usage():
- sys.stderr.write("%s\n" % cmd)
- sys.stderr.write("Field definitions are as follows:\n")
- for key in cols:
- sys.stderr.write("%11s : %s\n" % (key, cols[key][2]))
- sys.stderr.write("\n")
-
- sys.exit(0)
-
-
-def usage():
- sys.stderr.write("%s\n" % cmd)
- sys.stderr.write("\t -h : Print this help message\n")
- sys.stderr.write("\t -v : List all possible field headers and definitions"
- "\n")
- sys.stderr.write("\t -x : Print extended stats\n")
- sys.stderr.write("\t -f : Specify specific fields to print (see -v)\n")
- sys.stderr.write("\t -o : Redirect output to the specified file\n")
- sys.stderr.write("\t -s : Override default field separator with custom "
- "character or string\n")
- sys.stderr.write("\nExamples:\n")
- sys.stderr.write("\tarcstat.py -o /tmp/a.log 2 10\n")
- sys.stderr.write("\tarcstat.py -s \",\" -o /tmp/a.log 2 10\n")
- sys.stderr.write("\tarcstat.py -v\n")
- sys.stderr.write("\tarcstat.py -f time,hit%,dh%,ph%,mh% 1\n")
- sys.stderr.write("\n")
-
- sys.exit(1)
-
-
-def kstat_update():
- global kstat
-
- k = [line.strip() for line in open('/proc/spl/kstat/zfs/arcstats')]
-
- if not k:
- sys.exit(1)
-
- del k[0:2]
- kstat = {}
-
- for s in k:
- if not s:
- continue
-
- name, unused, value = s.split()
- kstat[name] = Decimal(value)
-
-
-def snap_stats():
- global cur
- global kstat
-
- prev = copy.deepcopy(cur)
- kstat_update()
-
- cur = kstat
- for key in cur:
- if re.match(key, "class"):
- continue
- if key in prev:
- d[key] = cur[key] - prev[key]
- else:
- d[key] = cur[key]
-
-
-def prettynum(sz, scale, num=0):
- suffix = [' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']
- index = 0
- save = 0
-
- # Special case for date field
- if scale == -1:
- return "%s" % num
-
- # Rounding error, return 0
- elif 0 < num < 1:
- num = 0
-
- while num > scale and index < 5:
- save = num
- num = num / scale
- index += 1
-
- if index == 0:
- return "%*d" % (sz, num)
-
- if (save / scale) < 10:
- return "%*.1f%s" % (sz - 1, num, suffix[index])
- else:
- return "%*d%s" % (sz - 1, num, suffix[index])
-
-
-def print_values():
- global hdr
- global sep
- global v
-
- for col in hdr:
- sys.stdout.write("%s%s" % (
- prettynum(cols[col][0], cols[col][1], v[col]),
- sep
- ))
- sys.stdout.write("\n")
- sys.stdout.flush()
-
-
-def print_header():
- global hdr
- global sep
-
- for col in hdr:
- sys.stdout.write("%*s%s" % (cols[col][0], col, sep))
- sys.stdout.write("\n")
-
-
-def get_terminal_lines():
- try:
- import fcntl
- import termios
- import struct
- data = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, '1234')
- sz = struct.unpack('hh', data)
- return sz[0]
- except Exception:
- pass
-
-
-def update_hdr_intr():
- global hdr_intr
-
- lines = get_terminal_lines()
- if lines and lines > 3:
- hdr_intr = lines - 3
-
-
-def resize_handler(signum, frame):
- update_hdr_intr()
-
-
-def init():
- global sint
- global count
- global hdr
- global xhdr
- global opfile
- global sep
- global out
- global l2exist
-
- desired_cols = None
- xflag = False
- hflag = False
- vflag = False
- i = 1
-
- try:
- opts, args = getopt.getopt(
- sys.argv[1:],
- "xo:hvs:f:",
- [
- "extended",
- "outfile",
- "help",
- "verbose",
- "separator",
- "columns"
- ]
- )
- except getopt.error as msg:
- sys.stderr.write("Error: %s\n" % str(msg))
- usage()
- opts = None
-
- for opt, arg in opts:
- if opt in ('-x', '--extended'):
- xflag = True
- if opt in ('-o', '--outfile'):
- opfile = arg
- i += 1
- if opt in ('-h', '--help'):
- hflag = True
- if opt in ('-v', '--verbose'):
- vflag = True
- if opt in ('-s', '--separator'):
- sep = arg
- i += 1
- if opt in ('-f', '--columns'):
- desired_cols = arg
- i += 1
- i += 1
-
- argv = sys.argv[i:]
- sint = Decimal(argv[0]) if argv else sint
- count = int(argv[1]) if len(argv) > 1 else count
-
- if len(argv) > 1:
- sint = Decimal(argv[0])
- count = int(argv[1])
-
- elif len(argv) > 0:
- sint = Decimal(argv[0])
- count = 0
-
- if hflag or (xflag and desired_cols):
- usage()
-
- if vflag:
- detailed_usage()
-
- if xflag:
- hdr = xhdr
-
- update_hdr_intr()
-
- # check if L2ARC exists
- snap_stats()
- l2_size = cur.get("l2_size")
- if l2_size:
- l2exist = True
-
- if desired_cols:
- hdr = desired_cols.split(",")
-
- invalid = []
- incompat = []
- for ele in hdr:
- if ele not in cols:
- invalid.append(ele)
- elif not l2exist and ele.startswith("l2"):
- sys.stdout.write("No L2ARC Here\n%s\n" % ele)
- incompat.append(ele)
-
- if len(invalid) > 0:
- sys.stderr.write("Invalid column definition! -- %s\n" % invalid)
- usage()
-
- if len(incompat) > 0:
- sys.stderr.write("Incompatible field specified! -- %s\n" %
- incompat)
- usage()
-
- if opfile:
- try:
- out = open(opfile, "w")
- sys.stdout = out
-
- except IOError:
- sys.stderr.write("Cannot open %s for writing\n" % opfile)
- sys.exit(1)
-
-
-def calculate():
- global d
- global v
- global l2exist
-
- v = dict()
- v["time"] = time.strftime("%H:%M:%S", time.localtime())
- v["hits"] = d["hits"] / sint
- v["miss"] = d["misses"] / sint
- v["read"] = v["hits"] + v["miss"]
- v["hit%"] = 100 * v["hits"] / v["read"] if v["read"] > 0 else 0
- v["miss%"] = 100 - v["hit%"] if v["read"] > 0 else 0
-
- v["dhit"] = (d["demand_data_hits"] + d["demand_metadata_hits"]) / sint
- v["dmis"] = (d["demand_data_misses"] + d["demand_metadata_misses"]) / sint
-
- v["dread"] = v["dhit"] + v["dmis"]
- v["dh%"] = 100 * v["dhit"] / v["dread"] if v["dread"] > 0 else 0
- v["dm%"] = 100 - v["dh%"] if v["dread"] > 0 else 0
-
- v["phit"] = (d["prefetch_data_hits"] + d["prefetch_metadata_hits"]) / sint
- v["pmis"] = (d["prefetch_data_misses"] +
- d["prefetch_metadata_misses"]) / sint
-
- v["pread"] = v["phit"] + v["pmis"]
- v["ph%"] = 100 * v["phit"] / v["pread"] if v["pread"] > 0 else 0
- v["pm%"] = 100 - v["ph%"] if v["pread"] > 0 else 0
-
- v["mhit"] = (d["prefetch_metadata_hits"] +
- d["demand_metadata_hits"]) / sint
- v["mmis"] = (d["prefetch_metadata_misses"] +
- d["demand_metadata_misses"]) / sint
-
- v["mread"] = v["mhit"] + v["mmis"]
- v["mh%"] = 100 * v["mhit"] / v["mread"] if v["mread"] > 0 else 0
- v["mm%"] = 100 - v["mh%"] if v["mread"] > 0 else 0
-
- v["arcsz"] = cur["size"]
- v["c"] = cur["c"]
- v["mfu"] = d["mfu_hits"] / sint
- v["mru"] = d["mru_hits"] / sint
- v["mrug"] = d["mru_ghost_hits"] / sint
- v["mfug"] = d["mfu_ghost_hits"] / sint
- v["eskip"] = d["evict_skip"] / sint
- v["mtxmis"] = d["mutex_miss"] / sint
-
- if l2exist:
- v["l2hits"] = d["l2_hits"] / sint
- v["l2miss"] = d["l2_misses"] / sint
- v["l2read"] = v["l2hits"] + v["l2miss"]
- v["l2hit%"] = 100 * v["l2hits"] / v["l2read"] if v["l2read"] > 0 else 0
-
- v["l2miss%"] = 100 - v["l2hit%"] if v["l2read"] > 0 else 0
- v["l2asize"] = cur["l2_asize"]
- v["l2size"] = cur["l2_size"]
- v["l2bytes"] = d["l2_read_bytes"] / sint
-
- v["grow"] = 0 if cur["arc_no_grow"] else 1
- v["need"] = cur["arc_need_free"]
- v["free"] = cur["arc_sys_free"]
-
-
-def main():
- global sint
- global count
- global hdr_intr
-
- i = 0
- count_flag = 0
-
- init()
- if count > 0:
- count_flag = 1
-
- signal(SIGINT, SIG_DFL)
- signal(SIGWINCH, resize_handler)
- while True:
- if i == 0:
- print_header()
-
- snap_stats()
- calculate()
- print_values()
-
- if count_flag == 1:
- if count <= 1:
- break
- count -= 1
-
- i = 0 if i >= hdr_intr else i + 1
- time.sleep(sint)
-
- if out:
- out.close()
-
-
-if __name__ == '__main__':
- main()
-dist_bin_SCRIPTS = dbufstat.py
+dist_bin_SCRIPTS = dbufstat
+
+#
+# The dbufstat script is compatible with both Python 2.6 and 3.4.
+# As such, the Python 3 shebang can be replaced at install time when
+# targeting a Python 2 system. This allows us to maintain a single
+# version of the source.
+#
+if USING_PYTHON_2
+install-exec-hook:
+ sed --in-place 's|^#!/usr/bin/python3|#!/usr/bin/python2|' \
+ $(DESTDIR)$(bindir)/dbufstat
+endif
--- /dev/null
+#!/usr/bin/python3
+#
+# Print out statistics for all cached dmu buffers. This information
+# is available through the dbufs kstat and may be post-processed as
+# needed by the script.
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License"). You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# Copyright (C) 2013 Lawrence Livermore National Security, LLC.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+#
+# This script must remain compatible with Python 2.6+ and Python 3.4+.
+#
+
+import sys
+import getopt
+import errno
+import re
+
+bhdr = ["pool", "objset", "object", "level", "blkid", "offset", "dbsize"]
+bxhdr = ["pool", "objset", "object", "level", "blkid", "offset", "dbsize",
+ "meta", "state", "dbholds", "dbc", "list", "atype", "flags",
+ "count", "asize", "access", "mru", "gmru", "mfu", "gmfu", "l2",
+ "l2_dattr", "l2_asize", "l2_comp", "aholds", "dtype", "btype",
+ "data_bs", "meta_bs", "bsize", "lvls", "dholds", "blocks", "dsize"]
+bincompat = ["cached", "direct", "indirect", "bonus", "spill"]
+
+dhdr = ["pool", "objset", "object", "dtype", "cached"]
+dxhdr = ["pool", "objset", "object", "dtype", "btype", "data_bs", "meta_bs",
+ "bsize", "lvls", "dholds", "blocks", "dsize", "cached", "direct",
+ "indirect", "bonus", "spill"]
+dincompat = ["level", "blkid", "offset", "dbsize", "meta", "state", "dbholds",
+ "dbc", "list", "atype", "flags", "count", "asize", "access",
+ "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr", "l2_asize",
+ "l2_comp", "aholds"]
+
+thdr = ["pool", "objset", "dtype", "cached"]
+txhdr = ["pool", "objset", "dtype", "cached", "direct", "indirect",
+ "bonus", "spill"]
+tincompat = ["object", "level", "blkid", "offset", "dbsize", "meta", "state",
+ "dbc", "dbholds", "list", "atype", "flags", "count", "asize",
+ "access", "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr",
+ "l2_asize", "l2_comp", "aholds", "btype", "data_bs", "meta_bs",
+ "bsize", "lvls", "dholds", "blocks", "dsize"]
+
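+# Each reporting mode has a default header, an extended header used with -x
+# and a list of fields it cannot display: -b prints one row per dbuf
+# (bhdr/bxhdr/bincompat), -d one row per dnode (dhdr/dxhdr/dincompat) and
+# -t one row per dnode type (thdr/txhdr/tincompat).
+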
+cols = {
+ # hdr: [size, scale, description]
+ "pool": [15, -1, "pool name"],
+ "objset": [6, -1, "dataset identification number"],
+ "object": [10, -1, "object number"],
+ "level": [5, -1, "indirection level of buffer"],
+ "blkid": [8, -1, "block number of buffer"],
+ "offset": [12, 1024, "offset in object of buffer"],
+ "dbsize": [7, 1024, "size of buffer"],
+ "meta": [4, -1, "is this buffer metadata?"],
+ "state": [5, -1, "state of buffer (read, cached, etc)"],
+ "dbholds": [7, 1000, "number of holds on buffer"],
+ "dbc": [3, -1, "in dbuf cache"],
+ "list": [4, -1, "which ARC list contains this buffer"],
+ "atype": [7, -1, "ARC header type (data or metadata)"],
+ "flags": [9, -1, "ARC read flags"],
+ "count": [5, -1, "ARC data count"],
+ "asize": [7, 1024, "size of this ARC buffer"],
+ "access": [10, -1, "time this ARC buffer was last accessed"],
+ "mru": [5, 1000, "hits while on the ARC's MRU list"],
+ "gmru": [5, 1000, "hits while on the ARC's MRU ghost list"],
+ "mfu": [5, 1000, "hits while on the ARC's MFU list"],
+ "gmfu": [5, 1000, "hits while on the ARC's MFU ghost list"],
+ "l2": [5, 1000, "hits while on the L2ARC"],
+ "l2_dattr": [8, -1, "L2ARC disk address/offset"],
+ "l2_asize": [8, 1024, "L2ARC alloc'd size (depending on compression)"],
+ "l2_comp": [21, -1, "L2ARC compression algorithm for buffer"],
+ "aholds": [6, 1000, "number of holds on this ARC buffer"],
+ "dtype": [27, -1, "dnode type"],
+ "btype": [27, -1, "bonus buffer type"],
+ "data_bs": [7, 1024, "data block size"],
+ "meta_bs": [7, 1024, "metadata block size"],
+ "bsize": [6, 1024, "bonus buffer size"],
+ "lvls": [6, -1, "number of indirection levels"],
+ "dholds": [6, 1000, "number of holds on dnode"],
+ "blocks": [8, 1000, "number of allocated blocks"],
+ "dsize": [12, 1024, "size of dnode"],
+ "cached": [6, 1024, "bytes cached for all blocks"],
+ "direct": [6, 1024, "bytes cached for direct blocks"],
+ "indirect": [8, 1024, "bytes cached for indirect blocks"],
+ "bonus": [5, 1024, "bytes cached for bonus buffer"],
+ "spill": [5, 1024, "bytes cached for spill block"],
+}
+
+hdr = None
+xhdr = None
+sep = " " # Default separator is 2 spaces
+cmd = ("Usage: dbufstat [-bdhnrtvx] [-i file] [-f fields] [-o file] "
+ "[-s string] [-F filter]\n")
+raw = 0
+
+
+def print_incompat_helper(incompat):
+ cnt = 0
+ for key in sorted(incompat):
+        if cnt == 0:
+ sys.stderr.write("\t")
+ elif cnt > 8:
+ sys.stderr.write(",\n\t")
+ cnt = 0
+ else:
+ sys.stderr.write(", ")
+
+ sys.stderr.write("%s" % key)
+ cnt += 1
+
+ sys.stderr.write("\n\n")
+
+
+def detailed_usage():
+ sys.stderr.write("%s\n" % cmd)
+
+ sys.stderr.write("Field definitions incompatible with '-b' option:\n")
+ print_incompat_helper(bincompat)
+
+ sys.stderr.write("Field definitions incompatible with '-d' option:\n")
+ print_incompat_helper(dincompat)
+
+ sys.stderr.write("Field definitions incompatible with '-t' option:\n")
+ print_incompat_helper(tincompat)
+
+ sys.stderr.write("Field definitions are as follows:\n")
+ for key in sorted(cols.keys()):
+ sys.stderr.write("%11s : %s\n" % (key, cols[key][2]))
+ sys.stderr.write("\n")
+
+ sys.exit(0)
+
+
+def usage():
+ sys.stderr.write("%s\n" % cmd)
+ sys.stderr.write("\t -b : Print table of information for each dbuf\n")
+ sys.stderr.write("\t -d : Print table of information for each dnode\n")
+ sys.stderr.write("\t -h : Print this help message\n")
+ sys.stderr.write("\t -n : Exclude header from output\n")
+ sys.stderr.write("\t -r : Print raw values\n")
+ sys.stderr.write("\t -t : Print table of information for each dnode type"
+ "\n")
+ sys.stderr.write("\t -v : List all possible field headers and definitions"
+ "\n")
+ sys.stderr.write("\t -x : Print extended stats\n")
+ sys.stderr.write("\t -i : Redirect input from the specified file\n")
+ sys.stderr.write("\t -f : Specify specific fields to print (see -v)\n")
+ sys.stderr.write("\t -o : Redirect output to the specified file\n")
+ sys.stderr.write("\t -s : Override default field separator with custom "
+ "character or string\n")
+ sys.stderr.write("\t -F : Filter output by value or regex\n")
+ sys.stderr.write("\nExamples:\n")
+ sys.stderr.write("\tdbufstat -d -o /tmp/d.log\n")
+ sys.stderr.write("\tdbufstat -t -s \",\" -o /tmp/t.log\n")
+ sys.stderr.write("\tdbufstat -v\n")
+ sys.stderr.write("\tdbufstat -d -f pool,object,objset,dsize,cached\n")
+ sys.stderr.write("\tdbufstat -bx -F dbc=1,objset=54,pool=testpool\n")
+ sys.stderr.write("\n")
+
+ sys.exit(1)
+
+
+def prettynum(sz, scale, num=0):
+ global raw
+
+ suffix = [' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']
+ index = 0
+ save = 0
+
+ if raw or scale == -1:
+ return "%*s" % (sz, num)
+
+ # Rounding error, return 0
+ elif 0 < num < 1:
+ num = 0
+
+ while num > scale and index < 5:
+ save = num
+ num = num / scale
+ index += 1
+
+ if index == 0:
+ return "%*d" % (sz, num)
+
+ if (save / scale) < 10:
+ return "%*.1f%s" % (sz - 1, num, suffix[index])
+ else:
+ return "%*d%s" % (sz - 1, num, suffix[index])
+
+
+def print_values(v):
+ global hdr
+ global sep
+
+ try:
+ for col in hdr:
+ sys.stdout.write("%s%s" % (
+ prettynum(cols[col][0], cols[col][1], v[col]), sep))
+ sys.stdout.write("\n")
+ except IOError as e:
+ if e.errno == errno.EPIPE:
+ sys.exit(1)
+
+
+def print_header():
+ global hdr
+ global sep
+
+ try:
+ for col in hdr:
+ sys.stdout.write("%*s%s" % (cols[col][0], col, sep))
+ sys.stdout.write("\n")
+ except IOError as e:
+ if e.errno == errno.EPIPE:
+ sys.exit(1)
+
+
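+# Translate a numeric dnode/bonus buffer type into its DMU_OT_* name; values
+# outside that range are looked up in the DMU_OTN_* table and anything else
+# prints as "(UNKNOWN)".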
+def get_typestring(t):
+ ot_strings = [
+ "DMU_OT_NONE",
+ # general:
+ "DMU_OT_OBJECT_DIRECTORY",
+ "DMU_OT_OBJECT_ARRAY",
+ "DMU_OT_PACKED_NVLIST",
+ "DMU_OT_PACKED_NVLIST_SIZE",
+ "DMU_OT_BPOBJ",
+ "DMU_OT_BPOBJ_HDR",
+ # spa:
+ "DMU_OT_SPACE_MAP_HEADER",
+ "DMU_OT_SPACE_MAP",
+ # zil:
+ "DMU_OT_INTENT_LOG",
+ # dmu:
+ "DMU_OT_DNODE",
+ "DMU_OT_OBJSET",
+ # dsl:
+ "DMU_OT_DSL_DIR",
+ "DMU_OT_DSL_DIR_CHILD_MAP",
+ "DMU_OT_DSL_DS_SNAP_MAP",
+ "DMU_OT_DSL_PROPS",
+ "DMU_OT_DSL_DATASET",
+ # zpl:
+ "DMU_OT_ZNODE",
+ "DMU_OT_OLDACL",
+ "DMU_OT_PLAIN_FILE_CONTENTS",
+ "DMU_OT_DIRECTORY_CONTENTS",
+ "DMU_OT_MASTER_NODE",
+ "DMU_OT_UNLINKED_SET",
+ # zvol:
+ "DMU_OT_ZVOL",
+ "DMU_OT_ZVOL_PROP",
+ # other; for testing only!
+ "DMU_OT_PLAIN_OTHER",
+ "DMU_OT_UINT64_OTHER",
+ "DMU_OT_ZAP_OTHER",
+ # new object types:
+ "DMU_OT_ERROR_LOG",
+ "DMU_OT_SPA_HISTORY",
+ "DMU_OT_SPA_HISTORY_OFFSETS",
+ "DMU_OT_POOL_PROPS",
+ "DMU_OT_DSL_PERMS",
+ "DMU_OT_ACL",
+ "DMU_OT_SYSACL",
+ "DMU_OT_FUID",
+ "DMU_OT_FUID_SIZE",
+ "DMU_OT_NEXT_CLONES",
+ "DMU_OT_SCAN_QUEUE",
+ "DMU_OT_USERGROUP_USED",
+ "DMU_OT_USERGROUP_QUOTA",
+ "DMU_OT_USERREFS",
+ "DMU_OT_DDT_ZAP",
+ "DMU_OT_DDT_STATS",
+ "DMU_OT_SA",
+ "DMU_OT_SA_MASTER_NODE",
+ "DMU_OT_SA_ATTR_REGISTRATION",
+ "DMU_OT_SA_ATTR_LAYOUTS",
+ "DMU_OT_SCAN_XLATE",
+ "DMU_OT_DEDUP",
+ "DMU_OT_DEADLIST",
+ "DMU_OT_DEADLIST_HDR",
+ "DMU_OT_DSL_CLONES",
+ "DMU_OT_BPOBJ_SUBOBJ"]
+ otn_strings = {
+ 0x80: "DMU_OTN_UINT8_DATA",
+ 0xc0: "DMU_OTN_UINT8_METADATA",
+ 0x81: "DMU_OTN_UINT16_DATA",
+ 0xc1: "DMU_OTN_UINT16_METADATA",
+ 0x82: "DMU_OTN_UINT32_DATA",
+ 0xc2: "DMU_OTN_UINT32_METADATA",
+ 0x83: "DMU_OTN_UINT64_DATA",
+ 0xc3: "DMU_OTN_UINT64_METADATA",
+ 0x84: "DMU_OTN_ZAP_DATA",
+ 0xc4: "DMU_OTN_ZAP_METADATA",
+ 0xa0: "DMU_OTN_UINT8_ENC_DATA",
+ 0xe0: "DMU_OTN_UINT8_ENC_METADATA",
+ 0xa1: "DMU_OTN_UINT16_ENC_DATA",
+ 0xe1: "DMU_OTN_UINT16_ENC_METADATA",
+ 0xa2: "DMU_OTN_UINT32_ENC_DATA",
+ 0xe2: "DMU_OTN_UINT32_ENC_METADATA",
+ 0xa3: "DMU_OTN_UINT64_ENC_DATA",
+ 0xe3: "DMU_OTN_UINT64_ENC_METADATA",
+ 0xa4: "DMU_OTN_ZAP_ENC_DATA",
+ 0xe4: "DMU_OTN_ZAP_ENC_METADATA"}
+
+ # If "-rr" option is used, don't convert to string representation
+ if raw > 1:
+ return "%i" % t
+
+ try:
+ if t < len(ot_strings):
+ return ot_strings[t]
+ else:
+ return otn_strings[t]
+ except (IndexError, KeyError):
+ return "(UNKNOWN)"
+
+
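+# Translate a numeric compression type into its ZIO_COMPRESS_* name, falling
+# back to the raw number for values outside the table.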
+def get_compstring(c):
+ comp_strings = ["ZIO_COMPRESS_INHERIT", "ZIO_COMPRESS_ON",
+ "ZIO_COMPRESS_OFF", "ZIO_COMPRESS_LZJB",
+ "ZIO_COMPRESS_EMPTY", "ZIO_COMPRESS_GZIP_1",
+ "ZIO_COMPRESS_GZIP_2", "ZIO_COMPRESS_GZIP_3",
+ "ZIO_COMPRESS_GZIP_4", "ZIO_COMPRESS_GZIP_5",
+ "ZIO_COMPRESS_GZIP_6", "ZIO_COMPRESS_GZIP_7",
+ "ZIO_COMPRESS_GZIP_8", "ZIO_COMPRESS_GZIP_9",
+ "ZIO_COMPRESS_ZLE", "ZIO_COMPRESS_LZ4",
+ "ZIO_COMPRESS_FUNCTION"]
+
+ # If "-rr" option is used, don't convert to string representation
+ if raw > 1:
+ return "%i" % c
+
+ try:
+ return comp_strings[c]
+ except IndexError:
+ return "%i" % c
+
+
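+# Build a dict for one whitespace-split dbufs line, keyed by the columns in
+# hdr: dnode/bonus types and compression codes become symbolic names, pool
+# and flags stay strings, and everything else is converted to int.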
+def parse_line(line, labels):
+ global hdr
+
+ new = dict()
+ val = None
+ for col in hdr:
+        # These are "special" fields computed in the update_dict
+        # function; skip the labels[col] lookup for them to avoid a KeyError.
+ if col not in ['bonus', 'cached', 'direct', 'indirect', 'spill']:
+ val = line[labels[col]]
+
+ if col in ['pool', 'flags']:
+ new[col] = str(val)
+ elif col in ['dtype', 'btype']:
+ new[col] = get_typestring(int(val))
+ elif col in ['l2_comp']:
+ new[col] = get_compstring(int(val))
+ else:
+ new[col] = int(val)
+
+ return new
+
+
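+# Aggregate a single dbuf record into d[pool][objset][key]: its size is added
+# to "cached" and classified as bonus (blkid == -1), spill (blkid == -2),
+# direct (level 0) or indirect.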
+def update_dict(d, k, line, labels):
+ pool = line[labels['pool']]
+ objset = line[labels['objset']]
+ key = line[labels[k]]
+
+ dbsize = int(line[labels['dbsize']])
+ blkid = int(line[labels['blkid']])
+ level = int(line[labels['level']])
+
+ if pool not in d:
+ d[pool] = dict()
+
+ if objset not in d[pool]:
+ d[pool][objset] = dict()
+
+ if key not in d[pool][objset]:
+ d[pool][objset][key] = parse_line(line, labels)
+ d[pool][objset][key]['bonus'] = 0
+ d[pool][objset][key]['cached'] = 0
+ d[pool][objset][key]['direct'] = 0
+ d[pool][objset][key]['indirect'] = 0
+ d[pool][objset][key]['spill'] = 0
+
+ d[pool][objset][key]['cached'] += dbsize
+
+ if blkid == -1:
+ d[pool][objset][key]['bonus'] += dbsize
+ elif blkid == -2:
+ d[pool][objset][key]['spill'] += dbsize
+ else:
+ if level == 0:
+ d[pool][objset][key]['direct'] += dbsize
+ else:
+ d[pool][objset][key]['indirect'] += dbsize
+
+ return d
+
+
+def skip_line(vals, filters):
+ '''
+ Determines if a line should be skipped during printing
+ based on a set of filters
+ '''
+ if len(filters) == 0:
+ return False
+
+ for key in vals:
+ if key in filters:
+ val = prettynum(cols[key][0], cols[key][1], vals[key]).strip()
+ # we want a full match here
+ if re.match("(?:" + filters[key] + r")\Z", val) is None:
+ return True
+
+ return False
+
+
+def print_dict(d, filters, noheader):
+ if not noheader:
+ print_header()
+ for pool in list(d.keys()):
+ for objset in list(d[pool].keys()):
+ for v in list(d[pool][objset].values()):
+ if not skip_line(v, filters):
+ print_values(v)
+
+
+def dnodes_build_dict(filehandle):
+ labels = dict()
+ dnodes = dict()
+
+ # First 3 lines are header information, skip the first two
+ for i in range(2):
+ next(filehandle)
+
+ # The third line contains the labels and index locations
+ for i, v in enumerate(next(filehandle).split()):
+ labels[v] = i
+
+ # The rest of the file is buffer information
+ for line in filehandle:
+ update_dict(dnodes, 'object', line.split(), labels)
+
+ return dnodes
+
+
+def types_build_dict(filehandle):
+ labels = dict()
+ types = dict()
+
+ # First 3 lines are header information, skip the first two
+ for i in range(2):
+ next(filehandle)
+
+ # The third line contains the labels and index locations
+ for i, v in enumerate(next(filehandle).split()):
+ labels[v] = i
+
+ # The rest of the file is buffer information
+ for line in filehandle:
+ update_dict(types, 'dtype', line.split(), labels)
+
+ return types
+
+
+def buffers_print_all(filehandle, filters, noheader):
+ labels = dict()
+
+ # First 3 lines are header information, skip the first two
+ for i in range(2):
+ next(filehandle)
+
+ # The third line contains the labels and index locations
+ for i, v in enumerate(next(filehandle).split()):
+ labels[v] = i
+
+ if not noheader:
+ print_header()
+
+ # The rest of the file is buffer information
+ for line in filehandle:
+ vals = parse_line(line.split(), labels)
+ if not skip_line(vals, filters):
+ print_values(vals)
+
+
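+# Parse the command line, pick the reporting mode and header, then read the
+# dbufs kstat (or the file given with -i) and print either individual buffers
+# (-b) or per-dnode / per-type aggregates (-d / -t), honoring any -F filters.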
+def main():
+ global hdr
+ global sep
+ global raw
+
+ desired_cols = None
+ bflag = False
+ dflag = False
+ hflag = False
+ ifile = None
+ ofile = None
+ tflag = False
+ vflag = False
+ xflag = False
+ nflag = False
+ filters = dict()
+
+ try:
+ opts, args = getopt.getopt(
+ sys.argv[1:],
+ "bdf:hi:o:rs:tvxF:n",
+ [
+ "buffers",
+ "dnodes",
+ "columns",
+ "help",
+ "infile",
+ "outfile",
+ "separator",
+ "types",
+ "verbose",
+ "extended",
+ "filter"
+ ]
+ )
+ except getopt.error:
+ usage()
+ opts = None
+
+ for opt, arg in opts:
+ if opt in ('-b', '--buffers'):
+ bflag = True
+ if opt in ('-d', '--dnodes'):
+ dflag = True
+ if opt in ('-f', '--columns'):
+ desired_cols = arg
+ if opt in ('-h', '--help'):
+ hflag = True
+ if opt in ('-i', '--infile'):
+ ifile = arg
+ if opt in ('-o', '--outfile'):
+ ofile = arg
+ if opt in ('-r', '--raw'):
+ raw += 1
+ if opt in ('-s', '--separator'):
+ sep = arg
+ if opt in ('-t', '--types'):
+ tflag = True
+ if opt in ('-v', '--verbose'):
+ vflag = True
+ if opt in ('-x', '--extended'):
+ xflag = True
+ if opt in ('-n', '--noheader'):
+ nflag = True
+ if opt in ('-F', '--filter'):
+ fils = [x.strip() for x in arg.split(",")]
+
+ for fil in fils:
+ f = [x.strip() for x in fil.split("=")]
+
+ if len(f) != 2:
+ sys.stderr.write("Invalid filter '%s'.\n" % fil)
+ sys.exit(1)
+
+ if f[0] not in cols:
+ sys.stderr.write("Invalid field '%s' in filter.\n" % f[0])
+ sys.exit(1)
+
+ if f[0] in filters:
+ sys.stderr.write("Field '%s' specified multiple times in "
+ "filter.\n" % f[0])
+ sys.exit(1)
+
+ try:
+ re.compile("(?:" + f[1] + r")\Z")
+ except re.error:
+ sys.stderr.write("Invalid regex for field '%s' in "
+ "filter.\n" % f[0])
+ sys.exit(1)
+
+ filters[f[0]] = f[1]
+
+ if hflag or (xflag and desired_cols):
+ usage()
+
+ if vflag:
+ detailed_usage()
+
+ # Ensure at most only one of b, d, or t flags are set
+ if (bflag and dflag) or (bflag and tflag) or (dflag and tflag):
+ usage()
+
+ if bflag:
+ hdr = bxhdr if xflag else bhdr
+ elif tflag:
+ hdr = txhdr if xflag else thdr
+ else: # Even if dflag is False, it's the default if none set
+ dflag = True
+ hdr = dxhdr if xflag else dhdr
+
+ if desired_cols:
+ hdr = desired_cols.split(",")
+
+ invalid = []
+ incompat = []
+ for ele in hdr:
+ if ele not in cols:
+ invalid.append(ele)
+ elif ((bflag and bincompat and ele in bincompat) or
+ (dflag and dincompat and ele in dincompat) or
+ (tflag and tincompat and ele in tincompat)):
+ incompat.append(ele)
+
+ if len(invalid) > 0:
+ sys.stderr.write("Invalid column definition! -- %s\n" % invalid)
+ usage()
+
+ if len(incompat) > 0:
+ sys.stderr.write("Incompatible field specified! -- %s\n" %
+ incompat)
+ usage()
+
+ if ofile:
+ try:
+ tmp = open(ofile, "w")
+ sys.stdout = tmp
+
+ except IOError:
+ sys.stderr.write("Cannot open %s for writing\n" % ofile)
+ sys.exit(1)
+
+ if not ifile:
+ ifile = '/proc/spl/kstat/zfs/dbufs'
+
+    if ifile != "-":
+ try:
+ tmp = open(ifile, "r")
+ sys.stdin = tmp
+ except IOError:
+ sys.stderr.write("Cannot open %s for reading\n" % ifile)
+ sys.exit(1)
+
+ if bflag:
+ buffers_print_all(sys.stdin, filters, nflag)
+
+ if dflag:
+ print_dict(dnodes_build_dict(sys.stdin), filters, nflag)
+
+ if tflag:
+ print_dict(types_build_dict(sys.stdin), filters, nflag)
+
+
+if __name__ == '__main__':
+ main()
+++ /dev/null
-#!/usr/bin/python
-#
-# Print out statistics for all cached dmu buffers. This information
-# is available through the dbufs kstat and may be post-processed as
-# needed by the script.
-#
-# CDDL HEADER START
-#
-# The contents of this file are subject to the terms of the
-# Common Development and Distribution License, Version 1.0 only
-# (the "License"). You may not use this file except in compliance
-# with the License.
-#
-# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-# or http://www.opensolaris.org/os/licensing.
-# See the License for the specific language governing permissions
-# and limitations under the License.
-#
-# When distributing Covered Code, include this CDDL HEADER in each
-# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-# If applicable, add the following below this CDDL HEADER, with the
-# fields enclosed by brackets "[]" replaced with your own identifying
-# information: Portions Copyright [yyyy] [name of copyright owner]
-#
-# CDDL HEADER END
-#
-# Copyright (C) 2013 Lawrence Livermore National Security, LLC.
-# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-#
-
-import sys
-import getopt
-import errno
-import re
-
-bhdr = ["pool", "objset", "object", "level", "blkid", "offset", "dbsize"]
-bxhdr = ["pool", "objset", "object", "level", "blkid", "offset", "dbsize",
- "meta", "state", "dbholds", "dbc", "list", "atype", "flags",
- "count", "asize", "access", "mru", "gmru", "mfu", "gmfu", "l2",
- "l2_dattr", "l2_asize", "l2_comp", "aholds", "dtype", "btype",
- "data_bs", "meta_bs", "bsize", "lvls", "dholds", "blocks", "dsize"]
-bincompat = ["cached", "direct", "indirect", "bonus", "spill"]
-
-dhdr = ["pool", "objset", "object", "dtype", "cached"]
-dxhdr = ["pool", "objset", "object", "dtype", "btype", "data_bs", "meta_bs",
- "bsize", "lvls", "dholds", "blocks", "dsize", "cached", "direct",
- "indirect", "bonus", "spill"]
-dincompat = ["level", "blkid", "offset", "dbsize", "meta", "state", "dbholds",
- "dbc", "list", "atype", "flags", "count", "asize", "access",
- "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr", "l2_asize",
- "l2_comp", "aholds"]
-
-thdr = ["pool", "objset", "dtype", "cached"]
-txhdr = ["pool", "objset", "dtype", "cached", "direct", "indirect",
- "bonus", "spill"]
-tincompat = ["object", "level", "blkid", "offset", "dbsize", "meta", "state",
- "dbc", "dbholds", "list", "atype", "flags", "count", "asize",
- "access", "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr",
- "l2_asize", "l2_comp", "aholds", "btype", "data_bs", "meta_bs",
- "bsize", "lvls", "dholds", "blocks", "dsize"]
-
-cols = {
- # hdr: [size, scale, description]
- "pool": [15, -1, "pool name"],
- "objset": [6, -1, "dataset identification number"],
- "object": [10, -1, "object number"],
- "level": [5, -1, "indirection level of buffer"],
- "blkid": [8, -1, "block number of buffer"],
- "offset": [12, 1024, "offset in object of buffer"],
- "dbsize": [7, 1024, "size of buffer"],
- "meta": [4, -1, "is this buffer metadata?"],
- "state": [5, -1, "state of buffer (read, cached, etc)"],
- "dbholds": [7, 1000, "number of holds on buffer"],
- "dbc": [3, -1, "in dbuf cache"],
- "list": [4, -1, "which ARC list contains this buffer"],
- "atype": [7, -1, "ARC header type (data or metadata)"],
- "flags": [9, -1, "ARC read flags"],
- "count": [5, -1, "ARC data count"],
- "asize": [7, 1024, "size of this ARC buffer"],
- "access": [10, -1, "time this ARC buffer was last accessed"],
- "mru": [5, 1000, "hits while on the ARC's MRU list"],
- "gmru": [5, 1000, "hits while on the ARC's MRU ghost list"],
- "mfu": [5, 1000, "hits while on the ARC's MFU list"],
- "gmfu": [5, 1000, "hits while on the ARC's MFU ghost list"],
- "l2": [5, 1000, "hits while on the L2ARC"],
- "l2_dattr": [8, -1, "L2ARC disk address/offset"],
- "l2_asize": [8, 1024, "L2ARC alloc'd size (depending on compression)"],
- "l2_comp": [21, -1, "L2ARC compression algorithm for buffer"],
- "aholds": [6, 1000, "number of holds on this ARC buffer"],
- "dtype": [27, -1, "dnode type"],
- "btype": [27, -1, "bonus buffer type"],
- "data_bs": [7, 1024, "data block size"],
- "meta_bs": [7, 1024, "metadata block size"],
- "bsize": [6, 1024, "bonus buffer size"],
- "lvls": [6, -1, "number of indirection levels"],
- "dholds": [6, 1000, "number of holds on dnode"],
- "blocks": [8, 1000, "number of allocated blocks"],
- "dsize": [12, 1024, "size of dnode"],
- "cached": [6, 1024, "bytes cached for all blocks"],
- "direct": [6, 1024, "bytes cached for direct blocks"],
- "indirect": [8, 1024, "bytes cached for indirect blocks"],
- "bonus": [5, 1024, "bytes cached for bonus buffer"],
- "spill": [5, 1024, "bytes cached for spill block"],
-}
-
-hdr = None
-xhdr = None
-sep = " " # Default separator is 2 spaces
-cmd = ("Usage: dbufstat.py [-bdhnrtvx] [-i file] [-f fields] [-o file] "
- "[-s string] [-F filter]\n")
-raw = 0
-
-
-def print_incompat_helper(incompat):
- cnt = 0
- for key in sorted(incompat):
- if cnt is 0:
- sys.stderr.write("\t")
- elif cnt > 8:
- sys.stderr.write(",\n\t")
- cnt = 0
- else:
- sys.stderr.write(", ")
-
- sys.stderr.write("%s" % key)
- cnt += 1
-
- sys.stderr.write("\n\n")
-
-
-def detailed_usage():
- sys.stderr.write("%s\n" % cmd)
-
- sys.stderr.write("Field definitions incompatible with '-b' option:\n")
- print_incompat_helper(bincompat)
-
- sys.stderr.write("Field definitions incompatible with '-d' option:\n")
- print_incompat_helper(dincompat)
-
- sys.stderr.write("Field definitions incompatible with '-t' option:\n")
- print_incompat_helper(tincompat)
-
- sys.stderr.write("Field definitions are as follows:\n")
- for key in sorted(cols.keys()):
- sys.stderr.write("%11s : %s\n" % (key, cols[key][2]))
- sys.stderr.write("\n")
-
- sys.exit(0)
-
-
-def usage():
- sys.stderr.write("%s\n" % cmd)
- sys.stderr.write("\t -b : Print table of information for each dbuf\n")
- sys.stderr.write("\t -d : Print table of information for each dnode\n")
- sys.stderr.write("\t -h : Print this help message\n")
- sys.stderr.write("\t -n : Exclude header from output\n")
- sys.stderr.write("\t -r : Print raw values\n")
- sys.stderr.write("\t -t : Print table of information for each dnode type"
- "\n")
- sys.stderr.write("\t -v : List all possible field headers and definitions"
- "\n")
- sys.stderr.write("\t -x : Print extended stats\n")
- sys.stderr.write("\t -i : Redirect input from the specified file\n")
- sys.stderr.write("\t -f : Specify specific fields to print (see -v)\n")
- sys.stderr.write("\t -o : Redirect output to the specified file\n")
- sys.stderr.write("\t -s : Override default field separator with custom "
- "character or string\n")
- sys.stderr.write("\t -F : Filter output by value or regex\n")
- sys.stderr.write("\nExamples:\n")
- sys.stderr.write("\tdbufstat.py -d -o /tmp/d.log\n")
- sys.stderr.write("\tdbufstat.py -t -s \",\" -o /tmp/t.log\n")
- sys.stderr.write("\tdbufstat.py -v\n")
- sys.stderr.write("\tdbufstat.py -d -f pool,object,objset,dsize,cached\n")
- sys.stderr.write("\tdbufstat.py -bx -F dbc=1,objset=54,pool=testpool\n")
- sys.stderr.write("\n")
-
- sys.exit(1)
-
-
-def prettynum(sz, scale, num=0):
- global raw
-
- suffix = [' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']
- index = 0
- save = 0
-
- if raw or scale == -1:
- return "%*s" % (sz, num)
-
- # Rounding error, return 0
- elif 0 < num < 1:
- num = 0
-
- while num > scale and index < 5:
- save = num
- num = num / scale
- index += 1
-
- if index == 0:
- return "%*d" % (sz, num)
-
- if (save / scale) < 10:
- return "%*.1f%s" % (sz - 1, num, suffix[index])
- else:
- return "%*d%s" % (sz - 1, num, suffix[index])
-
-
-def print_values(v):
- global hdr
- global sep
-
- try:
- for col in hdr:
- sys.stdout.write("%s%s" % (
- prettynum(cols[col][0], cols[col][1], v[col]), sep))
- sys.stdout.write("\n")
- except IOError as e:
- if e.errno == errno.EPIPE:
- sys.exit(1)
-
-
-def print_header():
- global hdr
- global sep
-
- try:
- for col in hdr:
- sys.stdout.write("%*s%s" % (cols[col][0], col, sep))
- sys.stdout.write("\n")
- except IOError as e:
- if e.errno == errno.EPIPE:
- sys.exit(1)
-
-
-def get_typestring(t):
- ot_strings = [
- "DMU_OT_NONE",
- # general:
- "DMU_OT_OBJECT_DIRECTORY",
- "DMU_OT_OBJECT_ARRAY",
- "DMU_OT_PACKED_NVLIST",
- "DMU_OT_PACKED_NVLIST_SIZE",
- "DMU_OT_BPOBJ",
- "DMU_OT_BPOBJ_HDR",
- # spa:
- "DMU_OT_SPACE_MAP_HEADER",
- "DMU_OT_SPACE_MAP",
- # zil:
- "DMU_OT_INTENT_LOG",
- # dmu:
- "DMU_OT_DNODE",
- "DMU_OT_OBJSET",
- # dsl:
- "DMU_OT_DSL_DIR",
- "DMU_OT_DSL_DIR_CHILD_MAP",
- "DMU_OT_DSL_DS_SNAP_MAP",
- "DMU_OT_DSL_PROPS",
- "DMU_OT_DSL_DATASET",
- # zpl:
- "DMU_OT_ZNODE",
- "DMU_OT_OLDACL",
- "DMU_OT_PLAIN_FILE_CONTENTS",
- "DMU_OT_DIRECTORY_CONTENTS",
- "DMU_OT_MASTER_NODE",
- "DMU_OT_UNLINKED_SET",
- # zvol:
- "DMU_OT_ZVOL",
- "DMU_OT_ZVOL_PROP",
- # other; for testing only!
- "DMU_OT_PLAIN_OTHER",
- "DMU_OT_UINT64_OTHER",
- "DMU_OT_ZAP_OTHER",
- # new object types:
- "DMU_OT_ERROR_LOG",
- "DMU_OT_SPA_HISTORY",
- "DMU_OT_SPA_HISTORY_OFFSETS",
- "DMU_OT_POOL_PROPS",
- "DMU_OT_DSL_PERMS",
- "DMU_OT_ACL",
- "DMU_OT_SYSACL",
- "DMU_OT_FUID",
- "DMU_OT_FUID_SIZE",
- "DMU_OT_NEXT_CLONES",
- "DMU_OT_SCAN_QUEUE",
- "DMU_OT_USERGROUP_USED",
- "DMU_OT_USERGROUP_QUOTA",
- "DMU_OT_USERREFS",
- "DMU_OT_DDT_ZAP",
- "DMU_OT_DDT_STATS",
- "DMU_OT_SA",
- "DMU_OT_SA_MASTER_NODE",
- "DMU_OT_SA_ATTR_REGISTRATION",
- "DMU_OT_SA_ATTR_LAYOUTS",
- "DMU_OT_SCAN_XLATE",
- "DMU_OT_DEDUP",
- "DMU_OT_DEADLIST",
- "DMU_OT_DEADLIST_HDR",
- "DMU_OT_DSL_CLONES",
- "DMU_OT_BPOBJ_SUBOBJ"]
- otn_strings = {
- 0x80: "DMU_OTN_UINT8_DATA",
- 0xc0: "DMU_OTN_UINT8_METADATA",
- 0x81: "DMU_OTN_UINT16_DATA",
- 0xc1: "DMU_OTN_UINT16_METADATA",
- 0x82: "DMU_OTN_UINT32_DATA",
- 0xc2: "DMU_OTN_UINT32_METADATA",
- 0x83: "DMU_OTN_UINT64_DATA",
- 0xc3: "DMU_OTN_UINT64_METADATA",
- 0x84: "DMU_OTN_ZAP_DATA",
- 0xc4: "DMU_OTN_ZAP_METADATA",
- 0xa0: "DMU_OTN_UINT8_ENC_DATA",
- 0xe0: "DMU_OTN_UINT8_ENC_METADATA",
- 0xa1: "DMU_OTN_UINT16_ENC_DATA",
- 0xe1: "DMU_OTN_UINT16_ENC_METADATA",
- 0xa2: "DMU_OTN_UINT32_ENC_DATA",
- 0xe2: "DMU_OTN_UINT32_ENC_METADATA",
- 0xa3: "DMU_OTN_UINT64_ENC_DATA",
- 0xe3: "DMU_OTN_UINT64_ENC_METADATA",
- 0xa4: "DMU_OTN_ZAP_ENC_DATA",
- 0xe4: "DMU_OTN_ZAP_ENC_METADATA"}
-
- # If "-rr" option is used, don't convert to string representation
- if raw > 1:
- return "%i" % t
-
- try:
- if t < len(ot_strings):
- return ot_strings[t]
- else:
- return otn_strings[t]
- except (IndexError, KeyError):
- return "(UNKNOWN)"
-
-
-def get_compstring(c):
- comp_strings = ["ZIO_COMPRESS_INHERIT", "ZIO_COMPRESS_ON",
- "ZIO_COMPRESS_OFF", "ZIO_COMPRESS_LZJB",
- "ZIO_COMPRESS_EMPTY", "ZIO_COMPRESS_GZIP_1",
- "ZIO_COMPRESS_GZIP_2", "ZIO_COMPRESS_GZIP_3",
- "ZIO_COMPRESS_GZIP_4", "ZIO_COMPRESS_GZIP_5",
- "ZIO_COMPRESS_GZIP_6", "ZIO_COMPRESS_GZIP_7",
- "ZIO_COMPRESS_GZIP_8", "ZIO_COMPRESS_GZIP_9",
- "ZIO_COMPRESS_ZLE", "ZIO_COMPRESS_LZ4",
- "ZIO_COMPRESS_FUNCTION"]
-
- # If "-rr" option is used, don't convert to string representation
- if raw > 1:
- return "%i" % c
-
- try:
- return comp_strings[c]
- except IndexError:
- return "%i" % c
-
-
-def parse_line(line, labels):
- global hdr
-
- new = dict()
- val = None
- for col in hdr:
- # These are "special" fields computed in the update_dict
- # function, prevent KeyError exception on labels[col] for these.
- if col not in ['bonus', 'cached', 'direct', 'indirect', 'spill']:
- val = line[labels[col]]
-
- if col in ['pool', 'flags']:
- new[col] = str(val)
- elif col in ['dtype', 'btype']:
- new[col] = get_typestring(int(val))
- elif col in ['l2_comp']:
- new[col] = get_compstring(int(val))
- else:
- new[col] = int(val)
-
- return new
-
-
-def update_dict(d, k, line, labels):
- pool = line[labels['pool']]
- objset = line[labels['objset']]
- key = line[labels[k]]
-
- dbsize = int(line[labels['dbsize']])
- blkid = int(line[labels['blkid']])
- level = int(line[labels['level']])
-
- if pool not in d:
- d[pool] = dict()
-
- if objset not in d[pool]:
- d[pool][objset] = dict()
-
- if key not in d[pool][objset]:
- d[pool][objset][key] = parse_line(line, labels)
- d[pool][objset][key]['bonus'] = 0
- d[pool][objset][key]['cached'] = 0
- d[pool][objset][key]['direct'] = 0
- d[pool][objset][key]['indirect'] = 0
- d[pool][objset][key]['spill'] = 0
-
- d[pool][objset][key]['cached'] += dbsize
-
- if blkid == -1:
- d[pool][objset][key]['bonus'] += dbsize
- elif blkid == -2:
- d[pool][objset][key]['spill'] += dbsize
- else:
- if level == 0:
- d[pool][objset][key]['direct'] += dbsize
- else:
- d[pool][objset][key]['indirect'] += dbsize
-
- return d
-
-
-def skip_line(vals, filters):
- '''
- Determines if a line should be skipped during printing
- based on a set of filters
- '''
- if len(filters) == 0:
- return False
-
- for key in vals:
- if key in filters:
- val = prettynum(cols[key][0], cols[key][1], vals[key]).strip()
- # we want a full match here
- if re.match("(?:" + filters[key] + r")\Z", val) is None:
- return True
-
- return False
-
-
-def print_dict(d, filters, noheader):
- if not noheader:
- print_header()
- for pool in list(d.keys()):
- for objset in list(d[pool].keys()):
- for v in list(d[pool][objset].values()):
- if not skip_line(v, filters):
- print_values(v)
-
-
-def dnodes_build_dict(filehandle):
- labels = dict()
- dnodes = dict()
-
- # First 3 lines are header information, skip the first two
- for i in range(2):
- next(filehandle)
-
- # The third line contains the labels and index locations
- for i, v in enumerate(next(filehandle).split()):
- labels[v] = i
-
- # The rest of the file is buffer information
- for line in filehandle:
- update_dict(dnodes, 'object', line.split(), labels)
-
- return dnodes
-
-
-def types_build_dict(filehandle):
- labels = dict()
- types = dict()
-
- # First 3 lines are header information, skip the first two
- for i in range(2):
- next(filehandle)
-
- # The third line contains the labels and index locations
- for i, v in enumerate(next(filehandle).split()):
- labels[v] = i
-
- # The rest of the file is buffer information
- for line in filehandle:
- update_dict(types, 'dtype', line.split(), labels)
-
- return types
-
-
-def buffers_print_all(filehandle, filters, noheader):
- labels = dict()
-
- # First 3 lines are header information, skip the first two
- for i in range(2):
- next(filehandle)
-
- # The third line contains the labels and index locations
- for i, v in enumerate(next(filehandle).split()):
- labels[v] = i
-
- if not noheader:
- print_header()
-
- # The rest of the file is buffer information
- for line in filehandle:
- vals = parse_line(line.split(), labels)
- if not skip_line(vals, filters):
- print_values(vals)
-
-
-def main():
- global hdr
- global sep
- global raw
-
- desired_cols = None
- bflag = False
- dflag = False
- hflag = False
- ifile = None
- ofile = None
- tflag = False
- vflag = False
- xflag = False
- nflag = False
- filters = dict()
-
- try:
- opts, args = getopt.getopt(
- sys.argv[1:],
- "bdf:hi:o:rs:tvxF:n",
- [
- "buffers",
- "dnodes",
- "columns",
- "help",
- "infile",
- "outfile",
- "separator",
- "types",
- "verbose",
- "extended",
- "filter"
- ]
- )
- except getopt.error:
- usage()
- opts = None
-
- for opt, arg in opts:
- if opt in ('-b', '--buffers'):
- bflag = True
- if opt in ('-d', '--dnodes'):
- dflag = True
- if opt in ('-f', '--columns'):
- desired_cols = arg
- if opt in ('-h', '--help'):
- hflag = True
- if opt in ('-i', '--infile'):
- ifile = arg
- if opt in ('-o', '--outfile'):
- ofile = arg
- if opt in ('-r', '--raw'):
- raw += 1
- if opt in ('-s', '--separator'):
- sep = arg
- if opt in ('-t', '--types'):
- tflag = True
- if opt in ('-v', '--verbose'):
- vflag = True
- if opt in ('-x', '--extended'):
- xflag = True
- if opt in ('-n', '--noheader'):
- nflag = True
- if opt in ('-F', '--filter'):
- fils = [x.strip() for x in arg.split(",")]
-
- for fil in fils:
- f = [x.strip() for x in fil.split("=")]
-
- if len(f) != 2:
- sys.stderr.write("Invalid filter '%s'.\n" % fil)
- sys.exit(1)
-
- if f[0] not in cols:
- sys.stderr.write("Invalid field '%s' in filter.\n" % f[0])
- sys.exit(1)
-
- if f[0] in filters:
- sys.stderr.write("Field '%s' specified multiple times in "
- "filter.\n" % f[0])
- sys.exit(1)
-
- try:
- re.compile("(?:" + f[1] + r")\Z")
- except re.error:
- sys.stderr.write("Invalid regex for field '%s' in "
- "filter.\n" % f[0])
- sys.exit(1)
-
- filters[f[0]] = f[1]
-
- if hflag or (xflag and desired_cols):
- usage()
-
- if vflag:
- detailed_usage()
-
- # Ensure at most only one of b, d, or t flags are set
- if (bflag and dflag) or (bflag and tflag) or (dflag and tflag):
- usage()
-
- if bflag:
- hdr = bxhdr if xflag else bhdr
- elif tflag:
- hdr = txhdr if xflag else thdr
- else: # Even if dflag is False, it's the default if none set
- dflag = True
- hdr = dxhdr if xflag else dhdr
-
- if desired_cols:
- hdr = desired_cols.split(",")
-
- invalid = []
- incompat = []
- for ele in hdr:
- if ele not in cols:
- invalid.append(ele)
- elif ((bflag and bincompat and ele in bincompat) or
- (dflag and dincompat and ele in dincompat) or
- (tflag and tincompat and ele in tincompat)):
- incompat.append(ele)
-
- if len(invalid) > 0:
- sys.stderr.write("Invalid column definition! -- %s\n" % invalid)
- usage()
-
- if len(incompat) > 0:
- sys.stderr.write("Incompatible field specified! -- %s\n" %
- incompat)
- usage()
-
- if ofile:
- try:
- tmp = open(ofile, "w")
- sys.stdout = tmp
-
- except IOError:
- sys.stderr.write("Cannot open %s for writing\n" % ofile)
- sys.exit(1)
-
- if not ifile:
- ifile = '/proc/spl/kstat/zfs/dbufs'
-
- if ifile != "-":
- try:
- tmp = open(ifile, "r")
- sys.stdin = tmp
- except IOError:
- sys.stderr.write("Cannot open %s for reading\n" % ifile)
- sys.exit(1)
-
- if bflag:
- buffers_print_all(sys.stdin, filters, nflag)
-
- if dflag:
- print_dict(dnodes_build_dict(sys.stdin), filters, nflag)
-
- if tflag:
- print_dict(types_build_dict(sys.stdin), filters, nflag)
-
-
-if __name__ == '__main__':
- main()
--- /dev/null
+dnl #
+dnl # ZFS_AC_PYTHON_VERSION(version, [action-if-true], [action-if-false])
+dnl #
+dnl # Verify Python version
+dnl #
+AC_DEFUN([ZFS_AC_PYTHON_VERSION], [
+ ver_check=`$PYTHON -c "import sys; print (sys.version.split()[[0]] $1)"`
+ AS_IF([test "$ver_check" = "True"], [
+ m4_ifvaln([$2], [$2])
+ ], [
+ m4_ifvaln([$3], [$3])
+ ])
+])
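+dnl #
+dnl # Example usage, mirroring the version check performed by
+dnl # ZFS_AC_CONFIG_ALWAYS_PYTHON below:
+dnl #
+dnl #   ZFS_AC_PYTHON_VERSION([>= '3.4'], [ /bin/true ],
+dnl #       [AC_MSG_ERROR("Python >= 3.4.x is not available")])
+dnl #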
+
+dnl #
+dnl # ZFS_AC_PYTHON_MODULE(module_name, [action-if-true], [action-if-false])
+dnl #
+dnl # Checks for Python module. Freely inspired by AX_PYTHON_MODULE
+dnl # https://www.gnu.org/software/autoconf-archive/ax_python_module.html
+dnl # Required by ZFS_AC_CONFIG_ALWAYS_PYZFS.
+dnl #
+AC_DEFUN([ZFS_AC_PYTHON_MODULE], [
+ PYTHON_NAME=`basename $PYTHON`
+ AC_MSG_CHECKING([for $PYTHON_NAME module: $1])
+ AS_IF([$PYTHON -c "import $1" 2>/dev/null], [
+ AC_MSG_RESULT(yes)
+ m4_ifvaln([$2], [$2])
+ ], [
+ AC_MSG_RESULT(no)
+ m4_ifvaln([$3], [$3])
+ ])
+])
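+dnl #
+dnl # Example usage (illustrative), a simplified form of the cffi check
+dnl # performed by ZFS_AC_CONFIG_ALWAYS_PYZFS below:
+dnl #
+dnl #   ZFS_AC_PYTHON_MODULE([cffi], [], [enable_pyzfs=no])
+dnl #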
+
+dnl #
+dnl # The majority of the Python scripts are written to be compatible
+dnl # with Python 2.6 and Python 3.4. Therefore, they may be installed
+dnl # and used with either interpreter. This option is intended to
+dnl # provide a method to specify the default system version, and
+dnl # set the PYTHON environment variable accordingly.
+dnl #
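+dnl # For example (illustrative invocations):
+dnl #
+dnl #   ./configure --with-python=3     # use the python3 interpreter
+dnl #   ./configure --with-python=3.6   # use the python3.6 interpreter
+dnl #   ./configure --with-python=2.7   # use the python2.7 interpreter
+dnl #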
+AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_PYTHON], [
+ AC_ARG_WITH([python],
+ AC_HELP_STRING([--with-python[=VERSION]],
+ [default system python version @<:@default=check@:>@]),
+ [with_python=$withval],
+ [with_python=check])
+
+ AS_CASE([$with_python],
+ [check],
+ [AS_IF([test -x /usr/bin/python3],
+ [PYTHON="python3"],
+ [AS_IF([test -x /usr/bin/python2],
+ [PYTHON="python2"],
+ [PYTHON=""]
+ )]
+ )],
+ [2*], [PYTHON="python${with_python}"],
+ [*python2*], [PYTHON="${with_python}"],
+ [3*], [PYTHON="python${with_python}"],
+ [*python3*], [PYTHON="${with_python}"],
+ [no], [PYTHON=""],
+ [AC_MSG_ERROR([Unknown --with-python value '$with_python'])]
+ )
+
+ AS_IF([$PYTHON --version >/dev/null 2>&1], [ /bin/true ], [
+ AC_MSG_ERROR([Cannot find $PYTHON in your system path])
+ ])
+
+ AM_PATH_PYTHON([2.6], [], [:])
+ AM_CONDITIONAL([USING_PYTHON], [test "$PYTHON" != :])
+ AM_CONDITIONAL([USING_PYTHON_2], [test "${PYTHON_VERSION:0:2}" = "2."])
+ AM_CONDITIONAL([USING_PYTHON_3], [test "${PYTHON_VERSION:0:2}" = "3."])
+
+ dnl #
+ dnl # Minimum supported Python versions for utilities:
+ dnl # Python 2.6.x, or Python 3.4.x
+ dnl #
+ AS_IF([test "${PYTHON_VERSION:0:2}" = "2."], [
+ ZFS_AC_PYTHON_VERSION([>= '2.6'], [ /bin/true ],
+ [AC_MSG_ERROR("Python >= 2.6.x is not available")])
+ ])
+
+ AS_IF([test "${PYTHON_VERSION:0:2}" = "3."], [
+ ZFS_AC_PYTHON_VERSION([>= '3.4'], [ /bin/true ],
+ [AC_MSG_ERROR("Python >= 3.4.x is not available")])
+ ])
+
+ dnl #
+ dnl # Request that packages be built for a specific Python version.
+ dnl #
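+	dnl #
+	dnl # For example (illustrative), PYTHON=python3.6 yields
+	dnl # PYTHON_PKG_VERSION=36, while PYTHON=python2 yields
+	dnl # PYTHON_PKG_VERSION=2.
+	dnl #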
+ AS_IF([test $with_python != check], [
+ PYTHON_PKG_VERSION=`echo ${PYTHON} | tr -d 'a-zA-Z.'`
+ DEFINE_PYTHON_PKG_VERSION='--define "__use_python_pkg_version '${PYTHON_PKG_VERSION}'"'
+ DEFINE_PYTHON_VERSION='--define "__use_python '${PYTHON}'"'
+ ], [
+ DEFINE_PYTHON_VERSION=''
+ DEFINE_PYTHON_PKG_VERSION=''
+ ])
+
+ AC_SUBST(DEFINE_PYTHON_VERSION)
+ AC_SUBST(DEFINE_PYTHON_PKG_VERSION)
+])
dnl #
-dnl # ZFS_AC_PYTHON_MODULE(module_name, [action-if-true], [action-if-false])
+dnl # Determines if pyzfs can be built; requires Python 2.7 or later.
dnl #
-dnl # Checks for Python module. Freely inspired by AX_PYTHON_MODULE
-dnl # https://www.gnu.org/software/autoconf-archive/ax_python_module.html
-dnl #
-AC_DEFUN([ZFS_AC_PYTHON_MODULE],[
- PYTHON_NAME=`basename $PYTHON`
- AC_MSG_CHECKING([for $PYTHON_NAME module: $1])
- $PYTHON -c "import $1" 2>/dev/null
- if test $? -eq 0;
- then
- AC_MSG_RESULT(yes)
- m4_ifvaln([$2], [$2])
- else
- AC_MSG_RESULT(no)
- m4_ifvaln([$3], [$3])
- fi
-])
-
-dnl #
-dnl # ZFS_AC_PYTHON_VERSION(version, [action-if-true], [action-if-false])
-dnl #
-dnl # Verify Python version
-dnl #
-AC_DEFUN([ZFS_AC_PYTHON_VERSION], [
- AC_MSG_CHECKING([for a version of Python $1])
- version_check=`$PYTHON -c "import sys; print (sys.version.split()[[0]] $1)"`
- if test "$version_check" = "True";
- then
- AC_MSG_RESULT(yes)
- m4_ifvaln([$2], [$2])
- else
- AC_MSG_RESULT(no)
- m4_ifvaln([$3], [$3])
- fi
-
-])
-
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_PYZFS], [
- PYTHON_REQUIRED_VERSION="<= '2.7.x'"
-
AC_ARG_ENABLE([pyzfs],
AC_HELP_STRING([--enable-pyzfs],
[install libzfs_core python bindings @<:@default=check@:>@]),
[enable_pyzfs=$enableval],
[enable_pyzfs=check])
- AM_PATH_PYTHON([2.7], [], [
+ dnl #
+ dnl # Packages for pyzfs specifically enabled/disabled.
+ dnl #
+ AS_IF([test "x$enable_pyzfs" != xcheck], [
AS_IF([test "x$enable_pyzfs" = xyes], [
- AC_MSG_ERROR("python >= 2.7 is not installed")
- ], [test ! "x$enable_pyzfs" = xno], [
- enable_pyzfs=no
+ DEFINE_PYZFS='--with pyzfs'
+ ], [
+ DEFINE_PYZFS='--without pyzfs'
])
+ ], [
+ DEFINE_PYZFS=''
])
- AM_CONDITIONAL([HAVE_PYTHON], [test "$PYTHON" != :])
+ AC_SUBST(DEFINE_PYZFS)
dnl #
- dnl # Python 2.7.x is supported, other versions (3.5) are not yet
+ dnl # Require python-devel libraries
dnl #
- AS_IF([test "x$enable_pyzfs" = xcheck], [
- ZFS_AC_PYTHON_VERSION([$PYTHON_REQUIRED_VERSION], [], [
- AS_IF([test "x$enable_pyzfs" = xyes], [
- AC_MSG_ERROR("Python $PYTHON_REQUIRED_VERSION is not available")
- ], [test ! "x$enable_pyzfs" = xno], [
- enable_pyzfs=no
+ AS_IF([test "x$enable_pyzfs" = xcheck -o "x$enable_pyzfs" = xyes], [
+ AS_IF([test "${PYTHON_VERSION:0:2}" = "2."], [
+ PYTHON_REQUIRED_VERSION=">= '2.7.0'"
+ ], [
+ AS_IF([test "${PYTHON_VERSION:0:2}" = "3."], [
+ PYTHON_REQUIRED_VERSION=">= '3.4.0'"
+ ], [
+ AC_MSG_ERROR("Python $PYTHON_VERSION unknown")
])
])
- ])
- dnl #
- dnl # Require python-devel libraries
- dnl #
- AS_IF([test "x$enable_pyzfs" = xcheck], [
AX_PYTHON_DEVEL([$PYTHON_REQUIRED_VERSION], [
AS_IF([test "x$enable_pyzfs" = xyes], [
- AC_MSG_ERROR("Python development library is not available")
+ AC_MSG_ERROR("Python $PYTHON_REQUIRED_VERSION development library is not installed")
], [test ! "x$enable_pyzfs" = xno], [
enable_pyzfs=no
])
dnl #
dnl # Python "setuptools" module is required to build and install pyzfs
dnl #
- AS_IF([test "x$enable_pyzfs" = xcheck], [
+ AS_IF([test "x$enable_pyzfs" = xcheck -o "x$enable_pyzfs" = xyes], [
ZFS_AC_PYTHON_MODULE([setuptools], [], [
AS_IF([test "x$enable_pyzfs" = xyes], [
- AC_MSG_ERROR("python-setuptools is not installed")
+ AC_MSG_ERROR("Python $PYTHON_VERSION setuptools is not installed")
], [test ! "x$enable_pyzfs" = xno], [
enable_pyzfs=no
])
dnl #
dnl # Python "cffi" module is required to run pyzfs
dnl #
- AS_IF([test "x$enable_pyzfs" = xcheck], [
+ AS_IF([test "x$enable_pyzfs" = xcheck -o "x$enable_pyzfs" = xyes], [
ZFS_AC_PYTHON_MODULE([cffi], [], [
AS_IF([test "x$enable_pyzfs" = xyes], [
- AC_MSG_ERROR("python-cffi is not installed")
+ AC_MSG_ERROR("Python $PYTHON_VERSION cffi is not installed")
], [test ! "x$enable_pyzfs" = xno], [
enable_pyzfs=no
])
AM_CONDITIONAL([PYZFS_ENABLED], [test x$enable_pyzfs = xyes])
AC_SUBST([PYZFS_ENABLED], [$enable_pyzfs])
-
- AS_IF([test "x$enable_pyzfs" = xyes], [
- DEFINE_PYZFS='--define "_pyzfs 1"'
- ],[
- DEFINE_PYZFS=''
- ])
- AC_SUBST(DEFINE_PYZFS)
AC_SUBST(pythonsitedir, [$PYTHON_SITE_PKG])
+
+ AC_MSG_CHECKING([whether to enable pyzfs: ])
+ AC_MSG_RESULT($enable_pyzfs)
])
pkg7=$${name}-test-$${version}.$${arch}.rpm; \
pkg8=$${name}-dracut-$${version}.$${arch}.rpm; \
pkg9=$${name}-initramfs-$${version}.$${arch}.rpm; \
- pkg10=pyzfs-$${version}.noarch.rpm; \
+ pkg10=`ls python*-pyzfs-$${version}* | tail -1`; \
## Arguments need to be passed to dh_shlibdeps. Alien provides no mechanism
## to do this, so we install a shim onto the path which calls the real
## dh_shlibdeps with the required arguments.
ZFS_AC_CONFIG_ALWAYS_CC_ASAN
ZFS_AC_CONFIG_ALWAYS_TOOLCHAIN_SIMD
ZFS_AC_CONFIG_ALWAYS_ARCH
+ ZFS_AC_CONFIG_ALWAYS_PYTHON
ZFS_AC_CONFIG_ALWAYS_PYZFS
])
RPM_DEFINE_UTIL+=' $(DEFINE_INITRAMFS)'
RPM_DEFINE_UTIL+=' $(DEFINE_SYSTEMD)'
RPM_DEFINE_UTIL+=' $(DEFINE_PYZFS)'
+ RPM_DEFINE_UTIL+=' $(DEFINE_PYTHON_VERSION)'
+ RPM_DEFINE_UTIL+=' $(DEFINE_PYTHON_PKG_VERSION)'
- dnl # Override default lib directory on Debian/Ubuntu systems. The provided
- dnl # /usr/lib/rpm/platform/<arch>/macros files do not specify the correct
- dnl # path for multiarch systems as described by the packaging guidelines.
+ dnl # Override default lib directory on Debian/Ubuntu systems. The
+ dnl # provided /usr/lib/rpm/platform/<arch>/macros files do not
+ dnl # specify the correct path for multiarch systems as described
+ dnl # by the packaging guidelines.
dnl #
dnl # https://wiki.ubuntu.com/MultiarchSpec
dnl # https://wiki.debian.org/Multiarch/Implementation
$(PYTHON) $(srcdir)/setup.py install \
--prefix $(prefix) \
--root $(DESTDIR)/ \
- --install-lib $(pythondir) \
+ --install-lib $(pythonsitedir) \
--single-version-externally-managed \
--verbose
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
- "Programming Language :: Python :: 2 :: Only",
+ "Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.4",
+ "Programming Language :: Python :: 3.5",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
"Topic :: System :: Filesystems",
"Topic :: Software Development :: Libraries",
],
setup_requires=[
"cffi",
],
- python_requires='>=2.7,<3',
+ python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,<4',
zip_safe=False,
test_suite="libzfs_core.test",
)
%bcond_with asan
%bcond_with systemd
-# Python permits the !/usr/bin/python shebang for scripts that are cross
-# compatible between python2 and python3, but Fedora 28 does not. Fedora
-# wants us to choose python3 for cross-compatible scripts. Since we want
-# to support python2 and python3 users, exclude our scripts from Fedora 28's
-# RPM build check, so that we don't get a bunch of build warnings.
-#
-# Details: https://github.com/zfsonlinux/zfs/issues/7360
-#
-%global __brp_mangle_shebangs_exclude_from arc_summary.py|arcstat.py|dbufstat.py|test-runner.py|zts-report.py
-
# Generic enable switch for systemd
%if %{with systemd}
%define _systemd 1
%define _systemd 1
%endif
+# When not specified, default to the distribution-provided version. This
+# is normally Python 3, but for RHEL <= 7 only Python 2 is provided.
+%if %{undefined __use_python}
+%if 0%{?rhel} && 0%{?rhel} <= 7
+%define __python /usr/bin/python2
+%define __python_pkg_version 2
+%define __python_cffi_pkg python-cffi
+%else
+%define __python /usr/bin/python3
+%define __python_pkg_version 3
+%define __python_cffi_pkg python3-cffi
+%endif
+%else
+%define __python %{__use_python}
+%define __python_pkg_version %{__use_python_pkg_version}
+%define __python_cffi_pkg python%{__python_pkg_version}-cffi
+%endif
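+#
+# The interpreter can also be forced from the rpmbuild command line,
+# matching the --define options generated by configure (illustrative):
+#
+#   rpmbuild ... --define "__use_python /usr/bin/python2" \
+#                --define "__use_python_pkg_version 2"
+#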
+
+# By default python-pyzfs is enabled, with the exception of
+# RHEL 6 which by default uses Python 2.6 which is too old.
+%if 0%{?rhel} == 6
+%bcond_with pyzfs
+%else
+%bcond_without pyzfs
+%endif
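+#
+# The default can be overridden at build time, e.g. (illustrative):
+#   rpmbuild ... --without pyzfs
+#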
+
Name: @PACKAGE@
Version: @VERSION@
Release: @RELEASE@%{?dist}
Requires: sysstat
%description
-This package contains the ZFS command line utilities.
+This package contains the core ZFS command line utilities.
%package -n libzpool2
Summary: Native ZFS pool library for Linux
Requires: sudo
Requires: sysstat
Requires: libaio
+Requires: python%{__python_pkg_version}
%if 0%{?rhel}%{?fedora}%{?suse_version}
BuildRequires: libaio-devel
%endif
This package contains a dracut module used to construct an initramfs
image which is ZFS aware.
-%if 0%{?_pyzfs}
-%package -n pyzfs
-Summary: Python wrapper for libzfs_core
+%if %{with pyzfs}
+%package -n python%{__python_pkg_version}-pyzfs
+Summary: Python %{python_version} wrapper for libzfs_core
Group: Development/Languages/Python
License: Apache-2.0
BuildArch: noarch
Requires: libzfs2 = %{version}
Requires: libnvpair1 = %{version}
Requires: libffi
-Requires: python >= 2.7
-Requires: python-cffi
+Requires: python%{__python_pkg_version}
+Requires: %{__python_cffi_pkg}
%if 0%{?rhel}%{?fedora}%{?suse_version}
-BuildRequires: python-devel
+BuildRequires: python%{__python_pkg_version}-devel
BuildRequires: libffi-devel
%endif
-%description -n pyzfs
+%description -n python%{__python_pkg_version}-pyzfs
This package provides a python wrapper for the libzfs_core C library.
%endif
%define systemd --enable-sysvinit --disable-systemd
%endif
+%if %{with pyzfs}
+ %define pyzfs --enable-pyzfs
+%else
+ %define pyzfs --disable-pyzfs
+%endif
+
%setup -q
%build
--with-udevdir=%{_udevdir} \
--with-udevruledir=%{_udevruledir} \
--with-dracutdir=%{_dracutdir} \
+ --with-python=%{__python} \
--disable-static \
%{debug} \
%{debuginfo} \
%{asan} \
- %{systemd}
+ %{systemd}\
+ %{pyzfs}
make %{?_smp_mflags}
%install
%endif
%files
+# Core utilities
%{_sbindir}/*
-%{_bindir}/*
-%{_libexecdir}/%{name}
+%{_bindir}/raidz_test
+%{_bindir}/zgenhostid
+# Optional Python 2/3 scripts
+%{_bindir}/arc_summary
+%{_bindir}/arcstat
+%{_bindir}/dbufstat
+# Man pages
%{_mandir}/man1/*
%{_mandir}/man5/*
%{_mandir}/man8/*
+# Configuration files and scripts
+%{_libexecdir}/%{name}
%{_udevdir}/vdev_id
%{_udevdir}/zvol_id
%{_udevdir}/rules.d/*
%doc contrib/dracut/README.dracut.markdown
%{_dracutdir}/modules.d/*
-%if 0%{?_pyzfs}
-%files -n pyzfs
+%if %{with pyzfs}
+%files -n python%{__python_pkg_version}-pyzfs
%doc contrib/pyzfs/README
%doc contrib/pyzfs/LICENSE
%defattr(-,root,root,-)
dist_pkgdata_SCRIPTS = \
test-runner.py \
zts-report.py
+#
+# These scripts are compatible with both Python 2.6 and 3.4. As such, the
+# Python 3 shebang can be replaced at install time when targeting a Python
+# 2 system. This allows us to maintain a single version of the source.
+#
+if USING_PYTHON_2
+install-data-hook:
+ sed --in-place 's|^#!/usr/bin/python3|#!/usr/bin/python2|' \
+ $(DESTDIR)$(pkgdatadir)/test-runner.py \
+ $(DESTDIR)$(pkgdatadir)/zts-report.py
+endif
# Copyright (c) 2012, 2015 by Delphix. All rights reserved.
# Copyright (c) 2017 Datto Inc.
#
+# This script must remain compatible with Python 2.6+ and Python 3.4+.
+#
# some Python 2.7 systems don't have a configparser shim
try:
-#!/usr/bin/python
+#!/usr/bin/python3
#
# This file and its contents are supplied under the terms of the
# Copyright (c) 2017 by Delphix. All rights reserved.
# Copyright (c) 2018 by Lawrence Livermore National Security, LLC.
#
+# This script must remain compatible with Python 2.6+ and Python 3.4+.
+#
import os
import re
zpool
ztest
raidz_test
- arc_summary.py
- arc_summary3.py
- arcstat.py
- dbufstat.py
+ arc_summary
+ arc_summary3
+ arcstat
+ dbufstat
zed
zgenhostid
zstreamdump'
# 2. Store output from dbufs kstat
# 3. Store output from dbufstats kstat
# 4. Compare stats presented in dbufstats with stat generated using
-# dbufstat.py and the dbufs kstat output
+# dbufstat and the dbufs kstat output
#
DBUFSTATS_FILE=$(mktemp $TEST_BASE_DIR/dbufstats.out.XXXXXX)
[[ -n "$2" ]] && filter="-F $2"
from_dbufstat=$(grep -w "$name" "$DBUFSTATS_FILE" | awk '{ print $3 }')
- from_dbufs=$(dbufstat.py -bxn -i "$DBUFS_FILE" "$filter" | wc -l)
+ from_dbufs=$(dbufstat -bxn -i "$DBUFS_FILE" "$filter" | wc -l)
within_tolerance $from_dbufstat $from_dbufs 9 \
|| log_fail "Stat $name exceeded tolerance"
log_note "Object ID for $TESTDIR/file is $objid"
log_must eval "cat /proc/spl/kstat/zfs/dbufs > $DBUFS_FILE"
-dbuf=$(dbufstat.py -bxn -i "$DBUFS_FILE" -F "object=$objid" | wc -l)
-mru=$(dbufstat.py -bxn -i "$DBUFS_FILE" -F "object=$objid,list=1" | wc -l)
-mfu=$(dbufstat.py -bxn -i "$DBUFS_FILE" -F "object=$objid,list=3" | wc -l)
+dbuf=$(dbufstat -bxn -i "$DBUFS_FILE" -F "object=$objid" | wc -l)
+mru=$(dbufstat -bxn -i "$DBUFS_FILE" -F "object=$objid,list=1" | wc -l)
+mfu=$(dbufstat -bxn -i "$DBUFS_FILE" -F "object=$objid,list=3" | wc -l)
log_note "dbuf count is $dbuf, mru count is $mru, mfu count is $mfu"
verify_ne "0" "$mru" "mru count"
verify_eq "0" "$mfu" "mfu count"
log_must eval "cat $TESTDIR/file > /dev/null"
log_must eval "cat /proc/spl/kstat/zfs/dbufs > $DBUFS_FILE"
-dbuf=$(dbufstat.py -bxn -i "$DBUFS_FILE" -F "object=$objid" | wc -l)
-mru=$(dbufstat.py -bxn -i "$DBUFS_FILE" -F "object=$objid,list=1" | wc -l)
-mfu=$(dbufstat.py -bxn -i "$DBUFS_FILE" -F "object=$objid,list=3" | wc -l)
+dbuf=$(dbufstat -bxn -i "$DBUFS_FILE" -F "object=$objid" | wc -l)
+mru=$(dbufstat -bxn -i "$DBUFS_FILE" -F "object=$objid,list=1" | wc -l)
+mfu=$(dbufstat -bxn -i "$DBUFS_FILE" -F "object=$objid,list=3" | wc -l)
log_note "dbuf count is $dbuf, mru count is $mru, mfu count is $mfu"
verify_ne "0" "$mfu" "mfu count"
set -A args "" "-a" "-d" "-p 1" "-g" "-s arc" "-r"
-log_assert "arc_summary3.py generates output and doesn't return an error code"
+log_assert "arc_summary3 generates output and doesn't return an error code"
typeset -i i=0
while [[ $i -lt ${#args[*]} ]]; do
- log_must eval "arc_summary3.py ${args[i]} > /dev/null"
+ log_must eval "arc_summary3 ${args[i]} > /dev/null"
((i = i + 1))
done
-log_pass "arc_summary3.py generates output and doesn't return an error code"
+log_pass "arc_summary3 generates output and doesn't return an error code"
set -A args "" "-a" "-d" "-p 1"
-log_assert "arc_summary.py generates output and doesn't return an error code"
+log_assert "arc_summary generates output and doesn't return an error code"
typeset -i i=0
while [[ $i -lt ${#args[*]} ]]; do
- log_must eval "arc_summary.py ${args[i]} > /dev/null"
+ log_must eval "arc_summary ${args[i]} > /dev/null"
((i = i + 1))
done
-log_must eval "arc_summary.py | head > /dev/null"
-log_must eval "arc_summary.py | head -1 > /dev/null"
+log_must eval "arc_summary | head > /dev/null"
+log_must eval "arc_summary | head -1 > /dev/null"
-log_pass "arc_summary.py generates output and doesn't return an error code"
+log_pass "arc_summary generates output and doesn't return an error code"
typeset args=("-x" "-r" "-5" "-p 7" "--err" "-@")
-log_assert "arc_summary.py generates an error code with invalid options"
+log_assert "arc_summary generates an error code with invalid options"
for arg in "${args[@]}"; do
- log_mustnot eval "arc_summary.py $arg > /dev/null"
+ log_mustnot eval "arc_summary $arg > /dev/null"
done
-log_pass "arc_summary.py generates an error code with invalid options"
+log_pass "arc_summary generates an error code with invalid options"
set -A args "" "-s \",\"" "-x" "-v" \
"-f time,hit%,dh%,ph%,mh%"
-log_assert "arcstat.py generates output and doesn't return an error code"
+log_assert "arcstat generates output and doesn't return an error code"
typeset -i i=0
while [[ $i -lt ${#args[*]} ]]; do
- log_must eval "arcstat.py ${args[i]} > /dev/null"
+ log_must eval "arcstat ${args[i]} > /dev/null"
((i = i + 1))
done
-log_pass "arcstat.py generates output and doesn't return an error code"
+log_pass "arcstat generates output and doesn't return an error code"
set -A args "" "-b" "-d" "-r" "-v" "-s \",\"" "-x" "-n"
-log_assert "dbufstat.py generates output and doesn't return an error code"
+log_assert "dbufstat generates output and doesn't return an error code"
typeset -i i=0
while [[ $i -lt ${#args[*]} ]]; do
- log_must eval "dbufstat.py ${args[i]} > /dev/null"
+ log_must eval "dbufstat ${args[i]} > /dev/null"
((i = i + 1))
done
-# A simple test of dbufstat.py filter functionality
-log_must eval "dbufstat.py -F object=10,dbc=1,pool=$TESTPOOL > /dev/null"
+# A simple test of dbufstat filter functionality
+log_must eval "dbufstat -F object=10,dbc=1,pool=$TESTPOOL > /dev/null"
-log_pass "dbufstat.py generates output and doesn't return an error code"
+log_pass "dbufstat generates output and doesn't return an error code"
--- /dev/null
+pyzfs_unittest.ksh
-pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/pyzfs
-
-dist_pkgdata_SCRIPTS = \
+pkgpyzfsdir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/pyzfs
+pkgpyzfs_SCRIPTS = \
pyzfs_unittest.ksh
+
+EXTRA_DIST = \
+ pyzfs_unittest.ksh.in
+
+#
+# The pyzfs module is built either for Python 2 or Python 3. In order
+# to properly test it, the unit tests must be updated to the matching version.
+#
+$(pkgpyzfs_SCRIPTS):%:%.in
+ -$(SED) -e 's,@PYTHON\@,$(PYTHON),g' \
+ $< >'$@'
+ -chmod 775 $@
+
+distclean-local::
+ -$(RM) $(pkgpyzfs_SCRIPTS)
+++ /dev/null
-#!/bin/ksh -p
-#
-# This file and its contents are supplied under the terms of the
-# Common Development and Distribution License ("CDDL"), version 1.0.
-# You may only use this file in accordance with the terms of version
-# 1.0 of the CDDL.
-#
-# A full copy of the text of the CDDL should have accompanied this
-# source. A copy of the CDDL is also available via the Internet at
-# http://www.illumos.org/license/CDDL.
-#
-
-#
-# Copyright 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
-#
-
-. $STF_SUITE/include/libtest.shlib
-
-#
-# DESCRIPTION:
-# Verify the libzfs_core Python test suite can be run successfully
-#
-# STRATEGY:
-# 1. Run the nvlist and libzfs_core Python unittest
-# 2. Verify the exit code is 0 (no errors)
-#
-
-verify_runnable "global"
-
-# Verify that the required dependencies for testing are installed.
-python -c "import cffi" 2>/dev/null
-if [ $? -eq 1 ]; then
- log_unsupported "python-cffi not found by Python"
-fi
-
-# We don't just try to "import libzfs_core" because we want to skip these tests
-# only if pyzfs was not installed due to missing, build-time, dependencies; if
-# we cannot load "libzfs_core" due to other reasons, for instance an API/ABI
-# mismatch, we want to report it.
-python -c '
-import pkgutil, sys
-sys.exit(pkgutil.find_loader("libzfs_core") is None)'
-if [ $? -eq 1 ]; then
- log_unsupported "libzfs_core not found by Python"
-fi
-
-log_assert "Verify the nvlist and libzfs_core Python unittest run successfully"
-
-# NOTE: don't use log_must() here because it makes output unreadable
-python -m unittest --verbose \
- libzfs_core.test.test_nvlist.TestNVList \
- libzfs_core.test.test_libzfs_core.ZFSTest
-if [ $? -ne 0 ]; then
- log_fail "Python unittest completed with errors"
-fi
-
-log_pass "Python unittest completed without errors"
--- /dev/null
+#!/bin/ksh -p
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+#
+# DESCRIPTION:
+# Verify the libzfs_core Python test suite can be run successfully
+#
+# STRATEGY:
+# 1. Run the nvlist and libzfs_core Python unittest
+# 2. Verify the exit code is 0 (no errors)
+#
+
+verify_runnable "global"
+
+# Verify that the required dependencies for testing are installed.
+@PYTHON@ -c "import cffi" 2>/dev/null
+if [ $? -eq 1 ]; then
+ log_unsupported "python-cffi not found by Python"
+fi
+
+# We don't just try to "import libzfs_core" because we want to skip these tests
+# only if pyzfs was not installed due to missing, build-time, dependencies; if
+# we cannot load "libzfs_core" due to other reasons, for instance an API/ABI
+# mismatch, we want to report it.
+@PYTHON@ -c '
+import pkgutil, sys
+sys.exit(pkgutil.find_loader("libzfs_core") is None)'
+if [ $? -eq 1 ]; then
+ log_unsupported "libzfs_core not found by Python"
+fi
+
+log_assert "Verify the nvlist and libzfs_core Python unittest run successfully"
+
+# NOTE: don't use log_must() here because it makes output unreadable
+@PYTHON@ -m unittest --verbose \
+ libzfs_core.test.test_nvlist.TestNVList \
+ libzfs_core.test.test_libzfs_core.ZFSTest
+if [ $? -ne 0 ]; then
+ log_fail "Python unittest completed with errors"
+fi
+
+log_pass "Python unittest completed without errors"