]> git.proxmox.com Git - mirror_zfs-debian.git/blame - cmd/dbufstat/dbufstat.py
Imported Upstream version 0.6.5.3
[mirror_zfs-debian.git] / cmd / dbufstat / dbufstat.py
CommitLineData
a08ee875
LG
1#!/usr/bin/python
2#
3# Print out statistics for all cached dmu buffers. This information
4# is available through the dbufs kstat and may be post-processed as
5# needed by the script.
6#
7# CDDL HEADER START
8#
9# The contents of this file are subject to the terms of the
10# Common Development and Distribution License, Version 1.0 only
11# (the "License"). You may not use this file except in compliance
12# with the License.
13#
14# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
15# or http://www.opensolaris.org/os/licensing.
16# See the License for the specific language governing permissions
17# and limitations under the License.
18#
19# When distributing Covered Code, include this CDDL HEADER in each
20# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
21# If applicable, add the following below this CDDL HEADER, with the
22# fields enclosed by brackets "[]" replaced with your own identifying
23# information: Portions Copyright [yyyy] [name of copyright owner]
24#
25# CDDL HEADER END
26#
27# Copyright (C) 2013 Lawrence Livermore National Security, LLC.
28# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
29#
30
31import sys
32import getopt
33import errno
34
# Default and extended ("-x") column sets for the per-dbuf report ("-b"),
# plus the fields that cannot be shown at dbuf granularity.
bhdr = ["pool", "objset", "object", "level", "blkid", "offset", "dbsize"]
bxhdr = ["pool", "objset", "object", "level", "blkid", "offset", "dbsize",
         "meta", "state", "dbholds", "list", "atype", "index", "flags",
         "count", "asize", "access", "mru", "gmru", "mfu", "gmfu", "l2",
         "l2_dattr", "l2_asize", "l2_comp", "aholds", "dtype", "btype",
         "data_bs", "meta_bs", "bsize", "lvls", "dholds", "blocks", "dsize"]
bincompat = ["cached", "direct", "indirect", "bonus", "spill"]

# Column sets for the per-dnode report ("-d").
dhdr = ["pool", "objset", "object", "dtype", "cached"]
dxhdr = ["pool", "objset", "object", "dtype", "btype", "data_bs", "meta_bs",
         "bsize", "lvls", "dholds", "blocks", "dsize", "cached", "direct",
         "indirect", "bonus", "spill"]
dincompat = ["level", "blkid", "offset", "dbsize", "meta", "state", "dbholds",
             "list", "atype", "index", "flags", "count", "asize", "access",
             "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr", "l2_asize",
             "l2_comp", "aholds"]

# Column sets for the per-dnode-type report ("-t").
thdr = ["pool", "objset", "dtype", "cached"]
txhdr = ["pool", "objset", "dtype", "cached", "direct", "indirect",
         "bonus", "spill"]
tincompat = ["object", "level", "blkid", "offset", "dbsize", "meta", "state",
             "dbholds", "list", "atype", "index", "flags", "count", "asize",
             "access", "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr",
             "l2_asize", "l2_comp", "aholds", "btype", "data_bs", "meta_bs",
             "bsize", "lvls", "dholds", "blocks", "dsize"]

cols = {
    # hdr: [size, scale, description]
    # size is the printed field width; scale is the divisor handed to
    # prettynum() (1024 for byte quantities, 1000 for counts,
    # -1 = print the value verbatim, unscaled).
    "pool": [15, -1, "pool name"],
    "objset": [6, -1, "dataset identification number"],
    "object": [10, -1, "object number"],
    "level": [5, -1, "indirection level of buffer"],
    "blkid": [8, -1, "block number of buffer"],
    "offset": [12, 1024, "offset in object of buffer"],
    "dbsize": [7, 1024, "size of buffer"],
    "meta": [4, -1, "is this buffer metadata?"],
    "state": [5, -1, "state of buffer (read, cached, etc)"],
    "dbholds": [7, 1000, "number of holds on buffer"],
    "list": [4, -1, "which ARC list contains this buffer"],
    "atype": [7, -1, "ARC header type (data or metadata)"],
    "index": [5, -1, "buffer's index into its ARC list"],
    "flags": [8, -1, "ARC read flags"],
    "count": [5, -1, "ARC data count"],
    "asize": [7, 1024, "size of this ARC buffer"],
    "access": [10, -1, "time this ARC buffer was last accessed"],
    "mru": [5, 1000, "hits while on the ARC's MRU list"],
    "gmru": [5, 1000, "hits while on the ARC's MRU ghost list"],
    "mfu": [5, 1000, "hits while on the ARC's MFU list"],
    "gmfu": [5, 1000, "hits while on the ARC's MFU ghost list"],
    "l2": [5, 1000, "hits while on the L2ARC"],
    "l2_dattr": [8, -1, "L2ARC disk address/offset"],
    "l2_asize": [8, 1024, "L2ARC alloc'd size (depending on compression)"],
    "l2_comp": [21, -1, "L2ARC compression algorithm for buffer"],
    "aholds": [6, 1000, "number of holds on this ARC buffer"],
    "dtype": [27, -1, "dnode type"],
    "btype": [27, -1, "bonus buffer type"],
    "data_bs": [7, 1024, "data block size"],
    "meta_bs": [7, 1024, "metadata block size"],
    "bsize": [6, 1024, "bonus buffer size"],
    "lvls": [6, -1, "number of indirection levels"],
    "dholds": [6, 1000, "number of holds on dnode"],
    "blocks": [8, 1000, "number of allocated blocks"],
    "dsize": [12, 1024, "size of dnode"],
    "cached": [6, 1024, "bytes cached for all blocks"],
    "direct": [6, 1024, "bytes cached for direct blocks"],
    "indirect": [8, 1024, "bytes cached for indirect blocks"],
    "bonus": [5, 1024, "bytes cached for bonus buffer"],
    "spill": [5, 1024, "bytes cached for spill block"],
}

# Active column list for the selected report mode; assigned in main().
hdr = None
xhdr = None
sep = "  "  # Default separator is 2 spaces
cmd = ("Usage: dbufstat.py [-bdhrtvx] [-i file] [-f fields] [-o file] "
       "[-s string]\n")
# Raw-output level: 0 = pretty-print, 1 ("-r") = unscaled numbers,
# >1 ("-rr") = additionally skip enum-name translation.
raw = 0
111
112
def print_incompat_helper(incompat):
    """Write the sorted *incompat* field names to stderr as an indented,
    comma-separated list, wrapping to a new indented line after every
    ninth name.  A trailing blank line terminates the list."""
    cnt = 0
    for key in sorted(incompat):
        # BUGFIX: was "cnt is 0" -- identity comparison on an int only
        # works because CPython caches small ints; use equality.
        if cnt == 0:
            # First name on a (possibly wrapped) line.
            sys.stderr.write("\t")
        elif cnt > 8:
            # Wrap: close the current line and indent the next one.
            sys.stderr.write(",\n\t")
            cnt = 0
        else:
            sys.stderr.write(", ")

        sys.stderr.write("%s" % key)
        cnt += 1

    sys.stderr.write("\n\n")
128
129
def detailed_usage():
    """Print the usage banner, the per-mode incompatible fields, and all
    field definitions to stderr, then exit with status 1."""
    sys.stderr.write("%s\n" % cmd)

    # One incompatibility section per report mode.
    for flag, fields in (("-b", bincompat), ("-d", dincompat),
                         ("-t", tincompat)):
        sys.stderr.write("Field definitions incompatible with "
                         "'%s' option:\n" % flag)
        print_incompat_helper(fields)

    sys.stderr.write("Field definitions are as follows:\n")
    for name in sorted(cols.keys()):
        sys.stderr.write("%11s : %s\n" % (name, cols[name][2]))
    sys.stderr.write("\n")

    sys.exit(1)
148
149
def usage():
    """Print the short usage message with examples to stderr and exit
    with status 1."""
    text = [
        "%s\n" % cmd,
        "\t -b : Print table of information for each dbuf\n",
        "\t -d : Print table of information for each dnode\n",
        "\t -h : Print this help message\n",
        "\t -r : Print raw values\n",
        "\t -t : Print table of information for each dnode type\n",
        "\t -v : List all possible field headers and definitions\n",
        "\t -x : Print extended stats\n",
        "\t -i : Redirect input from the specified file\n",
        "\t -f : Specify specific fields to print (see -v)\n",
        "\t -o : Redirect output to the specified file\n",
        "\t -s : Override default field separator with custom "
        "character or string\n",
        "\nExamples:\n",
        "\tdbufstat.py -d -o /tmp/d.log\n",
        "\tdbufstat.py -t -s \",\" -o /tmp/t.log\n",
        "\tdbufstat.py -v\n",
        "\tdbufstat.py -d -f pool,object,objset,dsize,cached\n",
        "\n",
    ]
    sys.stderr.write("".join(text))

    sys.exit(1)
174
175
def prettynum(sz, scale, num=0):
    """Return *num* right-aligned in a field of width *sz*, repeatedly
    divided by *scale* with a K/M/G/T/P suffix appended once scaled.

    A *scale* of -1, or global raw mode, disables scaling and the value
    is printed verbatim as a string.
    """
    global raw

    units = [' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']

    # Raw mode or an unscalable column: emit the value untouched.
    if raw or scale == -1:
        return "%*s" % (sz, num)

    # A fractional value here is a rounding artifact; treat it as zero.
    if 0 < num < 1:
        num = 0

    idx = 0
    prev = 0
    while num > scale and idx < 5:
        prev = num
        num = num / scale
        idx += 1

    if idx == 0:
        return "%*d" % (sz, num)

    # Keep one decimal place while the scaled value is still small.
    if (prev / scale) < 10:
        return "%*.1f%s" % (sz - 1, num, units[idx])
    return "%*d%s" % (sz - 1, num, units[idx])
202
203
def print_values(v):
    """Write one formatted output row from the column dict *v*; exit
    quietly if the output pipe was closed (e.g. piped into head)."""
    global hdr
    global sep

    try:
        for field in hdr:
            width, scale = cols[field][0], cols[field][1]
            sys.stdout.write("%s%s" %
                             (prettynum(width, scale, v[field]), sep))
        sys.stdout.write("\n")
    except IOError as e:
        # EPIPE just means the reader went away; not an error.
        if e.errno == errno.EPIPE:
            sys.exit(1)
216
217
def print_header():
    """Write the column-name header row to stdout; exit quietly if the
    output pipe was closed."""
    global hdr
    global sep

    try:
        for field in hdr:
            sys.stdout.write("%*s%s" % (cols[field][0], field, sep))
        sys.stdout.write("\n")
    except IOError as e:
        # EPIPE just means the reader went away; not an error.
        if e.errno == errno.EPIPE:
            sys.exit(1)
229
230
def get_typestring(t):
    """Return the DMU_OT_* name for numeric dmu object type *t*.

    With "-rr" (raw > 1) or an index outside the table, the number
    itself is returned as a string.
    """
    names = (
        "DMU_OT_NONE",
        # general:
        "DMU_OT_OBJECT_DIRECTORY", "DMU_OT_OBJECT_ARRAY",
        "DMU_OT_PACKED_NVLIST", "DMU_OT_PACKED_NVLIST_SIZE",
        "DMU_OT_BPOBJ", "DMU_OT_BPOBJ_HDR",
        # spa:
        "DMU_OT_SPACE_MAP_HEADER", "DMU_OT_SPACE_MAP",
        # zil:
        "DMU_OT_INTENT_LOG",
        # dmu:
        "DMU_OT_DNODE", "DMU_OT_OBJSET",
        # dsl:
        "DMU_OT_DSL_DIR", "DMU_OT_DSL_DIR_CHILD_MAP",
        "DMU_OT_DSL_DS_SNAP_MAP", "DMU_OT_DSL_PROPS",
        "DMU_OT_DSL_DATASET",
        # zpl:
        "DMU_OT_ZNODE", "DMU_OT_OLDACL",
        "DMU_OT_PLAIN_FILE_CONTENTS", "DMU_OT_DIRECTORY_CONTENTS",
        "DMU_OT_MASTER_NODE", "DMU_OT_UNLINKED_SET",
        # zvol:
        "DMU_OT_ZVOL", "DMU_OT_ZVOL_PROP",
        # other; for testing only!
        "DMU_OT_PLAIN_OTHER", "DMU_OT_UINT64_OTHER", "DMU_OT_ZAP_OTHER",
        # new object types:
        "DMU_OT_ERROR_LOG", "DMU_OT_SPA_HISTORY",
        "DMU_OT_SPA_HISTORY_OFFSETS", "DMU_OT_POOL_PROPS",
        "DMU_OT_DSL_PERMS", "DMU_OT_ACL", "DMU_OT_SYSACL",
        "DMU_OT_FUID", "DMU_OT_FUID_SIZE", "DMU_OT_NEXT_CLONES",
        "DMU_OT_SCAN_QUEUE", "DMU_OT_USERGROUP_USED",
        "DMU_OT_USERGROUP_QUOTA", "DMU_OT_USERREFS",
        "DMU_OT_DDT_ZAP", "DMU_OT_DDT_STATS",
        "DMU_OT_SA", "DMU_OT_SA_MASTER_NODE",
        "DMU_OT_SA_ATTR_REGISTRATION", "DMU_OT_SA_ATTR_LAYOUTS",
        "DMU_OT_SCAN_XLATE", "DMU_OT_DEDUP",
        "DMU_OT_DEADLIST", "DMU_OT_DEADLIST_HDR",
        "DMU_OT_DSL_CLONES", "DMU_OT_BPOBJ_SUBOBJ",
    )

    # If "-rr" option is used, don't convert to string representation.
    if raw > 1:
        return "%i" % t

    # try/except (rather than a bounds check) keeps the original
    # behavior for negative indices, which wrap from the end.
    try:
        return names[t]
    except IndexError:
        return "%i" % t
304
305
def get_compstring(c):
    """Return the ZIO_COMPRESS_* name for numeric compression type *c*.

    With "-rr" (raw > 1) or an index outside the table, the number
    itself is returned as a string.
    """
    names = ("ZIO_COMPRESS_INHERIT", "ZIO_COMPRESS_ON",
             "ZIO_COMPRESS_OFF", "ZIO_COMPRESS_LZJB",
             "ZIO_COMPRESS_EMPTY", "ZIO_COMPRESS_GZIP_1",
             "ZIO_COMPRESS_GZIP_2", "ZIO_COMPRESS_GZIP_3",
             "ZIO_COMPRESS_GZIP_4", "ZIO_COMPRESS_GZIP_5",
             "ZIO_COMPRESS_GZIP_6", "ZIO_COMPRESS_GZIP_7",
             "ZIO_COMPRESS_GZIP_8", "ZIO_COMPRESS_GZIP_9",
             "ZIO_COMPRESS_ZLE", "ZIO_COMPRESS_LZ4",
             "ZIO_COMPRESS_FUNCTION")

    # If "-rr" option is used, don't convert to string representation.
    if raw > 1:
        return "%i" % c

    try:
        return names[c]
    except IndexError:
        return "%i" % c
325
326
def parse_line(line, labels):
    """Convert one whitespace-split kstat line into a {column: value}
    dict for the columns currently selected in the global hdr list.

    *labels* maps a column name to its index within *line*.
    """
    global hdr

    parsed = dict()
    val = None
    derived = ('bonus', 'cached', 'direct', 'indirect', 'spill')
    for col in hdr:
        # The derived fields are computed later by update_dict() and
        # have no label in the kstat output; skipping the lookup keeps
        # the previous column's raw value in val, matching the original
        # behavior (the placeholder is overwritten afterwards).
        if col not in derived:
            val = line[labels[col]]

        if col in ('pool', 'flags'):
            parsed[col] = str(val)
        elif col in ('dtype', 'btype'):
            parsed[col] = get_typestring(int(val))
        elif col == 'l2_comp':
            parsed[col] = get_compstring(int(val))
        else:
            parsed[col] = int(val)

    return parsed
348
349
def update_dict(d, k, line, labels):
    """Accumulate one dbuf line into the nested dict d[pool][objset][key],
    where key is the line's value for column *k* ('object' or 'dtype').

    Returns *d* (also mutated in place).
    """
    pool = line[labels['pool']]
    objset = line[labels['objset']]
    key = line[labels[k]]

    dbsize = int(line[labels['dbsize']])
    blkid = int(line[labels['blkid']])
    level = int(line[labels['level']])

    objsets = d.setdefault(pool, dict())
    entries = objsets.setdefault(objset, dict())

    entry = entries.get(key)
    if entry is None:
        # First dbuf seen for this key: seed with the parsed row and
        # zeroed accumulators.
        entry = parse_line(line, labels)
        for field in ('bonus', 'cached', 'direct', 'indirect', 'spill'):
            entry[field] = 0
        entries[key] = entry

    entry['cached'] += dbsize

    # blkid -1 marks the bonus buffer and -2 the spill block; any other
    # block is direct (level 0) or indirect (level > 0).
    if blkid == -1:
        entry['bonus'] += dbsize
    elif blkid == -2:
        entry['spill'] += dbsize
    elif level == 0:
        entry['direct'] += dbsize
    else:
        entry['indirect'] += dbsize

    return d
386
387
def print_dict(d):
    """Print a header row followed by every accumulated entry in the
    nested pool/objset/key dict *d*."""
    print_header()
    for objsets in d.values():
        for entries in objsets.values():
            for entry in entries.values():
                print_values(entry)
394
395
def dnodes_build_dict(filehandle):
    """Parse the dbufs kstat from *filehandle* into a nested dict keyed
    by pool, objset and object number."""
    # The first two header lines carry no column data; skip them.
    next(filehandle)
    next(filehandle)

    # The third line names the columns; remember each column's index.
    labels = {name: idx
              for idx, name in enumerate(next(filehandle).split())}

    # Every remaining line describes one buffer.
    dnodes = dict()
    for line in filehandle:
        update_dict(dnodes, 'object', line.split(), labels)

    return dnodes
413
414
def types_build_dict(filehandle):
    """Parse the dbufs kstat from *filehandle* into a nested dict keyed
    by pool, objset and dnode type."""
    # The first two header lines carry no column data; skip them.
    next(filehandle)
    next(filehandle)

    # The third line names the columns; remember each column's index.
    labels = {name: idx
              for idx, name in enumerate(next(filehandle).split())}

    # Every remaining line describes one buffer.
    types = dict()
    for line in filehandle:
        update_dict(types, 'dtype', line.split(), labels)

    return types
432
433
def buffers_print_all(filehandle):
    """Print every dbuf line from *filehandle* as one formatted row,
    preceded by a header row."""
    # The first two header lines carry no column data; skip them.
    next(filehandle)
    next(filehandle)

    # The third line names the columns; remember each column's index.
    labels = {name: idx
              for idx, name in enumerate(next(filehandle).split())}

    print_header()

    # Every remaining line describes one buffer.
    for line in filehandle:
        print_values(parse_line(line.split(), labels))
450
451
def main():
    """Entry point: parse command-line options, select the report mode
    and column set, validate the requested columns, redirect input and
    output if asked, and print the chosen report."""
    global hdr
    global sep
    global raw

    desired_cols = None
    bflag = False
    dflag = False
    hflag = False
    ifile = None
    ofile = None
    tflag = False
    vflag = False
    xflag = False

    try:
        opts, args = getopt.getopt(
            sys.argv[1:],
            "bdf:hi:o:rs:tvx",
            [
                "buffers",
                "dnodes",
                # BUGFIX: long options that take an argument must end in
                # "=" or getopt rejects them outright.
                "columns=",
                "help",
                "infile=",
                "outfile=",
                # BUGFIX: the option loop already handles '--raw' but it
                # was missing from this list.
                "raw",
                # "seperator" misspelling is part of the established
                # command-line interface; keep it.
                "seperator=",
                "types",
                "verbose",
                "extended"
            ]
        )
    except getopt.error:
        usage()          # does not return; exits with status 1
        opts = None

    for opt, arg in opts:
        if opt in ('-b', '--buffers'):
            bflag = True
        if opt in ('-d', '--dnodes'):
            dflag = True
        if opt in ('-f', '--columns'):
            desired_cols = arg
        if opt in ('-h', '--help'):
            hflag = True
        if opt in ('-i', '--infile'):
            ifile = arg
        if opt in ('-o', '--outfile'):
            ofile = arg
        if opt in ('-r', '--raw'):
            raw += 1
        if opt in ('-s', '--seperator'):
            sep = arg
        if opt in ('-t', '--types'):
            tflag = True
        if opt in ('-v', '--verbose'):
            vflag = True
        if opt in ('-x', '--extended'):
            xflag = True

    # -x picks a fixed extended column set, so it conflicts with -f.
    if hflag or (xflag and desired_cols):
        usage()

    if vflag:
        detailed_usage()

    # Ensure at most only one of b, d, or t flags are set
    if (bflag and dflag) or (bflag and tflag) or (dflag and tflag):
        usage()

    if bflag:
        hdr = bxhdr if xflag else bhdr
    elif tflag:
        hdr = txhdr if xflag else thdr
    else:  # Even if dflag is False, it's the default if none set
        dflag = True
        hdr = dxhdr if xflag else dhdr

    if desired_cols:
        hdr = desired_cols.split(",")

    # Reject unknown columns and columns that make no sense in the
    # selected report mode.
    invalid = []
    incompat = []
    for ele in hdr:
        if ele not in cols:
            invalid.append(ele)
        elif ((bflag and bincompat and ele in bincompat) or
              (dflag and dincompat and ele in dincompat) or
              (tflag and tincompat and ele in tincompat)):
            incompat.append(ele)

    if len(invalid) > 0:
        sys.stderr.write("Invalid column definition! -- %s\n" % invalid)
        usage()

    if len(incompat) > 0:
        sys.stderr.write("Incompatible field specified! -- %s\n" %
                         incompat)
        usage()

    if ofile:
        try:
            tmp = open(ofile, "w")
            sys.stdout = tmp

        except IOError:
            sys.stderr.write("Cannot open %s for writing\n" % ofile)
            sys.exit(1)

    if not ifile:
        ifile = '/proc/spl/kstat/zfs/dbufs'

    # BUGFIX: was 'ifile is not "-"' -- identity comparison with a
    # string literal is implementation-dependent; use inequality.
    # "-" means read from the (possibly redirected) standard input.
    if ifile != "-":
        try:
            tmp = open(ifile, "r")
            sys.stdin = tmp
        except IOError:
            sys.stderr.write("Cannot open %s for reading\n" % ifile)
            sys.exit(1)

    if bflag:
        buffers_print_all(sys.stdin)

    if dflag:
        print_dict(dnodes_build_dict(sys.stdin))

    if tflag:
        print_dict(types_build_dict(sys.stdin))
580
# Run only when executed as a script, so the file can also be imported
# without side effects.
if __name__ == '__main__':
    main()