#!/usr/bin/python
#
# Print out statistics for all cached dmu buffers. This information
# is available through the dbufs kstat and may be post-processed as
# needed by the script.
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License, Version 1.0 only
# (the "License"). You may not use this file except in compliance
# with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
# Copyright (C) 2013 Lawrence Livermore National Security, LLC.
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
#
31 | import sys | |
32 | import getopt | |
33 | import errno | |
34 | ||
# Column sets for each reporting mode: default header, extended (-x)
# header, and the fields that are meaningless in that mode.

# Per-dbuf report (-b)
bhdr = ["pool", "objset", "object", "level", "blkid", "offset", "dbsize"]
bxhdr = ["pool", "objset", "object", "level", "blkid", "offset", "dbsize",
         "meta", "state", "dbholds", "list", "atype", "flags",
         "count", "asize", "access", "mru", "gmru", "mfu", "gmfu", "l2",
         "l2_dattr", "l2_asize", "l2_comp", "aholds", "dtype", "btype",
         "data_bs", "meta_bs", "bsize", "lvls", "dholds", "blocks", "dsize"]
bincompat = ["cached", "direct", "indirect", "bonus", "spill"]

# Per-dnode report (-d, the default)
dhdr = ["pool", "objset", "object", "dtype", "cached"]
dxhdr = ["pool", "objset", "object", "dtype", "btype", "data_bs", "meta_bs",
         "bsize", "lvls", "dholds", "blocks", "dsize", "cached", "direct",
         "indirect", "bonus", "spill"]
dincompat = ["level", "blkid", "offset", "dbsize", "meta", "state", "dbholds",
             "list", "atype", "flags", "count", "asize", "access",
             "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr", "l2_asize",
             "l2_comp", "aholds"]

# Per-dnode-type report (-t)
thdr = ["pool", "objset", "dtype", "cached"]
txhdr = ["pool", "objset", "dtype", "cached", "direct", "indirect",
         "bonus", "spill"]
tincompat = ["object", "level", "blkid", "offset", "dbsize", "meta", "state",
             "dbholds", "list", "atype", "flags", "count", "asize",
             "access", "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr",
             "l2_asize", "l2_comp", "aholds", "btype", "data_bs", "meta_bs",
             "bsize", "lvls", "dholds", "blocks", "dsize"]

cols = {
    # hdr: [size, scale, description]
    "pool": [15, -1, "pool name"],
    "objset": [6, -1, "dataset identification number"],
    "object": [10, -1, "object number"],
    "level": [5, -1, "indirection level of buffer"],
    "blkid": [8, -1, "block number of buffer"],
    "offset": [12, 1024, "offset in object of buffer"],
    "dbsize": [7, 1024, "size of buffer"],
    "meta": [4, -1, "is this buffer metadata?"],
    "state": [5, -1, "state of buffer (read, cached, etc)"],
    "dbholds": [7, 1000, "number of holds on buffer"],
    "list": [4, -1, "which ARC list contains this buffer"],
    "atype": [7, -1, "ARC header type (data or metadata)"],
    "flags": [8, -1, "ARC read flags"],
    "count": [5, -1, "ARC data count"],
    "asize": [7, 1024, "size of this ARC buffer"],
    "access": [10, -1, "time this ARC buffer was last accessed"],
    "mru": [5, 1000, "hits while on the ARC's MRU list"],
    "gmru": [5, 1000, "hits while on the ARC's MRU ghost list"],
    "mfu": [5, 1000, "hits while on the ARC's MFU list"],
    "gmfu": [5, 1000, "hits while on the ARC's MFU ghost list"],
    "l2": [5, 1000, "hits while on the L2ARC"],
    "l2_dattr": [8, -1, "L2ARC disk address/offset"],
    "l2_asize": [8, 1024, "L2ARC alloc'd size (depending on compression)"],
    "l2_comp": [21, -1, "L2ARC compression algorithm for buffer"],
    "aholds": [6, 1000, "number of holds on this ARC buffer"],
    "dtype": [27, -1, "dnode type"],
    "btype": [27, -1, "bonus buffer type"],
    "data_bs": [7, 1024, "data block size"],
    "meta_bs": [7, 1024, "metadata block size"],
    "bsize": [6, 1024, "bonus buffer size"],
    "lvls": [6, -1, "number of indirection levels"],
    "dholds": [6, 1000, "number of holds on dnode"],
    "blocks": [8, 1000, "number of allocated blocks"],
    "dsize": [12, 1024, "size of dnode"],
    "cached": [6, 1024, "bytes cached for all blocks"],
    "direct": [6, 1024, "bytes cached for direct blocks"],
    "indirect": [8, 1024, "bytes cached for indirect blocks"],
    "bonus": [5, 1024, "bytes cached for bonus buffer"],
    "spill": [5, 1024, "bytes cached for spill block"],
}

# Runtime state, set up by main() from the command line.
hdr = None           # active column list
xhdr = None
sep = "  "           # Default separator is 2 spaces
cmd = ("Usage: dbufstat.py [-bdhrtvx] [-i file] [-f fields] [-o file] "
       "[-s string]\n")
raw = 0              # incremented once per -r flag
110 | ||
111 | ||
def print_incompat_helper(incompat):
    """Write *incompat* field names to stderr as a sorted, comma-separated
    list, indented one tab and wrapped after nine names per line, followed
    by a blank line."""
    cnt = 0
    for key in sorted(incompat):
        # Fix: was `cnt is 0` -- identity comparison against an int
        # literal, which only works via CPython small-int caching.
        if cnt == 0:
            sys.stderr.write("\t")
        elif cnt > 8:
            sys.stderr.write(",\n\t")
            cnt = 0
        else:
            sys.stderr.write(", ")

        sys.stderr.write("%s" % key)
        cnt += 1

    sys.stderr.write("\n\n")
127 | ||
128 | ||
def detailed_usage():
    """Print the usage banner, the fields incompatible with each report
    mode, and every column definition, then exit with status 0."""
    sys.stderr.write("%s\n" % cmd)

    for flag, fields in (("-b", bincompat), ("-d", dincompat),
                         ("-t", tincompat)):
        sys.stderr.write(
            "Field definitions incompatible with '%s' option:\n" % flag)
        print_incompat_helper(fields)

    sys.stderr.write("Field definitions are as follows:\n")
    for key in sorted(cols.keys()):
        sys.stderr.write("%11s : %s\n" % (key, cols[key][2]))
    sys.stderr.write("\n")

    sys.exit(0)
148 | ||
def usage():
    """Print the short usage text and examples to stderr, then exit
    with status 1."""
    sys.stderr.write("%s\n" % cmd)
    for text in (
            "\t -b : Print table of information for each dbuf\n",
            "\t -d : Print table of information for each dnode\n",
            "\t -h : Print this help message\n",
            "\t -r : Print raw values\n",
            "\t -t : Print table of information for each dnode type\n",
            "\t -v : List all possible field headers and definitions\n",
            "\t -x : Print extended stats\n",
            "\t -i : Redirect input from the specified file\n",
            "\t -f : Specify specific fields to print (see -v)\n",
            "\t -o : Redirect output to the specified file\n",
            "\t -s : Override default field separator with custom "
            "character or string\n",
            "\nExamples:\n",
            "\tdbufstat.py -d -o /tmp/d.log\n",
            "\tdbufstat.py -t -s \",\" -o /tmp/t.log\n",
            "\tdbufstat.py -v\n",
            "\tdbufstat.py -d -f pool,object,objset,dsize,cached\n",
            "\n"):
        sys.stderr.write(text)

    sys.exit(1)
173 | ||
174 | ||
def prettynum(sz, scale, num=0):
    """Format *num* right-justified in *sz* columns.

    When raw output is requested (-r) or the column is unscaled
    (scale == -1) the value is printed verbatim; otherwise it is
    repeatedly divided by *scale* and suffixed with K/M/G/T/P.
    """
    global raw

    suffixes = [' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']

    if raw or scale == -1:
        return "%*s" % (sz, num)

    if 0 < num < 1:
        # Fractional values are rounding artifacts; display as zero.
        num = 0

    index = 0
    save = 0
    # Scale down until the value fits; index is capped so the suffix
    # never goes past 'P'.
    while num > scale and index < 5:
        save = num
        num = num / scale
        index += 1

    if index == 0:
        return "%*d" % (sz, num)

    # One decimal place for small quotients, none for larger ones.
    if (save / scale) < 10:
        return "%*.1f%s" % (sz - 1, num, suffixes[index])
    return "%*d%s" % (sz - 1, num, suffixes[index])
201 | ||
202 | ||
def print_values(v):
    """Print one report row: each column in the global header list,
    formatted per its width/scale in `cols` and joined by `sep`.
    Exits quietly on a broken pipe (e.g. output piped to head)."""
    global hdr
    global sep

    try:
        for name in hdr:
            width, scale = cols[name][0], cols[name][1]
            sys.stdout.write(prettynum(width, scale, v[name]))
            sys.stdout.write(sep)
        sys.stdout.write("\n")
    except IOError as e:
        if e.errno == errno.EPIPE:
            sys.exit(1)
215 | ||
216 | ||
def print_header():
    """Print the column-name header row for the global header list.
    Exits quietly on a broken pipe."""
    global hdr
    global sep

    try:
        for name in hdr:
            sys.stdout.write("%*s%s" % (cols[name][0], name, sep))
        sys.stdout.write("\n")
    except IOError as e:
        if e.errno == errno.EPIPE:
            sys.exit(1)
228 | ||
229 | ||
def get_typestring(t):
    """Return the symbolic DMU object type name for numeric type *t*.

    With "-rr" (raw > 1) or a type code past the end of the table, the
    number itself is returned as a string.  NOTE(review): a negative *t*
    indexes the table from the end (Python negative indexing) rather
    than falling back to the numeric form -- preserved as-is.
    """
    names = ["DMU_OT_NONE",
             # general:
             "DMU_OT_OBJECT_DIRECTORY",
             "DMU_OT_OBJECT_ARRAY",
             "DMU_OT_PACKED_NVLIST",
             "DMU_OT_PACKED_NVLIST_SIZE",
             "DMU_OT_BPOBJ",
             "DMU_OT_BPOBJ_HDR",
             # spa:
             "DMU_OT_SPACE_MAP_HEADER",
             "DMU_OT_SPACE_MAP",
             # zil:
             "DMU_OT_INTENT_LOG",
             # dmu:
             "DMU_OT_DNODE",
             "DMU_OT_OBJSET",
             # dsl:
             "DMU_OT_DSL_DIR",
             "DMU_OT_DSL_DIR_CHILD_MAP",
             "DMU_OT_DSL_DS_SNAP_MAP",
             "DMU_OT_DSL_PROPS",
             "DMU_OT_DSL_DATASET",
             # zpl:
             "DMU_OT_ZNODE",
             "DMU_OT_OLDACL",
             "DMU_OT_PLAIN_FILE_CONTENTS",
             "DMU_OT_DIRECTORY_CONTENTS",
             "DMU_OT_MASTER_NODE",
             "DMU_OT_UNLINKED_SET",
             # zvol:
             "DMU_OT_ZVOL",
             "DMU_OT_ZVOL_PROP",
             # other; for testing only!
             "DMU_OT_PLAIN_OTHER",
             "DMU_OT_UINT64_OTHER",
             "DMU_OT_ZAP_OTHER",
             # new object types:
             "DMU_OT_ERROR_LOG",
             "DMU_OT_SPA_HISTORY",
             "DMU_OT_SPA_HISTORY_OFFSETS",
             "DMU_OT_POOL_PROPS",
             "DMU_OT_DSL_PERMS",
             "DMU_OT_ACL",
             "DMU_OT_SYSACL",
             "DMU_OT_FUID",
             "DMU_OT_FUID_SIZE",
             "DMU_OT_NEXT_CLONES",
             "DMU_OT_SCAN_QUEUE",
             "DMU_OT_USERGROUP_USED",
             "DMU_OT_USERGROUP_QUOTA",
             "DMU_OT_USERREFS",
             "DMU_OT_DDT_ZAP",
             "DMU_OT_DDT_STATS",
             "DMU_OT_SA",
             "DMU_OT_SA_MASTER_NODE",
             "DMU_OT_SA_ATTR_REGISTRATION",
             "DMU_OT_SA_ATTR_LAYOUTS",
             "DMU_OT_SCAN_XLATE",
             "DMU_OT_DEDUP",
             "DMU_OT_DEADLIST",
             "DMU_OT_DEADLIST_HDR",
             "DMU_OT_DSL_CLONES",
             "DMU_OT_BPOBJ_SUBOBJ"]

    # If "-rr" option is used, don't convert to string representation
    if raw > 1:
        return "%i" % t

    try:
        return names[t]
    except IndexError:
        return "%i" % t
303 | ||
304 | ||
def get_compstring(c):
    """Return the symbolic ZIO compression name for numeric code *c*.

    With "-rr" (raw > 1) or a code past the end of the table, the number
    itself is returned as a string.
    """
    names = ["ZIO_COMPRESS_INHERIT", "ZIO_COMPRESS_ON",
             "ZIO_COMPRESS_OFF", "ZIO_COMPRESS_LZJB",
             "ZIO_COMPRESS_EMPTY", "ZIO_COMPRESS_GZIP_1",
             "ZIO_COMPRESS_GZIP_2", "ZIO_COMPRESS_GZIP_3",
             "ZIO_COMPRESS_GZIP_4", "ZIO_COMPRESS_GZIP_5",
             "ZIO_COMPRESS_GZIP_6", "ZIO_COMPRESS_GZIP_7",
             "ZIO_COMPRESS_GZIP_8", "ZIO_COMPRESS_GZIP_9",
             "ZIO_COMPRESS_ZLE", "ZIO_COMPRESS_LZ4",
             "ZIO_COMPRESS_FUNCTION"]

    # If "-rr" option is used, don't convert to string representation
    if raw > 1:
        return "%i" % c

    try:
        return names[c]
    except IndexError:
        return "%i" % c
324 | ||
325 | ||
def parse_line(line, labels):
    """Convert one split kstat row into a dict keyed by the active
    header columns, coercing each value to its display type."""
    global hdr

    computed = ('bonus', 'cached', 'direct', 'indirect', 'spill')
    new = dict()
    val = None
    for col in hdr:
        # The aggregate fields above are synthesized in update_dict and
        # have no column in the kstat output; skip the labels lookup for
        # them (val then carries over from the previous column, matching
        # the original behavior).
        if col not in computed:
            val = line[labels[col]]

        if col in ('pool', 'flags'):
            new[col] = str(val)
        elif col in ('dtype', 'btype'):
            new[col] = get_typestring(int(val))
        elif col == 'l2_comp':
            new[col] = get_compstring(int(val))
        else:
            new[col] = int(val)

    return new
347 | ||
348 | ||
def update_dict(d, k, line, labels):
    """Accumulate one dbuf row into the nested d[pool][objset][key]
    structure, where key is the row's value for column *k*, tracking
    cached bytes split into bonus/spill/direct/indirect."""
    pool = line[labels['pool']]
    objset = line[labels['objset']]
    key = line[labels[k]]

    dbsize = int(line[labels['dbsize']])
    blkid = int(line[labels['blkid']])
    level = int(line[labels['level']])

    objsets = d.setdefault(pool, dict())
    entries = objsets.setdefault(objset, dict())

    if key not in entries:
        entry = parse_line(line, labels)
        for agg in ('bonus', 'cached', 'direct', 'indirect', 'spill'):
            entry[agg] = 0
        entries[key] = entry
    else:
        entry = entries[key]

    entry['cached'] += dbsize

    # blkid -1 marks the bonus buffer and -2 the spill block; all other
    # blocks are classified by indirection level.
    if blkid == -1:
        entry['bonus'] += dbsize
    elif blkid == -2:
        entry['spill'] += dbsize
    elif level == 0:
        entry['direct'] += dbsize
    else:
        entry['indirect'] += dbsize

    return d
385 | ||
386 | ||
def print_dict(d):
    """Print the header row followed by every accumulated entry in the
    nested pool/objset dict built by update_dict."""
    print_header()
    for objsets in list(d.values()):
        for entries in list(objsets.values()):
            for entry in list(entries.values()):
                print_values(entry)
393 | ||
394 | ||
def dnodes_build_dict(filehandle):
    """Aggregate the dbufs kstat stream per object (dnode) and return
    the nested pool/objset/object dict."""
    dnodes = dict()

    # The kstat begins with three header lines; the first two are
    # discarded and the third supplies the column labels and positions.
    next(filehandle)
    next(filehandle)

    labels = dict()
    for idx, name in enumerate(next(filehandle).split()):
        labels[name] = idx

    # Every remaining line describes one cached buffer.
    for line in filehandle:
        update_dict(dnodes, 'object', line.split(), labels)

    return dnodes
412 | ||
413 | ||
def types_build_dict(filehandle):
    """Aggregate the dbufs kstat stream per dnode type and return the
    nested pool/objset/dtype dict."""
    types = dict()

    # The kstat begins with three header lines; the first two are
    # discarded and the third supplies the column labels and positions.
    next(filehandle)
    next(filehandle)

    labels = dict()
    for idx, name in enumerate(next(filehandle).split()):
        labels[name] = idx

    # Every remaining line describes one cached buffer.
    for line in filehandle:
        update_dict(types, 'dtype', line.split(), labels)

    return types
431 | ||
432 | ||
def buffers_print_all(filehandle):
    """Print one formatted row per dbuf, streamed straight from the
    kstat with no aggregation."""
    # The kstat begins with three header lines; the first two are
    # discarded and the third supplies the column labels and positions.
    next(filehandle)
    next(filehandle)

    labels = dict()
    for idx, name in enumerate(next(filehandle).split()):
        labels[name] = idx

    print_header()

    # Every remaining line describes one cached buffer.
    for line in filehandle:
        print_values(parse_line(line.split(), labels))
449 | ||
450 | ||
def main():
    """Parse command-line options, select the report mode and column
    set, wire up input/output redirection, and emit the report."""
    global hdr
    global sep
    global raw

    desired_cols = None
    bflag = False
    dflag = False
    hflag = False
    ifile = None
    ofile = None
    tflag = False
    vflag = False
    xflag = False

    try:
        opts, args = getopt.getopt(
            sys.argv[1:],
            "bdf:hi:o:rs:tvx",
            [
                "buffers",
                "dnodes",
                # Fix: long options that take a value must be declared
                # with a trailing '=' or getopt never consumes their
                # argument (matching the f:/i:/o:/s: short options).
                "columns=",
                "help",
                "infile=",
                "outfile=",
                "separator=",
                "types",
                "verbose",
                "extended"
            ]
        )
    except getopt.error:
        usage()
        opts = None  # not reached: usage() calls sys.exit(1)

    for opt, arg in opts:
        if opt in ('-b', '--buffers'):
            bflag = True
        if opt in ('-d', '--dnodes'):
            dflag = True
        if opt in ('-f', '--columns'):
            desired_cols = arg
        if opt in ('-h', '--help'):
            hflag = True
        if opt in ('-i', '--infile'):
            ifile = arg
        if opt in ('-o', '--outfile'):
            ofile = arg
        if opt in ('-r', '--raw'):
            raw += 1
        if opt in ('-s', '--separator'):
            sep = arg
        if opt in ('-t', '--types'):
            tflag = True
        if opt in ('-v', '--verbose'):
            vflag = True
        if opt in ('-x', '--extended'):
            xflag = True

    if hflag or (xflag and desired_cols):
        usage()

    if vflag:
        detailed_usage()

    # Ensure at most only one of b, d, or t flags are set
    if (bflag and dflag) or (bflag and tflag) or (dflag and tflag):
        usage()

    if bflag:
        hdr = bxhdr if xflag else bhdr
    elif tflag:
        hdr = txhdr if xflag else thdr
    else:  # Even if dflag is False, it's the default if none set
        dflag = True
        hdr = dxhdr if xflag else dhdr

    if desired_cols:
        hdr = desired_cols.split(",")

        invalid = []
        incompat = []
        for ele in hdr:
            if ele not in cols:
                invalid.append(ele)
            elif ((bflag and bincompat and ele in bincompat) or
                  (dflag and dincompat and ele in dincompat) or
                  (tflag and tincompat and ele in tincompat)):
                incompat.append(ele)

        if len(invalid) > 0:
            sys.stderr.write("Invalid column definition! -- %s\n" % invalid)
            usage()

        if len(incompat) > 0:
            sys.stderr.write("Incompatible field specified! -- %s\n" %
                             incompat)
            usage()

    if ofile:
        try:
            tmp = open(ofile, "w")
            sys.stdout = tmp

        except IOError:
            sys.stderr.write("Cannot open %s for writing\n" % ofile)
            sys.exit(1)

    if not ifile:
        ifile = '/proc/spl/kstat/zfs/dbufs'

    # Fix: was `ifile is not "-"` -- identity comparison against a
    # string literal, which only worked via CPython string interning.
    if ifile != "-":
        try:
            tmp = open(ifile, "r")
            sys.stdin = tmp
        except IOError:
            sys.stderr.write("Cannot open %s for reading\n" % ifile)
            sys.exit(1)

    if bflag:
        buffers_print_all(sys.stdin)

    if dflag:
        print_dict(dnodes_build_dict(sys.stdin))

    if tflag:
        print_dict(types_build_dict(sys.stdin))


if __name__ == '__main__':
    main()