git.proxmox.com / mirror_zfs.git: cmd/zpool/zpool_iter.c
Commit: Add -c to zpool iostat & status to run command
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */



#include <libintl.h>
#include <libuutil.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>

#include <libzfs.h>
#include <sys/zfs_context.h>

#include "zpool_util.h"

/*
 * Private interface for iterating over pools specified on the command line.
 * Most consumers will call for_each_pool, but in order to support iostat, we
 * allow fine-grained control through the zpool_list_t interface.
 */

typedef struct zpool_node {
	zpool_handle_t	*zn_handle;
	uu_avl_node_t	zn_avlnode;
	int		zn_mark;
} zpool_node_t;

struct zpool_list {
	boolean_t	zl_findall;
	uu_avl_t	*zl_avl;
	uu_avl_pool_t	*zl_pool;
	zprop_list_t	**zl_proplist;
};
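
/*
 * Illustrative usage sketch (not part of the original file): the
 * fine-grained interface is typically driven the way zpool iostat does,
 * keeping one list alive across reporting intervals.  Here iostat_cb is a
 * hypothetical zpool_iter_f callback, and done/interval are hypothetical
 * loop controls.
 *
 *	zpool_list_t *list;
 *	int err = 0;
 *
 *	list = pool_list_get(argc, argv, NULL, &err);
 *	while (!done) {
 *		pool_list_update(list);
 *		(void) pool_list_iter(list, B_FALSE, iostat_cb, NULL);
 *		(void) sleep(interval);
 *	}
 *	pool_list_free(list);
 */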

/* ARGSUSED */
static int
zpool_compare(const void *larg, const void *rarg, void *unused)
{
	zpool_handle_t *l = ((zpool_node_t *)larg)->zn_handle;
	zpool_handle_t *r = ((zpool_node_t *)rarg)->zn_handle;
	const char *lname = zpool_get_name(l);
	const char *rname = zpool_get_name(r);

	return (strcmp(lname, rname));
}

/*
 * Callback function for pool_list_get(). Adds the given pool to the AVL tree
 * of known pools.
 */
static int
add_pool(zpool_handle_t *zhp, void *data)
{
	zpool_list_t *zlp = data;
	zpool_node_t *node = safe_malloc(sizeof (zpool_node_t));
	uu_avl_index_t idx;

	node->zn_handle = zhp;
	uu_avl_node_init(node, &node->zn_avlnode, zlp->zl_pool);
	if (uu_avl_find(zlp->zl_avl, node, NULL, &idx) == NULL) {
		if (zlp->zl_proplist &&
		    zpool_expand_proplist(zhp, zlp->zl_proplist) != 0) {
			zpool_close(zhp);
			free(node);
			return (-1);
		}
		uu_avl_insert(zlp->zl_avl, node, idx);
	} else {
		zpool_close(zhp);
		free(node);
		return (-1);
	}

	return (0);
}

/*
 * Create a list of pools based on the given arguments. If we're given no
 * arguments, then iterate over all pools in the system and add them to the AVL
 * tree. Otherwise, add only those pools explicitly specified on the command
 * line.
 */
zpool_list_t *
pool_list_get(int argc, char **argv, zprop_list_t **proplist, int *err)
{
	zpool_list_t *zlp;

	zlp = safe_malloc(sizeof (zpool_list_t));

	zlp->zl_pool = uu_avl_pool_create("zfs_pool", sizeof (zpool_node_t),
	    offsetof(zpool_node_t, zn_avlnode), zpool_compare, UU_DEFAULT);

	if (zlp->zl_pool == NULL)
		zpool_no_memory();

	if ((zlp->zl_avl = uu_avl_create(zlp->zl_pool, NULL,
	    UU_DEFAULT)) == NULL)
		zpool_no_memory();

	zlp->zl_proplist = proplist;

	if (argc == 0) {
		(void) zpool_iter(g_zfs, add_pool, zlp);
		zlp->zl_findall = B_TRUE;
	} else {
		int i;

		for (i = 0; i < argc; i++) {
			zpool_handle_t *zhp;

			if ((zhp = zpool_open_canfail(g_zfs, argv[i]))) {
				if (add_pool(zhp, zlp) != 0)
					*err = B_TRUE;
			} else {
				*err = B_TRUE;
			}
		}
	}

	return (zlp);
}

/*
 * Search for any new pools, adding them to the list. We only add pools when
 * no pools were specified on the command line. Otherwise, we keep the list
 * fixed as those that were explicitly specified.
 */
void
pool_list_update(zpool_list_t *zlp)
{
	if (zlp->zl_findall)
		(void) zpool_iter(g_zfs, add_pool, zlp);
}

/*
 * Iterate over all pools in the list, executing the callback for each.
 */
int
pool_list_iter(zpool_list_t *zlp, int unavail, zpool_iter_f func,
    void *data)
{
	zpool_node_t *node, *next_node;
	int ret = 0;

	for (node = uu_avl_first(zlp->zl_avl); node != NULL; node = next_node) {
		next_node = uu_avl_next(zlp->zl_avl, node);
		if (zpool_get_state(node->zn_handle) != POOL_STATE_UNAVAIL ||
		    unavail)
			ret |= func(node->zn_handle, data);
	}

	return (ret);
}

/*
 * Remove the given pool from the list. When running iostat, we want to remove
 * those pools that no longer exist.
 */
void
pool_list_remove(zpool_list_t *zlp, zpool_handle_t *zhp)
{
	zpool_node_t search, *node;

	search.zn_handle = zhp;
	if ((node = uu_avl_find(zlp->zl_avl, &search, NULL, NULL)) != NULL) {
		uu_avl_remove(zlp->zl_avl, node);
		zpool_close(node->zn_handle);
		free(node);
	}
}

/*
 * Free all the handles associated with this list.
 */
void
pool_list_free(zpool_list_t *zlp)
{
	uu_avl_walk_t *walk;
	zpool_node_t *node;

	if ((walk = uu_avl_walk_start(zlp->zl_avl, UU_WALK_ROBUST)) == NULL) {
		(void) fprintf(stderr,
		    gettext("internal error: out of memory"));
		exit(1);
	}

	while ((node = uu_avl_walk_next(walk)) != NULL) {
		uu_avl_remove(zlp->zl_avl, node);
		zpool_close(node->zn_handle);
		free(node);
	}

	uu_avl_walk_end(walk);
	uu_avl_destroy(zlp->zl_avl);
	uu_avl_pool_destroy(zlp->zl_pool);

	free(zlp);
}

/*
 * Returns the number of elements in the pool list.
 */
int
pool_list_count(zpool_list_t *zlp)
{
	return (uu_avl_numnodes(zlp->zl_avl));
}

/*
 * High level function which iterates over all pools given on the command line,
 * using the pool_list_* interfaces.
 */
int
for_each_pool(int argc, char **argv, boolean_t unavail,
    zprop_list_t **proplist, zpool_iter_f func, void *data)
{
	zpool_list_t *list;
	int ret = 0;

	if ((list = pool_list_get(argc, argv, proplist, &ret)) == NULL)
		return (1);

	if (pool_list_iter(list, unavail, func, data) != 0)
		ret = 1;

	pool_list_free(list);

	return (ret);
}

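/*
 * Illustrative usage sketch (not part of the original file): a one-shot
 * caller of for_each_pool().  The callback name is hypothetical.
 *
 *	static int
 *	show_pool_cb(zpool_handle_t *zhp, void *data)
 *	{
 *		(void) printf("%s\n", zpool_get_name(zhp));
 *		return (0);
 *	}
 *
 *	ret = for_each_pool(argc, argv, B_FALSE, NULL, show_pool_cb, NULL);
 */
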
static int
for_each_vdev_cb(zpool_handle_t *zhp, nvlist_t *nv, pool_vdev_iter_f func,
    void *data)
{
	nvlist_t **child;
	uint_t c, children;
	int ret = 0;
	int i;
	char *type;

	const char *list[] = {
	    ZPOOL_CONFIG_SPARES,
	    ZPOOL_CONFIG_L2CACHE,
	    ZPOOL_CONFIG_CHILDREN
	};

	for (i = 0; i < ARRAY_SIZE(list); i++) {
		if (nvlist_lookup_nvlist_array(nv, list[i], &child,
		    &children) == 0) {
			for (c = 0; c < children; c++) {
				uint64_t ishole = 0;

				(void) nvlist_lookup_uint64(child[c],
				    ZPOOL_CONFIG_IS_HOLE, &ishole);

				if (ishole)
					continue;

				ret |= for_each_vdev_cb(zhp, child[c], func,
				    data);
			}
		}
	}

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (ret);

	/* Don't run our function on root vdevs */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0) {
		ret |= func(zhp, nv, data);
	}

	return (ret);
}

/*
 * This is the equivalent of for_each_pool() for vdevs. It iterates through
 * all vdevs in the pool, ignoring root vdevs and holes, calling func() on
 * each one.
 *
 * @zhp: Zpool handle
 * @func: Function to call on each vdev
 * @data: Custom data to pass to the function
 */
int
for_each_vdev(zpool_handle_t *zhp, pool_vdev_iter_f func, void *data)
{
	nvlist_t *config, *nvroot = NULL;

	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
	}
	return (for_each_vdev_cb(zhp, nvroot, func, data));
}

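/*
 * Illustrative usage sketch (not part of the original file): a callback
 * that prints the path of each vdev that has one.  Names are hypothetical.
 *
 *	static int
 *	print_vdev_cb(zpool_handle_t *zhp, nvlist_t *nv, void *data)
 *	{
 *		char *path;
 *
 *		if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0)
 *			(void) printf("%s\n", path);
 *		return (0);
 *	}
 *
 *	(void) for_each_vdev(zhp, print_vdev_cb, NULL);
 */
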
/* Thread function run for each vdev */
static void
vdev_run_cmd_thread(void *cb_cmd_data)
{
	vdev_cmd_data_t *data = cb_cmd_data;
	char *pos = NULL;
	FILE *fp;
	size_t len = 0;
	char cmd[_POSIX_ARG_MAX];

	/* Set our VDEV_PATH and VDEV_UPATH env vars and run command */
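	/*
	 * Illustrative note (not from the original): for a hypothetical vdev
	 * /dev/sda1 whose underlying device is /dev/sda, and a user command
	 * of "smartctl -a $VDEV_UPATH", the string built below would be:
	 *
	 *	VDEV_PATH=/dev/sda1 && VDEV_UPATH=/dev/sda && smartctl -a $VDEV_UPATH
	 */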
	if (snprintf(cmd, sizeof (cmd), "VDEV_PATH=%s && VDEV_UPATH=%s && %s",
	    data->path, data->upath ? data->upath : "\"\"", data->cmd) >=
	    sizeof (cmd)) {
		/* Our string was truncated */
		return;
	}

	fp = popen(cmd, "r");
	if (fp == NULL)
		return;

	data->line = NULL;

	/* Save the first line of output from the command */
	if (getline(&data->line, &len, fp) != -1) {
		/* Success. Remove newline from the end, if necessary. */
		if ((pos = strchr(data->line, '\n')) != NULL)
			*pos = '\0';
	} else {
		data->line = NULL;
	}
	pclose(fp);
}

/* For each vdev in the pool run a command */
static int
for_each_vdev_run_cb(zpool_handle_t *zhp, nvlist_t *nv, void *cb_vcdl)
{
	vdev_cmd_data_list_t *vcdl = cb_vcdl;
	vdev_cmd_data_t *data;
	char *path = NULL;
	int i;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		return (1);

	/* Spares show up more than once if in use, so skip duplicates */
	for (i = 0; i < vcdl->count; i++) {
		if ((strcmp(vcdl->data[i].path, path) == 0) &&
		    (strcmp(vcdl->data[i].pool, zpool_get_name(zhp)) == 0)) {
			/* vdev already exists, skip it */
			return (0);
		}
	}

	/*
	 * Resize our array and add in the new element.
	 */
	if (!(vcdl->data = realloc(vcdl->data,
	    sizeof (*vcdl->data) * (vcdl->count + 1))))
		return (ENOMEM);	/* couldn't realloc */

	data = &vcdl->data[vcdl->count];

	data->pool = strdup(zpool_get_name(zhp));
	data->path = strdup(path);
	data->upath = zfs_get_underlying_path(path);
	data->cmd = vcdl->cmd;

	vcdl->count++;

	return (0);
}

/* Get the names and count of the vdevs */
static int
all_pools_for_each_vdev_gather_cb(zpool_handle_t *zhp, void *cb_vcdl)
{
	return (for_each_vdev(zhp, for_each_vdev_run_cb, cb_vcdl));
}

/*
 * Now that vcdl is populated with our complete list of vdevs, spawn
 * off the commands.
 */
static void
all_pools_for_each_vdev_run_vcdl(vdev_cmd_data_list_t *vcdl)
{
	taskq_t *t;
	int i;
	/* 5 * boot_ncpus selfishly chosen since it works best on LLNL's HW */
	int max_threads = 5 * boot_ncpus;

	/*
	 * Under Linux we use a taskq to parallelize running a command
	 * on each vdev. It is therefore necessary to initialize this
	 * functionality for the duration of the threads.
	 */
	thread_init();

	t = taskq_create("z_pool_cmd", max_threads, defclsyspri, max_threads,
	    INT_MAX, 0);
	if (t == NULL)
		return;

	/* Spawn off the command for each vdev */
	for (i = 0; i < vcdl->count; i++) {
		(void) taskq_dispatch(t, vdev_run_cmd_thread,
		    (void *) &vcdl->data[i], TQ_SLEEP);
	}

	/* Wait for threads to finish */
	taskq_wait(t);
	taskq_destroy(t);
	thread_fini();
}

/*
 * Run command 'cmd' on all vdevs in all pools. Saves the first line of output
 * from the command in vcdl->data[].line for all vdevs.
 *
 * Returns a vdev_cmd_data_list_t that must be freed with
 * free_vdev_cmd_data_list().
 */
vdev_cmd_data_list_t *
all_pools_for_each_vdev_run(int argc, char **argv, char *cmd)
{
	vdev_cmd_data_list_t *vcdl;
	vcdl = safe_malloc(sizeof (*vcdl));
	vcdl->cmd = cmd;

	/* Gather our list of all vdevs in all pools */
	for_each_pool(argc, argv, B_TRUE, NULL,
	    all_pools_for_each_vdev_gather_cb, vcdl);

	/* Run command on all vdevs in all pools */
	all_pools_for_each_vdev_run_vcdl(vcdl);

	return (vcdl);
}

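/*
 * Illustrative usage sketch (not part of the original file): how a caller
 * such as zpool iostat -c might consume the list.  The command string and
 * variable names here are hypothetical.
 *
 *	vdev_cmd_data_list_t *vcdl;
 *	int i;
 *
 *	vcdl = all_pools_for_each_vdev_run(argc, argv, "echo $VDEV_UPATH");
 *	for (i = 0; i < vcdl->count; i++) {
 *		if (vcdl->data[i].line != NULL)
 *			(void) printf("%s %s: %s\n", vcdl->data[i].pool,
 *			    vcdl->data[i].path, vcdl->data[i].line);
 *	}
 *	free_vdev_cmd_data_list(vcdl);
 */
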
/*
 * Free the vdev_cmd_data_list_t created by all_pools_for_each_vdev_run()
 */
void
free_vdev_cmd_data_list(vdev_cmd_data_list_t *vcdl)
{
	int i;
	for (i = 0; i < vcdl->count; i++) {
		free(vcdl->data[i].path);
		free(vcdl->data[i].pool);
		free(vcdl->data[i].upath);
		free(vcdl->data[i].line);
	}
	free(vcdl->data);
	free(vcdl);
}