4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
36 #include <sys/zfs_context.h>
38 #include "zpool_util.h"
41 * Private interface for iterating over pools specified on the command line.
42 * Most consumers will call for_each_pool, but in order to support iostat, we
43 * allow fine-grained control through the zpool_list_t interface.
46 typedef struct zpool_node
{
47 zpool_handle_t
*zn_handle
;
48 uu_avl_node_t zn_avlnode
;
55 uu_avl_pool_t
*zl_pool
;
56 zprop_list_t
**zl_proplist
;
61 zpool_compare(const void *larg
, const void *rarg
, void *unused
)
63 zpool_handle_t
*l
= ((zpool_node_t
*)larg
)->zn_handle
;
64 zpool_handle_t
*r
= ((zpool_node_t
*)rarg
)->zn_handle
;
65 const char *lname
= zpool_get_name(l
);
66 const char *rname
= zpool_get_name(r
);
68 return (strcmp(lname
, rname
));
72 * Callback function for pool_list_get(). Adds the given pool to the AVL tree
76 add_pool(zpool_handle_t
*zhp
, void *data
)
78 zpool_list_t
*zlp
= data
;
79 zpool_node_t
*node
= safe_malloc(sizeof (zpool_node_t
));
82 node
->zn_handle
= zhp
;
83 uu_avl_node_init(node
, &node
->zn_avlnode
, zlp
->zl_pool
);
84 if (uu_avl_find(zlp
->zl_avl
, node
, NULL
, &idx
) == NULL
) {
85 if (zlp
->zl_proplist
&&
86 zpool_expand_proplist(zhp
, zlp
->zl_proplist
) != 0) {
91 uu_avl_insert(zlp
->zl_avl
, node
, idx
);
102 * Create a list of pools based on the given arguments. If we're given no
103 * arguments, then iterate over all pools in the system and add them to the AVL
104 * tree. Otherwise, add only those pool explicitly specified on the command
108 pool_list_get(int argc
, char **argv
, zprop_list_t
**proplist
, int *err
)
112 zlp
= safe_malloc(sizeof (zpool_list_t
));
114 zlp
->zl_pool
= uu_avl_pool_create("zfs_pool", sizeof (zpool_node_t
),
115 offsetof(zpool_node_t
, zn_avlnode
), zpool_compare
, UU_DEFAULT
);
117 if (zlp
->zl_pool
== NULL
)
120 if ((zlp
->zl_avl
= uu_avl_create(zlp
->zl_pool
, NULL
,
121 UU_DEFAULT
)) == NULL
)
124 zlp
->zl_proplist
= proplist
;
127 (void) zpool_iter(g_zfs
, add_pool
, zlp
);
128 zlp
->zl_findall
= B_TRUE
;
132 for (i
= 0; i
< argc
; i
++) {
135 if ((zhp
= zpool_open_canfail(g_zfs
, argv
[i
]))) {
136 if (add_pool(zhp
, zlp
) != 0)
148 * Search for any new pools, adding them to the list. We only add pools when no
149 * options were given on the command line. Otherwise, we keep the list fixed as
150 * those that were explicitly specified.
153 pool_list_update(zpool_list_t
*zlp
)
156 (void) zpool_iter(g_zfs
, add_pool
, zlp
);
160 * Iterate over all pools in the list, executing the callback for each
163 pool_list_iter(zpool_list_t
*zlp
, int unavail
, zpool_iter_f func
,
166 zpool_node_t
*node
, *next_node
;
169 for (node
= uu_avl_first(zlp
->zl_avl
); node
!= NULL
; node
= next_node
) {
170 next_node
= uu_avl_next(zlp
->zl_avl
, node
);
171 if (zpool_get_state(node
->zn_handle
) != POOL_STATE_UNAVAIL
||
173 ret
|= func(node
->zn_handle
, data
);
180 * Remove the given pool from the list. When running iostat, we want to remove
181 * those pools that no longer exist.
184 pool_list_remove(zpool_list_t
*zlp
, zpool_handle_t
*zhp
)
186 zpool_node_t search
, *node
;
188 search
.zn_handle
= zhp
;
189 if ((node
= uu_avl_find(zlp
->zl_avl
, &search
, NULL
, NULL
)) != NULL
) {
190 uu_avl_remove(zlp
->zl_avl
, node
);
191 zpool_close(node
->zn_handle
);
197 * Free all the handles associated with this list.
200 pool_list_free(zpool_list_t
*zlp
)
205 if ((walk
= uu_avl_walk_start(zlp
->zl_avl
, UU_WALK_ROBUST
)) == NULL
) {
206 (void) fprintf(stderr
,
207 gettext("internal error: out of memory"));
211 while ((node
= uu_avl_walk_next(walk
)) != NULL
) {
212 uu_avl_remove(zlp
->zl_avl
, node
);
213 zpool_close(node
->zn_handle
);
217 uu_avl_walk_end(walk
);
218 uu_avl_destroy(zlp
->zl_avl
);
219 uu_avl_pool_destroy(zlp
->zl_pool
);
225 * Returns the number of elements in the pool list.
228 pool_list_count(zpool_list_t
*zlp
)
230 return (uu_avl_numnodes(zlp
->zl_avl
));
234 * High level function which iterates over all pools given on the command line,
235 * using the pool_list_* interfaces.
238 for_each_pool(int argc
, char **argv
, boolean_t unavail
,
239 zprop_list_t
**proplist
, zpool_iter_f func
, void *data
)
244 if ((list
= pool_list_get(argc
, argv
, proplist
, &ret
)) == NULL
)
247 if (pool_list_iter(list
, unavail
, func
, data
) != 0)
250 pool_list_free(list
);
256 for_each_vdev_cb(zpool_handle_t
*zhp
, nvlist_t
*nv
, pool_vdev_iter_f func
,
265 const char *list
[] = {
267 ZPOOL_CONFIG_L2CACHE
,
268 ZPOOL_CONFIG_CHILDREN
271 for (i
= 0; i
< ARRAY_SIZE(list
); i
++) {
272 if (nvlist_lookup_nvlist_array(nv
, list
[i
], &child
,
274 for (c
= 0; c
< children
; c
++) {
277 (void) nvlist_lookup_uint64(child
[c
],
278 ZPOOL_CONFIG_IS_HOLE
, &ishole
);
283 ret
|= for_each_vdev_cb(zhp
, child
[c
], func
,
289 if (nvlist_lookup_string(nv
, ZPOOL_CONFIG_TYPE
, &type
) != 0)
292 /* Don't run our function on root vdevs */
293 if (strcmp(type
, VDEV_TYPE_ROOT
) != 0) {
294 ret
|= func(zhp
, nv
, data
);
301 * This is the equivalent of for_each_pool() for vdevs. It iterates thorough
302 * all vdevs in the pool, ignoring root vdevs and holes, calling func() on
306 * @func: Function to call on each vdev
307 * @data: Custom data to pass to the function
310 for_each_vdev(zpool_handle_t
*zhp
, pool_vdev_iter_f func
, void *data
)
312 nvlist_t
*config
, *nvroot
= NULL
;
314 if ((config
= zpool_get_config(zhp
, NULL
)) != NULL
) {
315 verify(nvlist_lookup_nvlist(config
, ZPOOL_CONFIG_VDEV_TREE
,
318 return (for_each_vdev_cb(zhp
, nvroot
, func
, data
));
321 /* Thread function run for each vdev */
323 vdev_run_cmd_thread(void *cb_cmd_data
)
325 vdev_cmd_data_t
*data
= cb_cmd_data
;
329 char cmd
[_POSIX_ARG_MAX
];
331 /* Set our VDEV_PATH and VDEV_UPATH env vars and run command */
332 if (snprintf(cmd
, sizeof (cmd
), "VDEV_PATH=%s && VDEV_UPATH=%s && %s",
333 data
->path
, data
->upath
? data
->upath
: "\"\"", data
->cmd
) >=
335 /* Our string was truncated */
339 fp
= popen(cmd
, "r");
345 /* Save the first line of output from the command */
346 if (getline(&data
->line
, &len
, fp
) != -1) {
347 /* Success. Remove newline from the end, if necessary. */
348 if ((pos
= strchr(data
->line
, '\n')) != NULL
)
356 /* For each vdev in the pool run a command */
358 for_each_vdev_run_cb(zpool_handle_t
*zhp
, nvlist_t
*nv
, void *cb_vcdl
)
360 vdev_cmd_data_list_t
*vcdl
= cb_vcdl
;
361 vdev_cmd_data_t
*data
;
365 if (nvlist_lookup_string(nv
, ZPOOL_CONFIG_PATH
, &path
) != 0)
368 /* Spares show more than once if they're in use, so skip if exists */
369 for (i
= 0; i
< vcdl
->count
; i
++) {
370 if ((strcmp(vcdl
->data
[i
].path
, path
) == 0) &&
371 (strcmp(vcdl
->data
[i
].pool
, zpool_get_name(zhp
)) == 0)) {
372 /* vdev already exists, skip it */
378 * Resize our array and add in the new element.
380 if (!(vcdl
->data
= realloc(vcdl
->data
,
381 sizeof (*vcdl
->data
) * (vcdl
->count
+ 1))))
382 return (ENOMEM
); /* couldn't realloc */
384 data
= &vcdl
->data
[vcdl
->count
];
386 data
->pool
= strdup(zpool_get_name(zhp
));
387 data
->path
= strdup(path
);
388 data
->upath
= zfs_get_underlying_path(path
);
389 data
->cmd
= vcdl
->cmd
;
396 /* Get the names and count of the vdevs */
398 all_pools_for_each_vdev_gather_cb(zpool_handle_t
*zhp
, void *cb_vcdl
)
400 return (for_each_vdev(zhp
, for_each_vdev_run_cb
, cb_vcdl
));
404 * Now that vcdl is populated with our complete list of vdevs, spawn
408 all_pools_for_each_vdev_run_vcdl(vdev_cmd_data_list_t
*vcdl
)
412 /* 5 * boot_ncpus selfishly chosen since it works best on LLNL's HW */
413 int max_threads
= 5 * boot_ncpus
;
416 * Under Linux we use a taskq to parallelize running a command
417 * on each vdev. It is therefore necessary to initialize this
418 * functionality for the duration of the threads.
422 t
= taskq_create("z_pool_cmd", max_threads
, defclsyspri
, max_threads
,
427 /* Spawn off the command for each vdev */
428 for (i
= 0; i
< vcdl
->count
; i
++) {
429 (void) taskq_dispatch(t
, vdev_run_cmd_thread
,
430 (void *) &vcdl
->data
[i
], TQ_SLEEP
);
433 /* Wait for threads to finish */
440 * Run command 'cmd' on all vdevs in all pools. Saves the first line of output
441 * from the command in vcdk->data[].line for all vdevs.
443 * Returns a vdev_cmd_data_list_t that must be freed with
444 * free_vdev_cmd_data_list();
446 vdev_cmd_data_list_t
*
447 all_pools_for_each_vdev_run(int argc
, char **argv
, char *cmd
)
449 vdev_cmd_data_list_t
*vcdl
;
450 vcdl
= safe_malloc(sizeof (vcdl
));
453 /* Gather our list of all vdevs in all pools */
454 for_each_pool(argc
, argv
, B_TRUE
, NULL
,
455 all_pools_for_each_vdev_gather_cb
, vcdl
);
457 /* Run command on all vdevs in all pools */
458 all_pools_for_each_vdev_run_vcdl(vcdl
);
464 * Free the vdev_cmd_data_list_t created by all_pools_for_each_vdev_run()
467 free_vdev_cmd_data_list(vdev_cmd_data_list_t
*vcdl
)
470 for (i
= 0; i
< vcdl
->count
; i
++) {
471 free(vcdl
->data
[i
].path
);
472 free(vcdl
->data
[i
].pool
);
473 free(vcdl
->data
[i
].upath
);
474 free(vcdl
->data
[i
].line
);