/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright 2015 RackTop Systems.
 * Copyright (c) 2016, Intel Corporation.
 */
#include <sys/vdev_impl.h>
#include <libzfs_impl.h>
/*
 * Returns true (through *isactive) if the named pool matches the given GUID.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	*isactive = (theguid == guid);
	return (0);
}
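/*
 * Illustrative call sketch, not part of the original file; "tank" and
 * label_guid are placeholder values:
 *
 *	boolean_t active;
 *
 *	if (pool_active(hdl, "tank", label_guid, &active) == 0 && active) {
 *		... a pool named "tank" with a matching guid is imported ...
 *	}
 */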
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = {"\0"};
	int err, dstbuf_size;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	dstbuf_size = MAX(CONFIG_BUF_MINSIZE, zc.zc_nvlist_conf_size * 4);

	if (zcmd_alloc_dst_nvlist(hdl, &zc, dstbuf_size) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	/*
	 * Retry the TRYIMPORT ioctl, growing the destination buffer, until
	 * the kernel no longer reports ENOMEM.
	 */
	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}
static nvlist_t *
refresh_config_libzfs(void *handle, nvlist_t *tryconfig)
{
	return (refresh_config((libzfs_handle_t *)handle, tryconfig));
}

static int
pool_active_libzfs(void *handle, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	return (pool_active((libzfs_handle_t *)handle, name, guid, isactive));
}

const pool_config_ops_t libzfs_config_ops = {
	.pco_refresh_config = refresh_config_libzfs,
	.pco_pool_active = pool_active_libzfs,
};
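/*
 * Hedged usage sketch: this vtable lets generic import-scanning code call
 * back into libzfs without a hard dependency on it.  A hypothetical caller
 * (the function name and argument struct are assumptions, not taken from
 * this file):
 *
 *	nvlist_t *pools = zpool_search_import(hdl, &args, &libzfs_config_ops);
 */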
/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
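/*
 * Worked example (assuming the standard 256 KiB vdev_label_t and
 * VDEV_LABELS == 4): for an aligned device size "size", the labels land at
 *
 *	l = 0:	0
 *	l = 1:	256 KiB
 *	l = 2:	size - 512 KiB
 *	l = 3:	size - 256 KiB
 *
 * i.e. two labels at the front of the device and two at the back, so that
 * damage to either end still leaves a readable pair.
 */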
/*
 * Given a file descriptor, clear (zero) the label information.  This function
 * is used in the appliance stack as part of the ZFS sysevent module and
 * to implement the "zpool labelclear" command.
 */
int
zpool_clear_label(int fd)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t size;

	if (fstat64_blk(fd, &statbuf) == -1)
		return (0);

	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = calloc(1, sizeof (vdev_label_t))) == NULL)
		return (-1);

	/* Overwrite each of the label regions with zeros. */
	for (l = 0; l < VDEV_LABELS; l++) {
		if (pwrite64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t)) {
			free(label);
			return (-1);
		}
	}

	free(label);
	return (0);
}
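/*
 * Usage sketch (illustrative only; the device path below is a placeholder):
 *
 *	int fd = open("/dev/rdsk/c0t0d0s0", O_RDWR);
 *
 *	if (fd >= 0) {
 *		if (zpool_clear_label(fd) != 0)
 *			(void) fprintf(stderr, "failed to clear label\n");
 *		(void) close(fd);
 *	}
 *
 * This is essentially what "zpool labelclear" does for each device it is
 * given.
 */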
static boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (B_TRUE);
	}

	return (B_FALSE);
}
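/*
 * Shape of the vdev tree find_guid() recurses over (illustrative sketch,
 * guids are placeholders):
 *
 *	ZPOOL_CONFIG_GUID = <root guid>
 *	ZPOOL_CONFIG_CHILDREN = [
 *	    { ZPOOL_CONFIG_GUID = <mirror/raidz guid>,
 *	      ZPOOL_CONFIG_CHILDREN = [
 *		{ ZPOOL_CONFIG_GUID = <leaf vdev guid> }, ... ] },
 *	    ... ]
 *
 * The search returns B_TRUE as soon as any node's guid matches.
 */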
typedef struct aux_cbdata {
	const char *cb_type;
	uint64_t cb_guid;
	zpool_handle_t *cb_zhp;
} aux_cbdata_t;
static int
find_aux(zpool_handle_t *zhp, void *data)
{
	aux_cbdata_t *cbp = data;
	nvlist_t **list;
	nvlist_t *nvroot;
	uint_t i, count;
	uint64_t guid;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
	    &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (guid == cbp->cb_guid) {
				cbp->cb_zhp = zhp;
				return (1);
			}
		}
	}

	zpool_close(zhp);
	return (0);
}
/*
 * Determines if the pool is in use.  If so, it returns true and the state of
 * the pool as well as the name of the pool.  Name string is allocated and
 * must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;
	if (zpool_read_label(fd, &config, NULL) != 0) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}
	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only.  If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL) {
			if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
				stateval = POOL_STATE_ACTIVE;

			/*
			 * All we needed the zpool handle for is the
			 * readonly prop check.
			 */
			zpool_close(zhp);
		}

		ret = B_TRUE;
		break;
	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool.  Since we
			 * return B_TRUE, libdiskmgt will continue to prevent
			 * generic consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;
	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active.  To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use.  This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated, and
		 * we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;
	case POOL_STATE_L2CACHE:
		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}
	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}
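/*
 * Usage sketch (illustrative only):
 *
 *	pool_state_t state;
 *	char *name = NULL;
 *	boolean_t inuse;
 *
 *	if (zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 && inuse) {
 *		(void) printf("device belongs to pool '%s'\n", name);
 *		free(name);
 *	}
 */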