/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 */

/*
 * Virtual Device Labels
 * ---------------------
 *
 * The vdev label serves several distinct purposes:
 *
 *	1. Uniquely identify this device as part of a ZFS pool and confirm its
 *	   identity within the pool.
 *
 *	2. Verify that all the devices given in a configuration are present
 *	   within the pool.
 *
 *	3. Determine the uberblock for the pool.
 *
 *	4. In case of an import operation, determine the configuration of the
 *	   toplevel vdev of which it is a part.
 *
 *	5. If an import operation cannot find all the devices in the pool,
 *	   provide enough information to the administrator to determine which
 *	   devices are missing.
 *
 * It is important to note that while the kernel is responsible for writing the
 * label, it only consumes the information in the first three cases.  The
 * latter information is only consumed in userland when determining the
 * configuration to import a pool.
 *
 * Label Organization
 * ------------------
 *
 * Before describing the contents of the label, it's important to understand how
 * the labels are written and updated with respect to the uberblock.
 *
 * When the pool configuration is altered, either because it was newly created
 * or a device was added, we want to update all the labels such that we can deal
 * with fatal failure at any point.  To this end, each disk has two labels which
 * are updated before and after the uberblock is synced.  Assuming we have
 * labels and an uberblock with the following transaction groups:
 *
 *                L1          UB          L2
 *             +------+    +------+    +------+
 *             |      |    |      |    |      |
 *             | t10  |    | t10  |    | t10  |
 *             |      |    |      |    |      |
 *             +------+    +------+    +------+
 *
 * In this stable state, the labels and the uberblock were all updated within
 * the same transaction group (10).  Each label is mirrored and checksummed, so
 * that we can detect when we fail partway through writing the label.
 *
 * In order to identify which labels are valid, the labels are written in the
 * following manner:
 *
 *	1. For each vdev, update 'L1' to the new label
 *	2. Update the uberblock
 *	3. For each vdev, update 'L2' to the new label
 *
 * Given arbitrary failure, we can determine the correct label to use based on
 * the transaction group.  If we fail after updating L1 but before updating the
 * UB, we will notice that L1's transaction group is greater than the uberblock,
 * so L2 must be valid.  If we fail after writing the uberblock but before
 * writing L2, we will notice that L2's transaction group is less than L1, and
 * therefore L1 is valid.
 *
 * Another added complexity is that not every label is updated when the config
 * is synced.  If we add a single device, we do not want to have to re-write
 * every label for every device in the pool.  This means that both L1 and L2 may
 * be older than the pool uberblock, because the necessary information is stored
 * on another vdev.
 *
 * On-Disk Format
 * --------------
 *
 * The vdev label consists of two distinct parts, and is wrapped within the
 * vdev_label_t structure.  The label includes 8k of padding to permit legacy
 * VTOC disk labels, but is otherwise ignored.
 *
 * The first half of the label is a packed nvlist which contains pool wide
 * properties, per-vdev properties, and configuration information.  It is
 * described in more detail below.
 *
 * The latter half of the label consists of a redundant array of uberblocks.
 * These uberblocks are updated whenever a transaction group is committed,
 * or when the configuration is updated.  When a pool is loaded, we scan each
 * vdev for the 'best' uberblock.
 *
 * Configuration Information
 * -------------------------
 *
 * The nvlist describing the pool and vdev contains the following elements:
 *
 *	version		ZFS on-disk version
 *	name		Pool name
 *	state		Pool state
 *	txg		Transaction group in which this label was written
 *	pool_guid	Unique identifier for this pool
 *	vdev_tree	An nvlist describing vdev tree.
 *	features_for_read
 *			An nvlist of the features necessary for reading the MOS.
 *
 * Each leaf device label also contains the following:
 *
 *	top_guid	Unique ID for top-level vdev in which this is contained
 *	guid		Unique ID for the leaf vdev
 *
 * The 'vs' configuration follows the format described in 'spa_config.c'.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/zio.h>
#include <sys/dsl_scan.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/byteorder.h>
#include <sys/zfs_bootenv.h>

/*
 * Basic routines to read and write from a vdev label.
 * Used throughout the rest of this file.
 */
uint64_t
vdev_label_offset(uint64_t psize, int l, uint64_t offset)
{
	ASSERT(offset < sizeof (vdev_label_t));
	ASSERT(P2PHASE_TYPED(psize, sizeof (vdev_label_t), uint64_t) == 0);

	return (offset + l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : psize - VDEV_LABELS * sizeof (vdev_label_t)));
}

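/*
 * Illustrative note (not part of the original source): labels 0 and 1 sit
 * at the front of the device and labels 2 and 3 at the end.  With
 * sizeof (vdev_label_t) being 256 KiB, the four label base offsets are:
 *
 *	vdev_label_offset(psize, 0, 0) == 0
 *	vdev_label_offset(psize, 1, 0) == 256 KiB
 *	vdev_label_offset(psize, 2, 0) == psize - 512 KiB
 *	vdev_label_offset(psize, 3, 0) == psize - 256 KiB
 */
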
/*
 * Returns the vdev label number associated with the passed in offset.
 */
int
vdev_label_number(uint64_t psize, uint64_t offset)
{
	int l;

	if (offset >= psize - VDEV_LABEL_END_SIZE) {
		offset -= psize - VDEV_LABEL_END_SIZE;
		offset += (VDEV_LABELS / 2) * sizeof (vdev_label_t);
	}
	l = offset / sizeof (vdev_label_t);
	return (l < VDEV_LABELS ? l : -1);
}

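/*
 * Quick sanity sketch (illustrative only): for the base offset of each
 * label, vdev_label_number() inverts vdev_label_offset():
 *
 *	for (int l = 0; l < VDEV_LABELS; l++)
 *		ASSERT3S(vdev_label_number(psize,
 *		    vdev_label_offset(psize, l, 0)), ==, l);
 */
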
static void
vdev_label_read(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
    uint64_t size, zio_done_func_t *done, void *private, int flags)
{
	ASSERT(
	    spa_config_held(zio->io_spa, SCL_STATE, RW_READER) == SCL_STATE ||
	    spa_config_held(zio->io_spa, SCL_STATE, RW_WRITER) == SCL_STATE);
	ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);

	zio_nowait(zio_read_phys(zio, vd,
	    vdev_label_offset(vd->vdev_psize, l, offset),
	    size, buf, ZIO_CHECKSUM_LABEL, done, private,
	    ZIO_PRIORITY_SYNC_READ, flags, B_TRUE));
}

static void
vdev_label_write(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
    uint64_t size, zio_done_func_t *done, void *private, int flags)
{
	ASSERT(
	    spa_config_held(zio->io_spa, SCL_STATE, RW_READER) == SCL_STATE ||
	    spa_config_held(zio->io_spa, SCL_STATE, RW_WRITER) == SCL_STATE);
	ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);

	zio_nowait(zio_write_phys(zio, vd,
	    vdev_label_offset(vd->vdev_psize, l, offset),
	    size, buf, ZIO_CHECKSUM_LABEL, done, private,
	    ZIO_PRIORITY_SYNC_WRITE, flags, B_TRUE));
}

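/*
 * Typical usage pattern (a minimal sketch, not from the original source):
 * label I/Os are issued as children of a root zio and collected with
 * zio_wait(), e.g. to read one vdev_phys_t from label 0:
 *
 *	abd_t *abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
 *	zio_t *rio = zio_root(spa, NULL, NULL, flags);
 *	vdev_label_read(rio, vd, 0, abd,
 *	    offsetof(vdev_label_t, vl_vdev_phys), sizeof (vdev_phys_t),
 *	    NULL, NULL, flags);
 *	error = zio_wait(rio);
 */
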
/*
 * Generate the nvlist representing this vdev's stats
 */
static void
vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv)
{
	nvlist_t *nvx;
	vdev_stat_t *vs;
	vdev_stat_ex_t *vsx;

	vs = kmem_alloc(sizeof (*vs), KM_SLEEP);
	vsx = kmem_alloc(sizeof (*vsx), KM_SLEEP);

	vdev_get_stats_ex(vd, vs, vsx);
	fnvlist_add_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t *)vs, sizeof (*vs) / sizeof (uint64_t));

	/*
	 * Add extended stats into a special extended stats nvlist.  This keeps
	 * all the extended stats nicely grouped together.  The extended stats
	 * nvlist is then added to the main nvlist.
	 */
	nvx = fnvlist_alloc();

	/* ZIOs in flight to disk */
	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
	    vsx->vsx_active_queue[ZIO_PRIORITY_SYNC_READ]);

	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
	    vsx->vsx_active_queue[ZIO_PRIORITY_SYNC_WRITE]);

	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
	    vsx->vsx_active_queue[ZIO_PRIORITY_ASYNC_READ]);

	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
	    vsx->vsx_active_queue[ZIO_PRIORITY_ASYNC_WRITE]);

	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
	    vsx->vsx_active_queue[ZIO_PRIORITY_SCRUB]);

	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
	    vsx->vsx_active_queue[ZIO_PRIORITY_TRIM]);

	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
	    vsx->vsx_active_queue[ZIO_PRIORITY_REBUILD]);

	/* ZIOs pending */
	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
	    vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_READ]);

	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
	    vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_WRITE]);

	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
	    vsx->vsx_pend_queue[ZIO_PRIORITY_ASYNC_READ]);

	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
	    vsx->vsx_pend_queue[ZIO_PRIORITY_ASYNC_WRITE]);

	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
	    vsx->vsx_pend_queue[ZIO_PRIORITY_SCRUB]);

	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
	    vsx->vsx_pend_queue[ZIO_PRIORITY_TRIM]);

	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
	    vsx->vsx_pend_queue[ZIO_PRIORITY_REBUILD]);

	/* Histograms */
	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    vsx->vsx_total_histo[ZIO_TYPE_READ],
	    ARRAY_SIZE(vsx->vsx_total_histo[ZIO_TYPE_READ]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    vsx->vsx_total_histo[ZIO_TYPE_WRITE],
	    ARRAY_SIZE(vsx->vsx_total_histo[ZIO_TYPE_WRITE]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    vsx->vsx_disk_histo[ZIO_TYPE_READ],
	    ARRAY_SIZE(vsx->vsx_disk_histo[ZIO_TYPE_READ]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    vsx->vsx_disk_histo[ZIO_TYPE_WRITE],
	    ARRAY_SIZE(vsx->vsx_disk_histo[ZIO_TYPE_WRITE]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
	    vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_READ],
	    ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_READ]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
	    vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_WRITE],
	    ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_WRITE]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
	    vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_READ],
	    ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_READ]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
	    vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_WRITE],
	    ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_WRITE]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
	    vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB],
	    ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
	    vsx->vsx_queue_histo[ZIO_PRIORITY_TRIM],
	    ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_TRIM]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	    vsx->vsx_queue_histo[ZIO_PRIORITY_REBUILD],
	    ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_REBUILD]));

	/* Request sizes */
	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
	    vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ],
	    ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
	    vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_WRITE],
	    ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_WRITE]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
	    vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_READ],
	    ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_READ]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
	    vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_WRITE],
	    ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_WRITE]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
	    vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB],
	    ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
	    vsx->vsx_ind_histo[ZIO_PRIORITY_TRIM],
	    ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_TRIM]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
	    vsx->vsx_ind_histo[ZIO_PRIORITY_REBUILD],
	    ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_REBUILD]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
	    vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ],
	    ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
	    vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_WRITE],
	    ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_WRITE]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
	    vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_READ],
	    ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_READ]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
	    vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_WRITE],
	    ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_WRITE]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
	    vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB],
	    ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
	    vsx->vsx_agg_histo[ZIO_PRIORITY_TRIM],
	    ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_TRIM]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
	    vsx->vsx_agg_histo[ZIO_PRIORITY_REBUILD],
	    ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_REBUILD]));

	/* IO delays */
	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SLOW_IOS, vs->vs_slow_ios);

	/* Add extended stats nvlist to main nvlist */
	fnvlist_add_nvlist(nv, ZPOOL_CONFIG_VDEV_STATS_EX, nvx);

	fnvlist_free(nvx);
	kmem_free(vs, sizeof (*vs));
	kmem_free(vsx, sizeof (*vsx));
}

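/*
 * Consumer-side sketch (illustrative, not from this file): userland code
 * such as zpool iostat pulls these keys back out with the standard nvlist
 * lookups, e.g. for the total read latency histogram:
 *
 *	nvlist_t *nvx;
 *	uint64_t *histo;
 *	uint_t len;
 *
 *	if (nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_VDEV_STATS_EX, &nvx) == 0 &&
 *	    nvlist_lookup_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
 *	    &histo, &len) == 0)
 *		(void) printf("%llu\n", (u_longlong_t)histo[0]);
 */
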
static void
root_vdev_actions_getprogress(vdev_t *vd, nvlist_t *nvl)
{
	spa_t *spa = vd->vdev_spa;

	if (vd != spa->spa_root_vdev)
		return;

	/* provide either current or previous scan information */
	pool_scan_stat_t ps;
	if (spa_scan_get_stats(spa, &ps) == 0) {
		fnvlist_add_uint64_array(nvl,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t *)&ps,
		    sizeof (pool_scan_stat_t) / sizeof (uint64_t));
	}

	pool_removal_stat_t prs;
	if (spa_removal_get_stats(spa, &prs) == 0) {
		fnvlist_add_uint64_array(nvl,
		    ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t *)&prs,
		    sizeof (prs) / sizeof (uint64_t));
	}

	pool_checkpoint_stat_t pcs;
	if (spa_checkpoint_get_stats(spa, &pcs) == 0) {
		fnvlist_add_uint64_array(nvl,
		    ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t *)&pcs,
		    sizeof (pcs) / sizeof (uint64_t));
	}
}

static void
top_vdev_actions_getprogress(vdev_t *vd, nvlist_t *nvl)
{
	if (vd == vd->vdev_top) {
		vdev_rebuild_stat_t vrs;
		if (vdev_rebuild_get_stats(vd, &vrs) == 0) {
			fnvlist_add_uint64_array(nvl,
			    ZPOOL_CONFIG_REBUILD_STATS, (uint64_t *)&vrs,
			    sizeof (vrs) / sizeof (uint64_t));
		}
	}
}

/*
 * Generate the nvlist representing this vdev's config.
 */
nvlist_t *
vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
    vdev_config_flag_t flags)
{
	nvlist_t *nv = NULL;
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

	nv = fnvlist_alloc();

	fnvlist_add_string(nv, ZPOOL_CONFIG_TYPE, vd->vdev_ops->vdev_op_type);
	if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)))
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_ID, vd->vdev_id);
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, vd->vdev_guid);

	if (vd->vdev_path != NULL)
		fnvlist_add_string(nv, ZPOOL_CONFIG_PATH, vd->vdev_path);

	if (vd->vdev_devid != NULL)
		fnvlist_add_string(nv, ZPOOL_CONFIG_DEVID, vd->vdev_devid);

	if (vd->vdev_physpath != NULL)
		fnvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH,
		    vd->vdev_physpath);

	if (vd->vdev_enc_sysfs_path != NULL)
		fnvlist_add_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
		    vd->vdev_enc_sysfs_path);

	if (vd->vdev_fru != NULL)
		fnvlist_add_string(nv, ZPOOL_CONFIG_FRU, vd->vdev_fru);

	if (vd->vdev_ops->vdev_op_config_generate != NULL)
		vd->vdev_ops->vdev_op_config_generate(vd, nv);

	if (vd->vdev_wholedisk != -1ULL) {
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    vd->vdev_wholedisk);
	}

	if (vd->vdev_not_present && !(flags & VDEV_CONFIG_MISSING))
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 1);

	if (vd->vdev_isspare)
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);

	if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)) &&
	    vd == vd->vdev_top) {
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    vd->vdev_ms_array);
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    vd->vdev_ms_shift);
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift);
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    vd->vdev_asize);
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_LOG, vd->vdev_islog);
		if (vd->vdev_noalloc) {
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_NONALLOCATING,
			    vd->vdev_noalloc);
		}
		if (vd->vdev_removing) {
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVING,
			    vd->vdev_removing);
		}

		/* zpool command expects alloc class data */
		if (getstats && vd->vdev_alloc_bias != VDEV_BIAS_NONE) {
			const char *bias = NULL;

			switch (vd->vdev_alloc_bias) {
			case VDEV_BIAS_LOG:
				bias = VDEV_ALLOC_BIAS_LOG;
				break;
			case VDEV_BIAS_SPECIAL:
				bias = VDEV_ALLOC_BIAS_SPECIAL;
				break;
			case VDEV_BIAS_DEDUP:
				bias = VDEV_ALLOC_BIAS_DEDUP;
				break;
			default:
				ASSERT3U(vd->vdev_alloc_bias, ==,
				    VDEV_BIAS_NONE);
			}
			fnvlist_add_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
			    bias);
		}
	}

	if (vd->vdev_dtl_sm != NULL) {
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_DTL,
		    space_map_object(vd->vdev_dtl_sm));
	}

	if (vic->vic_mapping_object != 0) {
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
		    vic->vic_mapping_object);
	}

	if (vic->vic_births_object != 0) {
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
		    vic->vic_births_object);
	}

	if (vic->vic_prev_indirect_vdev != UINT64_MAX) {
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
		    vic->vic_prev_indirect_vdev);
	}

	if (vd->vdev_crtxg)
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);

	if (vd->vdev_expansion_time)
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_EXPANSION_TIME,
		    vd->vdev_expansion_time);

	if (flags & VDEV_CONFIG_MOS) {
		if (vd->vdev_leaf_zap != 0) {
			ASSERT(vd->vdev_ops->vdev_op_leaf);
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_LEAF_ZAP,
			    vd->vdev_leaf_zap);
		}

		if (vd->vdev_top_zap != 0) {
			ASSERT(vd == vd->vdev_top);
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
			    vd->vdev_top_zap);
		}

		if (vd->vdev_resilver_deferred) {
			ASSERT(vd->vdev_ops->vdev_op_leaf);
			ASSERT(spa->spa_resilver_deferred);
			fnvlist_add_boolean(nv, ZPOOL_CONFIG_RESILVER_DEFER);
		}
	}

	if (getstats) {
		vdev_config_generate_stats(vd, nv);

		root_vdev_actions_getprogress(vd, nv);
		top_vdev_actions_getprogress(vd, nv);

		/*
		 * Note: this can be called from open context
		 * (spa_get_stats()), so we need the rwlock to prevent
		 * the mapping from being changed by condensing.
		 */
		rw_enter(&vd->vdev_indirect_rwlock, RW_READER);
		if (vd->vdev_indirect_mapping != NULL) {
			ASSERT(vd->vdev_indirect_births != NULL);
			vdev_indirect_mapping_t *vim =
			    vd->vdev_indirect_mapping;
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
			    vdev_indirect_mapping_size(vim));
		}
		rw_exit(&vd->vdev_indirect_rwlock);
		if (vd->vdev_mg != NULL &&
		    vd->vdev_mg->mg_fragmentation != ZFS_FRAG_INVALID) {
			/*
			 * Compute approximately how much memory would be used
			 * for the indirect mapping if this device were to
			 * be removed.
			 *
			 * Note: If the frag metric is invalid, then not
			 * enough metaslabs have been converted to have
			 * histograms.
			 */
			uint64_t seg_count = 0;
			uint64_t to_alloc = vd->vdev_stat.vs_alloc;

			/*
			 * There are the same number of allocated segments
			 * as free segments, so we will have at least one
			 * entry per free segment.  However, small free
			 * segments (smaller than vdev_removal_max_span)
			 * will be combined with adjacent allocated segments
			 * as a single mapping.
			 */
			for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
				if (i + 1 < highbit64(vdev_removal_max_span)
				    - 1) {
					to_alloc +=
					    vd->vdev_mg->mg_histogram[i] <<
					    (i + 1);
				} else {
					seg_count +=
					    vd->vdev_mg->mg_histogram[i];
				}
			}

			/*
			 * The maximum length of a mapping is
			 * zfs_remove_max_segment, so we need at least one entry
			 * per zfs_remove_max_segment of allocated data.
			 */
			seg_count += to_alloc / spa_remove_max_segment(spa);

			fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
			    seg_count *
			    sizeof (vdev_indirect_mapping_entry_phys_t));
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf) {
		nvlist_t **child;
		uint64_t c;

		uint_t idx;

		ASSERT(!vd->vdev_ishole);

		child = kmem_alloc(vd->vdev_children * sizeof (nvlist_t *),
		    KM_SLEEP);

		for (c = 0, idx = 0; c < vd->vdev_children; c++) {
			vdev_t *cvd = vd->vdev_child[c];

			/*
			 * If we're generating an nvlist of removing
			 * vdevs then skip over any device which is
			 * not being removed.
			 */
			if ((flags & VDEV_CONFIG_REMOVING) &&
			    !cvd->vdev_removing)
				continue;

			child[idx++] = vdev_config_generate(spa, cvd,
			    getstats, flags);
		}

		if (idx) {
			fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
			    (const nvlist_t * const *)child, idx);
		}

		for (c = 0; c < idx; c++)
			nvlist_free(child[c]);

		kmem_free(child, vd->vdev_children * sizeof (nvlist_t *));

	} else {
		const char *aux = NULL;

		if (vd->vdev_offline && !vd->vdev_tmpoffline)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_OFFLINE, B_TRUE);
		if (vd->vdev_resilver_txg != 0)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
			    vd->vdev_resilver_txg);
		if (vd->vdev_rebuild_txg != 0)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_REBUILD_TXG,
			    vd->vdev_rebuild_txg);
		if (vd->vdev_faulted)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_FAULTED, B_TRUE);
		if (vd->vdev_degraded)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_DEGRADED, B_TRUE);
		if (vd->vdev_removed)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVED, B_TRUE);
		if (vd->vdev_unspare)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_UNSPARE, B_TRUE);
		if (vd->vdev_ishole)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_HOLE, B_TRUE);

		/* Set the reason why we're FAULTED/DEGRADED. */
		switch (vd->vdev_stat.vs_aux) {
		case VDEV_AUX_ERR_EXCEEDED:
			aux = "err_exceeded";
			break;

		case VDEV_AUX_EXTERNAL:
			aux = "external";
			break;
		}

		if (aux != NULL && !vd->vdev_tmpoffline) {
			fnvlist_add_string(nv, ZPOOL_CONFIG_AUX_STATE, aux);
		} else {
			/*
			 * We're healthy - clear any previous AUX_STATE values.
			 */
			if (nvlist_exists(nv, ZPOOL_CONFIG_AUX_STATE))
				nvlist_remove_all(nv, ZPOOL_CONFIG_AUX_STATE);
		}

		if (vd->vdev_splitting && vd->vdev_orig_guid != 0LL) {
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_ORIG_GUID,
			    vd->vdev_orig_guid);
		}
	}

	return (nv);
}

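/*
 * Illustrative only (not from the original source): in userland debug code
 * a generated config can be pretty-printed with libnvpair, e.g.:
 *
 *	nvlist_t *nv = vdev_config_generate(spa, vd, B_FALSE, 0);
 *	dump_nvlist(nv, 4);
 *	nvlist_free(nv);
 */
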
/*
 * Generate a view of the top-level vdevs.  If we currently have holes
 * in the namespace, then generate an array which contains a list of holey
 * vdevs.  Additionally, add the number of top-level children that currently
 * exist.
 */
void
vdev_top_config_generate(spa_t *spa, nvlist_t *config)
{
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t *array;
	uint_t c, idx;

	array = kmem_alloc(rvd->vdev_children * sizeof (uint64_t), KM_SLEEP);

	for (c = 0, idx = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];

		if (tvd->vdev_ishole) {
			array[idx++] = c;
		}
	}

	if (idx) {
		VERIFY(nvlist_add_uint64_array(config, ZPOOL_CONFIG_HOLE_ARRAY,
		    array, idx) == 0);
	}

	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
	    rvd->vdev_children) == 0);

	kmem_free(array, rvd->vdev_children * sizeof (uint64_t));
}

/*
 * Returns the configuration from the label of the given vdev. For vdevs
 * which don't have a txg value stored on their label (i.e. spares/cache)
 * or have not been completely initialized (txg = 0) just return
 * the configuration from the first valid label we find. Otherwise,
 * find the most up-to-date label that does not exceed the specified
 * 'txg' value.
 */
nvlist_t *
vdev_label_read_config(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	nvlist_t *config = NULL;
	vdev_phys_t *vp[VDEV_LABELS];
	abd_t *vp_abd[VDEV_LABELS];
	zio_t *zio[VDEV_LABELS];
	uint64_t best_txg = 0;
	uint64_t label_txg = 0;
	int error = 0;
	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_SPECULATIVE;

	ASSERT(vd->vdev_validate_thread == curthread ||
	    spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	if (!vdev_readable(vd))
		return (NULL);

	/*
	 * The label for a dRAID distributed spare is not stored on disk.
	 * Instead it is generated when needed which allows us to bypass
	 * the pipeline when reading the config from the label.
	 */
	if (vd->vdev_ops == &vdev_draid_spare_ops)
		return (vdev_draid_read_config_spare(vd));

	for (int l = 0; l < VDEV_LABELS; l++) {
		vp_abd[l] = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
		vp[l] = abd_to_buf(vp_abd[l]);
	}

retry:
	for (int l = 0; l < VDEV_LABELS; l++) {
		zio[l] = zio_root(spa, NULL, NULL, flags);

		vdev_label_read(zio[l], vd, l, vp_abd[l],
		    offsetof(vdev_label_t, vl_vdev_phys), sizeof (vdev_phys_t),
		    NULL, NULL, flags);
	}
	for (int l = 0; l < VDEV_LABELS; l++) {
		nvlist_t *label = NULL;

		if (zio_wait(zio[l]) == 0 &&
		    nvlist_unpack(vp[l]->vp_nvlist, sizeof (vp[l]->vp_nvlist),
		    &label, 0) == 0) {
			/*
			 * Auxiliary vdevs won't have txg values in their
			 * labels and newly added vdevs may not have been
			 * completely initialized so just return the
			 * configuration from the first valid label we
			 * encounter.
			 */
			error = nvlist_lookup_uint64(label,
			    ZPOOL_CONFIG_POOL_TXG, &label_txg);
			if ((error || label_txg == 0) && !config) {
				config = label;
				for (l++; l < VDEV_LABELS; l++)
					(void) zio_wait(zio[l]);
				break;
			} else if (label_txg <= txg && label_txg > best_txg) {
				best_txg = label_txg;
				nvlist_free(config);
				config = fnvlist_dup(label);
			}
		}

		if (label != NULL) {
			nvlist_free(label);
			label = NULL;
		}
	}

	if (config == NULL && !(flags & ZIO_FLAG_TRYHARD)) {
		flags |= ZIO_FLAG_TRYHARD;
		goto retry;
	}

	/*
	 * We found a valid label but it didn't pass txg restrictions.
	 */
	if (config == NULL && label_txg != 0) {
		vdev_dbgmsg(vd, "label discarded as txg is too large "
		    "(%llu > %llu)", (u_longlong_t)label_txg,
		    (u_longlong_t)txg);
	}

	for (int l = 0; l < VDEV_LABELS; l++) {
		abd_free(vp_abd[l]);
	}

	return (config);
}

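/*
 * Usage note (grounded in the callers below): callers that don't care
 * about txg restrictions, e.g. vdev_inuse(), pass a txg of -1ULL so any
 * valid label matches:
 *
 *	nvlist_t *label = vdev_label_read_config(vd, -1ULL);
 */
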
/*
 * Determine if a device is in use.  The 'spare_guid' parameter will be filled
 * in with the device guid if this spare is active elsewhere on the system.
 */
static boolean_t
vdev_inuse(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason,
    uint64_t *spare_guid, uint64_t *l2cache_guid)
{
	spa_t *spa = vd->vdev_spa;
	uint64_t state, pool_guid, device_guid, txg, spare_pool;
	uint64_t vdtxg = 0;
	nvlist_t *label;

	if (spare_guid)
		*spare_guid = 0ULL;
	if (l2cache_guid)
		*l2cache_guid = 0ULL;

	/*
	 * Read the label, if any, and perform some basic sanity checks.
	 */
	if ((label = vdev_label_read_config(vd, -1ULL)) == NULL)
		return (B_FALSE);

	(void) nvlist_lookup_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
	    &vdtxg);

	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
	    &state) != 0 ||
	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
	    &device_guid) != 0) {
		nvlist_free(label);
		return (B_FALSE);
	}

	if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
	    (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0)) {
		nvlist_free(label);
		return (B_FALSE);
	}

	nvlist_free(label);

	/*
	 * Check to see if this device indeed belongs to the pool it claims to
	 * be a part of.  The only way this is allowed is if the device is a hot
	 * spare (which we check for later on).
	 */
	if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
	    !spa_guid_exists(pool_guid, device_guid) &&
	    !spa_spare_exists(device_guid, NULL, NULL) &&
	    !spa_l2cache_exists(device_guid, NULL))
		return (B_FALSE);

	/*
	 * If the transaction group is zero, then this an initialized (but
	 * unused) label.  This is only an error if the create transaction
	 * on-disk is the same as the one we're using now, in which case the
	 * user has attempted to add the same vdev multiple times in the same
	 * transaction.
	 */
	if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
	    txg == 0 && vdtxg == crtxg)
		return (B_TRUE);

	/*
	 * Check to see if this is a spare device.  We do an explicit check for
	 * spa_has_spare() here because it may be on our pending list of spares
	 * to add.
	 */
	if (spa_spare_exists(device_guid, &spare_pool, NULL) ||
	    spa_has_spare(spa, device_guid)) {
		if (spare_guid)
			*spare_guid = device_guid;

		switch (reason) {
		case VDEV_LABEL_CREATE:
			return (B_TRUE);

		case VDEV_LABEL_REPLACE:
			return (!spa_has_spare(spa, device_guid) ||
			    spare_pool != 0ULL);

		case VDEV_LABEL_SPARE:
			return (spa_has_spare(spa, device_guid));
		default:
			break;
		}
	}

	/*
	 * Check to see if this is an l2cache device.
	 */
	if (spa_l2cache_exists(device_guid, NULL) ||
	    spa_has_l2cache(spa, device_guid)) {
		if (l2cache_guid)
			*l2cache_guid = device_guid;

		switch (reason) {
		case VDEV_LABEL_CREATE:
			return (B_TRUE);

		case VDEV_LABEL_REPLACE:
			return (!spa_has_l2cache(spa, device_guid));

		case VDEV_LABEL_L2CACHE:
			return (spa_has_l2cache(spa, device_guid));
		default:
			break;
		}
	}

	/*
	 * We can't rely on a pool's state if it's been imported
	 * read-only.  Instead we look to see if the pool is marked
	 * read-only in the namespace and set the state to active.
	 */
	if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
	    (spa = spa_by_guid(pool_guid, device_guid)) != NULL &&
	    spa_mode(spa) == SPA_MODE_READ)
		state = POOL_STATE_ACTIVE;

	/*
	 * If the device is marked ACTIVE, then this device is in use by another
	 * pool on the system.
	 */
	return (state == POOL_STATE_ACTIVE);
}

/*
 * Initialize a vdev label.  We check to make sure each leaf device is not in
 * use, and writable.  We put down an initial label which we will later
 * overwrite with a complete label.  Note that it's important to do this
 * sequentially, not in parallel, so that we catch cases of multiple use of the
 * same leaf vdev in the vdev we're creating -- e.g. mirroring a disk with
 * itself.
 */
int
vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
{
	spa_t *spa = vd->vdev_spa;
	nvlist_t *label;
	vdev_phys_t *vp;
	abd_t *vp_abd;
	abd_t *bootenv;
	uberblock_t *ub;
	abd_t *ub_abd;
	zio_t *zio;
	char *buf;
	size_t buflen;
	int error;
	uint64_t spare_guid = 0, l2cache_guid = 0;
	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	for (int c = 0; c < vd->vdev_children; c++)
		if ((error = vdev_label_init(vd->vdev_child[c],
		    crtxg, reason)) != 0)
			return (error);

	/* Track the creation time for this vdev */
	vd->vdev_crtxg = crtxg;

	if (!vd->vdev_ops->vdev_op_leaf || !spa_writeable(spa))
		return (0);

	/*
	 * Dead vdevs cannot be initialized.
	 */
	if (vdev_is_dead(vd))
		return (SET_ERROR(EIO));

	/*
	 * Determine if the vdev is in use.
	 */
	if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPLIT &&
	    vdev_inuse(vd, crtxg, reason, &spare_guid, &l2cache_guid))
		return (SET_ERROR(EBUSY));

	/*
	 * If this is a request to add or replace a spare or l2cache device
	 * that is in use elsewhere on the system, then we must update the
	 * guid (which was initialized to a random value) to reflect the
	 * actual GUID (which is shared between multiple pools).
	 */
	if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_L2CACHE &&
	    spare_guid != 0ULL) {
		uint64_t guid_delta = spare_guid - vd->vdev_guid;

		vd->vdev_guid += guid_delta;

		for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
			pvd->vdev_guid_sum += guid_delta;

		/*
		 * If this is a replacement, then we want to fallthrough to the
		 * rest of the code.  If we're adding a spare, then it's already
		 * labeled appropriately and we can just return.
		 */
		if (reason == VDEV_LABEL_SPARE)
			return (0);
		ASSERT(reason == VDEV_LABEL_REPLACE ||
		    reason == VDEV_LABEL_SPLIT);
	}

	if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPARE &&
	    l2cache_guid != 0ULL) {
		uint64_t guid_delta = l2cache_guid - vd->vdev_guid;

		vd->vdev_guid += guid_delta;

		for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
			pvd->vdev_guid_sum += guid_delta;

		/*
		 * If this is a replacement, then we want to fallthrough to the
		 * rest of the code.  If we're adding an l2cache, then it's
		 * already labeled appropriately and we can just return.
		 */
		if (reason == VDEV_LABEL_L2CACHE)
			return (0);
		ASSERT(reason == VDEV_LABEL_REPLACE);
	}

	/*
	 * Initialize its label.
	 */
	vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
	abd_zero(vp_abd, sizeof (vdev_phys_t));
	vp = abd_to_buf(vp_abd);

	/*
	 * Generate a label describing the pool and our top-level vdev.
	 * We mark it as being from txg 0 to indicate that it's not
	 * really part of an active pool just yet.  The labels will
	 * be written again with a meaningful txg by spa_sync().
	 */
	if (reason == VDEV_LABEL_SPARE ||
	    (reason == VDEV_LABEL_REMOVE && vd->vdev_isspare)) {
		/*
		 * For inactive hot spares, we generate a special label that
		 * identifies as a mutually shared hot spare.  We write the
		 * label if we are adding a hot spare, or if we are removing an
		 * active hot spare (in which case we want to revert the
		 * labels).
		 */
		VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);

		VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
		    spa_version(spa)) == 0);
		VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE,
		    POOL_STATE_SPARE) == 0);
		VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID,
		    vd->vdev_guid) == 0);
	} else if (reason == VDEV_LABEL_L2CACHE ||
	    (reason == VDEV_LABEL_REMOVE && vd->vdev_isl2cache)) {
		/*
		 * For level 2 ARC devices, add a special label.
		 */
		VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);

		VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
		    spa_version(spa)) == 0);
		VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE,
		    POOL_STATE_L2CACHE) == 0);
		VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID,
		    vd->vdev_guid) == 0);
	} else {
		uint64_t txg = 0ULL;

		if (reason == VDEV_LABEL_SPLIT)
			txg = spa->spa_uberblock.ub_txg;
		label = spa_config_generate(spa, vd, txg, B_FALSE);

		/*
		 * Add our creation time.  This allows us to detect multiple
		 * vdev uses as described above, and automatically expires if we
		 * fail.
		 */
		VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
		    crtxg) == 0);
	}

	buf = vp->vp_nvlist;
	buflen = sizeof (vp->vp_nvlist);

	error = nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP);
	if (error != 0) {
		nvlist_free(label);
		abd_free(vp_abd);
		/* EFAULT means nvlist_pack ran out of room */
		return (SET_ERROR(error == EFAULT ? ENAMETOOLONG : EINVAL));
	}

	/*
	 * Initialize uberblock template.
	 */
	ub_abd = abd_alloc_linear(VDEV_UBERBLOCK_RING, B_TRUE);
	abd_zero(ub_abd, VDEV_UBERBLOCK_RING);
	abd_copy_from_buf(ub_abd, &spa->spa_uberblock, sizeof (uberblock_t));
	ub = abd_to_buf(ub_abd);
	ub->ub_txg = 0;

	/* Initialize the 2nd padding area. */
	bootenv = abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE);
	abd_zero(bootenv, VDEV_PAD_SIZE);

	/*
	 * Write everything in parallel.
	 */
retry:
	zio = zio_root(spa, NULL, NULL, flags);

	for (int l = 0; l < VDEV_LABELS; l++) {

		vdev_label_write(zio, vd, l, vp_abd,
		    offsetof(vdev_label_t, vl_vdev_phys),
		    sizeof (vdev_phys_t), NULL, NULL, flags);

		/*
		 * Skip the 1st padding area.
		 * Zero out the 2nd padding area where it might have
		 * left over data from previous filesystem format.
		 */
		vdev_label_write(zio, vd, l, bootenv,
		    offsetof(vdev_label_t, vl_be),
		    VDEV_PAD_SIZE, NULL, NULL, flags);

		vdev_label_write(zio, vd, l, ub_abd,
		    offsetof(vdev_label_t, vl_uberblock),
		    VDEV_UBERBLOCK_RING, NULL, NULL, flags);
	}

	error = zio_wait(zio);

	if (error != 0 && !(flags & ZIO_FLAG_TRYHARD)) {
		flags |= ZIO_FLAG_TRYHARD;
		goto retry;
	}

	nvlist_free(label);
	abd_free(bootenv);
	abd_free(ub_abd);
	abd_free(vp_abd);

	/*
	 * If this vdev hasn't been previously identified as a spare, then we
	 * mark it as such only if a) we are labeling it as a spare, or b) it
	 * exists as a spare elsewhere in the system.  Do the same for
	 * level 2 ARC devices.
	 */
	if (error == 0 && !vd->vdev_isspare &&
	    (reason == VDEV_LABEL_SPARE ||
	    spa_spare_exists(vd->vdev_guid, NULL, NULL)))
		spa_spare_add(vd);

	if (error == 0 && !vd->vdev_isl2cache &&
	    (reason == VDEV_LABEL_L2CACHE ||
	    spa_l2cache_exists(vd->vdev_guid, NULL)))
		spa_l2cache_add(vd);

	return (error);
}

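/*
 * Caller sketch (hedged; vdev_create() is the usual entry point, not this
 * snippet): labeling recurses through children above, so one call on the
 * root of the subtree being added covers every leaf:
 *
 *	if ((error = vdev_label_init(vd, crtxg, VDEV_LABEL_CREATE)) != 0)
 *		return (error);
 */
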
/*
 * Done callback for vdev_label_read_bootenv_impl.  If this is the first
 * callback to finish, store our abd in the callback pointer.  Otherwise, we
 * just free our abd and return.
 */
static void
vdev_label_read_bootenv_done(zio_t *zio)
{
	zio_t *rio = zio->io_private;
	abd_t **cbp = rio->io_private;

	ASSERT3U(zio->io_size, ==, VDEV_PAD_SIZE);

	if (zio->io_error == 0) {
		mutex_enter(&rio->io_lock);
		if (*cbp == NULL) {
			/* Will free this buffer in vdev_label_read_bootenv. */
			*cbp = zio->io_abd;
		} else {
			abd_free(zio->io_abd);
		}
		mutex_exit(&rio->io_lock);
	} else {
		abd_free(zio->io_abd);
	}
}

static void
vdev_label_read_bootenv_impl(zio_t *zio, vdev_t *vd, int flags)
{
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_label_read_bootenv_impl(zio, vd->vdev_child[c], flags);

	/*
	 * We just use the first label that has a correct checksum; the
	 * bootloader should have rewritten them all to be the same on boot,
	 * and any changes we made since boot have been the same across all
	 * labels.
	 */
	if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
		for (int l = 0; l < VDEV_LABELS; l++) {
			vdev_label_read(zio, vd, l,
			    abd_alloc_linear(VDEV_PAD_SIZE, B_FALSE),
			    offsetof(vdev_label_t, vl_be), VDEV_PAD_SIZE,
			    vdev_label_read_bootenv_done, zio, flags);
		}
	}
}

int
vdev_label_read_bootenv(vdev_t *rvd, nvlist_t *bootenv)
{
	nvlist_t *config;
	spa_t *spa = rvd->vdev_spa;
	abd_t *abd = NULL;
	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_TRYHARD;

	ASSERT(bootenv);
	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	zio_t *zio = zio_root(spa, NULL, &abd, flags);
	vdev_label_read_bootenv_impl(zio, rvd, flags);
	int err = zio_wait(zio);

	if (abd != NULL) {
		char *buf;
		vdev_boot_envblock_t *vbe = abd_to_buf(abd);

		vbe->vbe_version = ntohll(vbe->vbe_version);
		switch (vbe->vbe_version) {
		case VB_RAW:
			/*
			 * if we have textual data in vbe_bootenv, create nvlist
			 * with key "envmap".
			 */
			fnvlist_add_uint64(bootenv, BOOTENV_VERSION, VB_RAW);
			vbe->vbe_bootenv[sizeof (vbe->vbe_bootenv) - 1] = '\0';
			fnvlist_add_string(bootenv, GRUB_ENVMAP,
			    vbe->vbe_bootenv);
			break;

		case VB_NVLIST:
			err = nvlist_unpack(vbe->vbe_bootenv,
			    sizeof (vbe->vbe_bootenv), &config, 0);
			if (err == 0) {
				fnvlist_merge(bootenv, config);
				nvlist_free(config);
			}
			break;

		default:
			/* Check for FreeBSD zfs bootonce command string */
			buf = abd_to_buf(abd);
			if (*buf == '\0') {
				fnvlist_add_uint64(bootenv, BOOTENV_VERSION,
				    VB_NVLIST);
				break;
			}
			fnvlist_add_string(bootenv, FREEBSD_BOOTONCE, buf);
		}

		/*
		 * abd was allocated in vdev_label_read_bootenv_impl()
		 */
		abd_free(abd);
		/*
		 * If we managed to read any successfully,
		 * return success.
		 */
		return (0);
	}

	return (err);
}

int
vdev_label_write_bootenv(vdev_t *vd, nvlist_t *env)
{
	zio_t *zio;
	spa_t *spa = vd->vdev_spa;
	vdev_boot_envblock_t *bootenv;
	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
	int error;
	size_t nvsize;
	char *nvbuf;

	error = nvlist_size(env, &nvsize, NV_ENCODE_XDR);
	if (error != 0)
		return (SET_ERROR(error));

	if (nvsize >= sizeof (bootenv->vbe_bootenv)) {
		return (SET_ERROR(E2BIG));
	}

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	error = ENXIO;
	for (int c = 0; c < vd->vdev_children; c++) {
		int child_err;

		child_err = vdev_label_write_bootenv(vd->vdev_child[c], env);
		/*
		 * As long as any of the disks managed to write all of their
		 * labels successfully, return success.
		 */
		if (child_err == 0)
			error = child_err;
	}

	if (!vd->vdev_ops->vdev_op_leaf || vdev_is_dead(vd) ||
	    !vdev_writeable(vd)) {
		return (error);
	}
	ASSERT3U(sizeof (*bootenv), ==, VDEV_PAD_SIZE);
	abd_t *abd = abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE);
	abd_zero(abd, VDEV_PAD_SIZE);

	bootenv = abd_borrow_buf_copy(abd, VDEV_PAD_SIZE);
	nvbuf = bootenv->vbe_bootenv;
	nvsize = sizeof (bootenv->vbe_bootenv);

	bootenv->vbe_version = fnvlist_lookup_uint64(env, BOOTENV_VERSION);
	switch (bootenv->vbe_version) {
	case VB_RAW:
		if (nvlist_lookup_string(env, GRUB_ENVMAP, &nvbuf) == 0) {
			(void) strlcpy(bootenv->vbe_bootenv, nvbuf, nvsize);
		}
		error = 0;
		break;

	case VB_NVLIST:
		error = nvlist_pack(env, &nvbuf, &nvsize, NV_ENCODE_XDR,
		    KM_SLEEP);
		break;

	default:
		error = EINVAL;
		break;
	}

	if (error == 0) {
		bootenv->vbe_version = htonll(bootenv->vbe_version);
		abd_return_buf_copy(abd, bootenv, VDEV_PAD_SIZE);
	} else {
		abd_free(abd);
		return (SET_ERROR(error));
	}

retry:
	zio = zio_root(spa, NULL, NULL, flags);
	for (int l = 0; l < VDEV_LABELS; l++) {
		vdev_label_write(zio, vd, l, abd,
		    offsetof(vdev_label_t, vl_be),
		    VDEV_PAD_SIZE, NULL, NULL, flags);
	}

	error = zio_wait(zio);
	if (error != 0 && !(flags & ZIO_FLAG_TRYHARD)) {
		flags |= ZIO_FLAG_TRYHARD;
		goto retry;
	}

	abd_free(abd);
	return (error);
}

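/*
 * Caller sketch (hedged; in practice this path is reached from the
 * ZFS_IOC_SET_BOOTENV ioctl rather than called directly): package the
 * environment as an nvlist with a BOOTENV_VERSION and hand it to the root
 * vdev so every leaf's labels are updated:
 *
 *	nvlist_t *env = fnvlist_alloc();
 *	fnvlist_add_uint64(env, BOOTENV_VERSION, VB_RAW);
 *	fnvlist_add_string(env, GRUB_ENVMAP, "zfs_bootonce=rpool/ROOT/alt");
 *	error = vdev_label_write_bootenv(spa->spa_root_vdev, env);
 *	nvlist_free(env);
 */
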
/*
 * ==========================================================================
 * uberblock load/sync
 * ==========================================================================
 */

/*
 * Consider the following situation: txg is safely synced to disk.  We've
 * written the first uberblock for txg + 1, and then we lose power.  When we
 * come back up, we fail to see the uberblock for txg + 1 because, say,
 * it was on a mirrored device and the replica to which we wrote txg + 1
 * is now offline.  If we then make some changes and sync txg + 1, and then
 * the missing replica comes back, then for a few seconds we'll have two
 * conflicting uberblocks on disk with the same txg.  The solution is simple:
 * among uberblocks with equal txg, choose the one with the latest timestamp.
 */
static int
vdev_uberblock_compare(const uberblock_t *ub1, const uberblock_t *ub2)
{
	int cmp = TREE_CMP(ub1->ub_txg, ub2->ub_txg);

	if (likely(cmp))
		return (cmp);

	cmp = TREE_CMP(ub1->ub_timestamp, ub2->ub_timestamp);
	if (likely(cmp))
		return (cmp);

	/*
	 * If MMP_VALID(ub) && MMP_SEQ_VALID(ub) then the host has an MMP-aware
	 * ZFS, e.g. OpenZFS >= 0.7.
	 *
	 * If one ub has MMP and the other does not, they were written by
	 * different hosts, which matters for MMP.  So we treat no MMP/no SEQ
	 * as a sequence value of 0.
	 *
	 * Since timestamp and txg are the same if we get this far, either is
	 * acceptable for importing the pool.
	 */
	unsigned int seq1 = 0;
	unsigned int seq2 = 0;

	if (MMP_VALID(ub1) && MMP_SEQ_VALID(ub1))
		seq1 = MMP_SEQ(ub1);

	if (MMP_VALID(ub2) && MMP_SEQ_VALID(ub2))
		seq2 = MMP_SEQ(ub2);

	return (TREE_CMP(seq1, seq2));
}

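/*
 * Illustrative precedence note (not from the original source): txg wins
 * first, then timestamp, then the MMP sequence.  So with
 * ub1 = {txg 10, ts 100, seq 1} and ub2 = {txg 10, ts 100, seq 2},
 * vdev_uberblock_compare(&ub1, &ub2) < 0 and ub2 is the 'best' uberblock.
 */
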
struct ubl_cbdata {
	uberblock_t	*ubl_ubbest;	/* Best uberblock */
	vdev_t		*ubl_vd;	/* vdev associated with the above */
};

static void
vdev_uberblock_load_done(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	spa_t *spa = zio->io_spa;
	zio_t *rio = zio->io_private;
	uberblock_t *ub = abd_to_buf(zio->io_abd);
	struct ubl_cbdata *cbp = rio->io_private;

	ASSERT3U(zio->io_size, ==, VDEV_UBERBLOCK_SIZE(vd));

	if (zio->io_error == 0 && uberblock_verify(ub) == 0) {
		mutex_enter(&rio->io_lock);
		if (ub->ub_txg <= spa->spa_load_max_txg &&
		    vdev_uberblock_compare(ub, cbp->ubl_ubbest) > 0) {
			/*
			 * Keep track of the vdev in which this uberblock
			 * was found.  We will use this information later
			 * to obtain the config nvlist associated with
			 * this uberblock.
			 */
			*cbp->ubl_ubbest = *ub;
			cbp->ubl_vd = vd;
		}
		mutex_exit(&rio->io_lock);
	}

	abd_free(zio->io_abd);
}

static void
vdev_uberblock_load_impl(zio_t *zio, vdev_t *vd, int flags,
    struct ubl_cbdata *cbp)
{
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_uberblock_load_impl(zio, vd->vdev_child[c], flags, cbp);

	if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd) &&
	    vd->vdev_ops != &vdev_draid_spare_ops) {
		for (int l = 0; l < VDEV_LABELS; l++) {
			for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
				vdev_label_read(zio, vd, l,
				    abd_alloc_linear(VDEV_UBERBLOCK_SIZE(vd),
				    B_TRUE), VDEV_UBERBLOCK_OFFSET(vd, n),
				    VDEV_UBERBLOCK_SIZE(vd),
				    vdev_uberblock_load_done, zio, flags);
			}
		}
	}
}

/*
 * Reads the 'best' uberblock from disk along with its associated
 * configuration.  First, we read the uberblock array of each label of each
 * vdev, keeping track of the uberblock with the highest txg in each array.
 * Then, we read the configuration from the same vdev as the best uberblock.
 */
void
vdev_uberblock_load(vdev_t *rvd, uberblock_t *ub, nvlist_t **config)
{
	zio_t *zio;
	spa_t *spa = rvd->vdev_spa;
	struct ubl_cbdata cb;
	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_TRYHARD;

	ASSERT(ub);
	ASSERT(config);

	bzero(ub, sizeof (uberblock_t));
	*config = NULL;

	cb.ubl_ubbest = ub;
	cb.ubl_vd = NULL;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	zio = zio_root(spa, NULL, &cb, flags);
	vdev_uberblock_load_impl(zio, rvd, flags, &cb);
	(void) zio_wait(zio);

	/*
	 * It's possible that the best uberblock was discovered on a label
	 * that has a configuration which was written in a future txg.
	 * Search all labels on this vdev to find the configuration that
	 * matches the txg for our uberblock.
	 */
	if (cb.ubl_vd != NULL) {
		vdev_dbgmsg(cb.ubl_vd, "best uberblock found for spa %s. "
		    "txg %llu", spa->spa_name, (u_longlong_t)ub->ub_txg);

		*config = vdev_label_read_config(cb.ubl_vd, ub->ub_txg);
		if (*config == NULL && spa->spa_extreme_rewind) {
			vdev_dbgmsg(cb.ubl_vd, "failed to read label config. "
			    "Trying again without txg restrictions.");
			*config = vdev_label_read_config(cb.ubl_vd,
			    UINT64_MAX);
		}
		if (*config == NULL) {
			vdev_dbgmsg(cb.ubl_vd, "failed to read label config");
		}
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
}

/*
 * For use when a leaf vdev is expanded.
 * The location of labels 2 and 3 changed, and at the new location the
 * uberblock rings are either empty or contain garbage.  The sync will write
 * new configs there because the vdev is dirty, but expansion also needs the
 * uberblock rings copied.  Read them from label 0 which did not move.
 *
 * Since the point is to populate labels {2,3} with valid uberblocks,
 * we zero uberblocks we fail to read or which are not valid.
 */
static void
vdev_copy_uberblocks(vdev_t *vd)
{
	abd_t *ub_abd;
	zio_t *write_zio;
	int locks = (SCL_L2ARC | SCL_ZIO);
	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_SPECULATIVE;

	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_READER) ==
	    SCL_STATE);
	ASSERT(vd->vdev_ops->vdev_op_leaf);

	/*
	 * No uberblocks are stored on distributed spares, they may be
	 * safely skipped when expanding a leaf vdev.
	 */
	if (vd->vdev_ops == &vdev_draid_spare_ops)
		return;

	spa_config_enter(vd->vdev_spa, locks, FTAG, RW_READER);

	ub_abd = abd_alloc_linear(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);

	write_zio = zio_root(vd->vdev_spa, NULL, NULL, flags);
	for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
		const int src_label = 0;
		zio_t *zio;

		zio = zio_root(vd->vdev_spa, NULL, NULL, flags);
		vdev_label_read(zio, vd, src_label, ub_abd,
		    VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd),
		    NULL, NULL, flags);

		if (zio_wait(zio) || uberblock_verify(abd_to_buf(ub_abd)))
			abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));

		for (int l = 2; l < VDEV_LABELS; l++)
			vdev_label_write(write_zio, vd, l, ub_abd,
			    VDEV_UBERBLOCK_OFFSET(vd, n),
			    VDEV_UBERBLOCK_SIZE(vd), NULL, NULL,
			    flags | ZIO_FLAG_DONT_PROPAGATE);
	}
	(void) zio_wait(write_zio);

	spa_config_exit(vd->vdev_spa, locks, FTAG);

	abd_free(ub_abd);
}

/*
 * On success, increment root zio's count of good writes.
 * We only get credit for writes to known-visible vdevs; see spa_vdev_add().
 */
static void
vdev_uberblock_sync_done(zio_t *zio)
{
	uint64_t *good_writes = zio->io_private;

	if (zio->io_error == 0 && zio->io_vd->vdev_top->vdev_ms_array != 0)
		atomic_inc_64(good_writes);
}

/*
 * Write the uberblock to all labels of all leaves of the specified vdev.
 */
static void
vdev_uberblock_sync(zio_t *zio, uint64_t *good_writes,
    uberblock_t *ub, vdev_t *vd, int flags)
{
	for (uint64_t c = 0; c < vd->vdev_children; c++) {
		vdev_uberblock_sync(zio, good_writes,
		    ub, vd->vdev_child[c], flags);
	}

	if (!vd->vdev_ops->vdev_op_leaf)
		return;

	if (!vdev_writeable(vd))
		return;

	/*
	 * There's no need to write uberblocks to a distributed spare, they
	 * are already stored on all the leaves of the parent dRAID.  For
	 * this same reason vdev_uberblock_load_impl() skips distributed
	 * spares when reading uberblocks.
	 */
	if (vd->vdev_ops == &vdev_draid_spare_ops)
		return;

	/* If the vdev was expanded, need to copy uberblock rings. */
	if (vd->vdev_state == VDEV_STATE_HEALTHY &&
	    vd->vdev_copy_uberblocks == B_TRUE) {
		vdev_copy_uberblocks(vd);
		vd->vdev_copy_uberblocks = B_FALSE;
	}

	int m = spa_multihost(vd->vdev_spa) ? MMP_BLOCKS_PER_LABEL : 0;
	int n = ub->ub_txg % (VDEV_UBERBLOCK_COUNT(vd) - m);

	/* Copy the uberblock_t into the ABD */
	abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
	abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
	abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));

	for (int l = 0; l < VDEV_LABELS; l++)
		vdev_label_write(zio, vd, l, ub_abd,
		    VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd),
		    vdev_uberblock_sync_done, good_writes,
		    flags | ZIO_FLAG_DONT_PROPAGATE);

	abd_free(ub_abd);
}

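/*
 * Slot selection sketch (illustrative): with the 128 KiB uberblock ring
 * and 1 KiB slots (small ashift), VDEV_UBERBLOCK_COUNT(vd) is 128.  With
 * multihost off (m == 0), txg 300 lands in slot 300 % 128 == 44, so
 * successive txgs rotate through the ring rather than overwriting the
 * most recent uberblock.
 */
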
/* Sync the uberblocks to all vdevs in svd[] */
static int
vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub,
    int flags)
{
	spa_t *spa = svd[0]->vdev_spa;
	zio_t *zio;
	uint64_t good_writes = 0;

	zio = zio_root(spa, NULL, NULL, flags);

	for (int v = 0; v < svdcount; v++)
		vdev_uberblock_sync(zio, &good_writes, ub, svd[v], flags);

	(void) zio_wait(zio);

	/*
	 * Flush the uberblocks to disk.  This ensures that the odd labels
	 * are no longer needed (because the new uberblocks and the even
	 * labels are safely on disk), so it is safe to overwrite them.
	 */
	zio = zio_root(spa, NULL, NULL, flags);

	for (int v = 0; v < svdcount; v++) {
		if (vdev_writeable(svd[v])) {
			zio_flush(zio, svd[v]);
		}
	}

	(void) zio_wait(zio);

	return (good_writes >= 1 ? 0 : EIO);
}

/*
 * On success, increment the count of good writes for our top-level vdev.
 */
static void
vdev_label_sync_done(zio_t *zio)
{
	uint64_t *good_writes = zio->io_private;

	if (zio->io_error == 0)
		atomic_inc_64(good_writes);
}

/*
 * If there weren't enough good writes, indicate failure to the parent.
 */
static void
vdev_label_sync_top_done(zio_t *zio)
{
	uint64_t *good_writes = zio->io_private;

	if (*good_writes == 0)
		zio->io_error = SET_ERROR(EIO);

	kmem_free(good_writes, sizeof (uint64_t));
}

/*
 * We ignore errors for log and cache devices, simply free the private data.
 */
static void
vdev_label_sync_ignore_done(zio_t *zio)
{
	kmem_free(zio->io_private, sizeof (uint64_t));
}

/*
 * Write all even or odd labels to all leaves of the specified vdev.
 */
static void
vdev_label_sync(zio_t *zio, uint64_t *good_writes,
    vdev_t *vd, int l, uint64_t txg, int flags)
{
	nvlist_t *label;
	vdev_phys_t *vp;
	abd_t *vp_abd;
	char *buf;
	size_t buflen;

	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_label_sync(zio, good_writes,
		    vd->vdev_child[c], l, txg, flags);
	}

	if (!vd->vdev_ops->vdev_op_leaf)
		return;

	if (!vdev_writeable(vd))
		return;

	/*
	 * The top-level config never needs to be written to a distributed
	 * spare.  When read, vdev_dspare_label_read_config() will generate
	 * the config for vdev_label_read_config().
	 */
	if (vd->vdev_ops == &vdev_draid_spare_ops)
		return;

	/*
	 * Generate a label describing the top-level config to which we belong.
	 */
	label = spa_config_generate(vd->vdev_spa, vd, txg, B_FALSE);

	vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
	abd_zero(vp_abd, sizeof (vdev_phys_t));
	vp = abd_to_buf(vp_abd);

	buf = vp->vp_nvlist;
	buflen = sizeof (vp->vp_nvlist);

	if (!nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP)) {
		for (; l < VDEV_LABELS; l += 2) {
			vdev_label_write(zio, vd, l, vp_abd,
			    offsetof(vdev_label_t, vl_vdev_phys),
			    sizeof (vdev_phys_t),
			    vdev_label_sync_done, good_writes,
			    flags | ZIO_FLAG_DONT_PROPAGATE);
		}
	}

	abd_free(vp_abd);
	nvlist_free(label);
}

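/*
 * Even/odd selection sketch (illustrative): vdev_label_sync_list() passes
 * l == 0 or l == 1, and the 'l += 2' loop above turns that into labels
 * {0, 2} (even pass) or {1, 3} (odd pass), matching the L0/L2 then L1/L3
 * ordering described in vdev_config_sync() below.
 */
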
static int
vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags)
{
	list_t *dl = &spa->spa_config_dirty_list;
	vdev_t *vd;
	zio_t *zio;
	int error;

	/*
	 * Write the new labels to disk.
	 */
	zio = zio_root(spa, NULL, NULL, flags);

	for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd)) {
		uint64_t *good_writes;

		ASSERT(!vd->vdev_ishole);

		good_writes = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
		zio_t *vio = zio_null(zio, spa, NULL,
		    (vd->vdev_islog || vd->vdev_aux != NULL) ?
		    vdev_label_sync_ignore_done : vdev_label_sync_top_done,
		    good_writes, flags);
		vdev_label_sync(vio, good_writes, vd, l, txg, flags);
		zio_nowait(vio);
	}

	error = zio_wait(zio);

	/*
	 * Flush the new labels to disk.
	 */
	zio = zio_root(spa, NULL, NULL, flags);

	for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd))
		zio_flush(zio, vd);

	(void) zio_wait(zio);

	return (error);
}

/*
 * Sync the uberblock and any changes to the vdev configuration.
 *
 * The order of operations is carefully crafted to ensure that
 * if the system panics or loses power at any time, the state on disk
 * is still transactionally consistent.  The in-line comments below
 * describe the failure semantics at each stage.
 *
 * Moreover, vdev_config_sync() is designed to be idempotent: if it fails
 * at any time, you can just call it again, and it will resume its work.
 */
int
vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg)
{
	spa_t *spa = svd[0]->vdev_spa;
	uberblock_t *ub = &spa->spa_uberblock;
	int error = 0;
	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;

	ASSERT(svdcount != 0);
retry:
	/*
	 * Normally, we don't want to try too hard to write every label and
	 * uberblock.  If there is a flaky disk, we don't want the rest of the
	 * sync process to block while we retry.  But if we can't write a
	 * single label out, we should retry with ZIO_FLAG_TRYHARD before
	 * bailing out and declaring the pool faulted.
	 */
	if (error != 0) {
		if ((flags & ZIO_FLAG_TRYHARD) != 0)
			return (error);
		flags |= ZIO_FLAG_TRYHARD;
	}

	ASSERT(ub->ub_txg <= txg);

	/*
	 * If this isn't a resync due to I/O errors,
	 * and nothing changed in this transaction group,
	 * and the vdev configuration hasn't changed,
	 * then there's nothing to do.
	 */
	if (ub->ub_txg < txg) {
		boolean_t changed = uberblock_update(ub, spa->spa_root_vdev,
		    txg, spa->spa_mmp.mmp_delay);

		if (!changed && list_is_empty(&spa->spa_config_dirty_list))
			return (0);
	}

	if (txg > spa_freeze_txg(spa))
		return (0);

	ASSERT(txg <= spa->spa_final_txg);

	/*
	 * Flush the write cache of every disk that's been written to
	 * in this transaction group.  This ensures that all blocks
	 * written in this txg will be committed to stable storage
	 * before any uberblock that references them.
	 */
	zio_t *zio = zio_root(spa, NULL, NULL, flags);

	for (vdev_t *vd =
	    txg_list_head(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)); vd != NULL;
	    vd = txg_list_next(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)))
		zio_flush(zio, vd);

	(void) zio_wait(zio);

	/*
	 * Sync out the even labels (L0, L2) for every dirty vdev.  If the
	 * system dies in the middle of this process, that's OK: all of the
	 * even labels that made it to disk will be newer than any uberblock,
	 * and will therefore be considered invalid.  The odd labels (L1, L3),
	 * which have not yet been touched, will still be valid.  We flush
	 * the new labels to disk to ensure that all even-label updates
	 * are committed to stable storage before the uberblock update.
	 */
	if ((error = vdev_label_sync_list(spa, 0, txg, flags)) != 0) {
		if ((flags & ZIO_FLAG_TRYHARD) != 0) {
			zfs_dbgmsg("vdev_label_sync_list() returned error %d "
			    "for pool '%s' when syncing out the even labels "
			    "of dirty vdevs", error, spa_name(spa));
		}
		goto retry;
	}

	/*
	 * Sync the uberblocks to all vdevs in svd[].
	 * If the system dies in the middle of this step, there are two cases
	 * to consider, and the on-disk state is consistent either way:
	 *
	 * (1)	If none of the new uberblocks made it to disk, then the
	 *	previous uberblock will be the newest, and the odd labels
	 *	(which had not yet been touched) will be valid with respect
	 *	to that uberblock.
	 *
	 * (2)	If one or more new uberblocks made it to disk, then they
	 *	will be the newest, and the even labels (which had all
	 *	been successfully committed) will be valid with respect
	 *	to the new uberblocks.
	 */
	if ((error = vdev_uberblock_sync_list(svd, svdcount, ub,
	    flags)) != 0) {
		if ((flags & ZIO_FLAG_TRYHARD) != 0) {
			zfs_dbgmsg("vdev_uberblock_sync_list() returned error "
			    "%d for pool '%s'", error, spa_name(spa));
		}
		goto retry;
	}

	if (spa_multihost(spa))
		mmp_update_uberblock(spa, ub);

	/*
	 * Sync out odd labels for every dirty vdev.  If the system dies
	 * in the middle of this process, the even labels and the new
	 * uberblocks will suffice to open the pool.  The next time
	 * the pool is opened, the first thing we'll do -- before any
	 * user data is modified -- is mark every vdev dirty so that
	 * all labels will be brought up to date.  We flush the new labels
	 * to disk to ensure that all odd-label updates are committed to
	 * stable storage before the next transaction group begins.
	 */
	if ((error = vdev_label_sync_list(spa, 1, txg, flags)) != 0) {
		if ((flags & ZIO_FLAG_TRYHARD) != 0) {
			zfs_dbgmsg("vdev_label_sync_list() returned error %d "
			    "for pool '%s' when syncing out the odd labels of "
			    "dirty vdevs", error, spa_name(spa));
		}
		goto retry;
	}