/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

/*
 * Virtual Device Labels
 * ---------------------
 *
 * The vdev label serves several distinct purposes:
 *
 *	1. Uniquely identify this device as part of a ZFS pool and confirm its
 *	   identity within the pool.
 *
 *	2. Verify that all the devices given in a configuration are present
 *	   within the pool.
 *
 *	3. Determine the uberblock for the pool.
 *
 *	4. In case of an import operation, determine the configuration of the
 *	   toplevel vdev of which it is a part.
 *
 *	5. If an import operation cannot find all the devices in the pool,
 *	   provide enough information to the administrator to determine which
 *	   devices are missing.
 *
 * It is important to note that while the kernel is responsible for writing the
 * label, it only consumes the information in the first three cases.  The
 * latter information is only consumed in userland when determining the
 * configuration to import a pool.
 *
 *
 * Label Organization
 * ------------------
 *
 * Before describing the contents of the label, it's important to understand how
 * the labels are written and updated with respect to the uberblock.
 *
 * When the pool configuration is altered, either because it was newly created
 * or a device was added, we want to update all the labels such that we can deal
 * with fatal failure at any point.  To this end, each disk has two labels which
 * are updated before and after the uberblock is synced.  Assuming we have
 * labels and an uberblock with the following transaction groups:
 *
 *              L1          UB          L2
 *           +------+    +------+    +------+
 *           |      |    |      |    |      |
 *           | t10  |    | t10  |    | t10  |
 *           |      |    |      |    |      |
 *           +------+    +------+    +------+
 *
 * In this stable state, the labels and the uberblock were all updated within
 * the same transaction group (10).  Each label is mirrored and checksummed, so
 * that we can detect when we fail partway through writing the label.
 *
 * In order to identify which labels are valid, the labels are written in the
 * following manner:
 *
 *	1. For each vdev, update 'L1' to the new label
 *	2. Update the uberblock
 *	3. For each vdev, update 'L2' to the new label
 *
 * Given arbitrary failure, we can determine the correct label to use based on
 * the transaction group.  If we fail after updating L1 but before updating the
 * UB, we will notice that L1's transaction group is greater than the uberblock,
 * so L2 must be valid.  If we fail after writing the uberblock but before
 * writing L2, we will notice that L2's transaction group is less than L1, and
 * therefore L1 is valid.
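 *
 * For example, if after a crash we find L1 at transaction group 11 but the
 * uberblock and L2 still at transaction group 10, L1's newer txg tells us the
 * update was interrupted before the uberblock was rewritten, so the txg 10
 * uberblock together with L2 describes the consistent on-disk state.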
 *
 * Another added complexity is that not every label is updated when the config
 * is synced.  If we add a single device, we do not want to have to re-write
 * every label for every device in the pool.  This means that both L1 and L2 may
 * be older than the pool uberblock, because the necessary information is stored
 * on another vdev.
 *
 *
 * On-disk Format
 * --------------
 *
 * The vdev label consists of two distinct parts, and is wrapped within the
 * vdev_label_t structure.  The label includes 8k of padding to permit legacy
 * VTOC disk labels, but is otherwise ignored.
 *
 * The first half of the label is a packed nvlist which contains pool wide
 * properties, per-vdev properties, and configuration information.  It is
 * described in more detail below.
 *
 * The latter half of the label consists of a redundant array of uberblocks.
 * These uberblocks are updated whenever a transaction group is committed,
 * or when the configuration is updated.  When a pool is loaded, we scan each
 * vdev for the 'best' uberblock.
 *
 *
 * Configuration Information
 * -------------------------
 *
 * The nvlist describing the pool and vdev contains the following elements:
 *
 *	version		ZFS on-disk version
 *	name		Pool name
 *	state		Pool state
 *	txg		Transaction group in which this label was written
 *	pool_guid	Unique identifier for this pool
 *	vdev_tree	An nvlist describing vdev tree.
 *	features_for_read
 *			An nvlist of the features necessary for reading the MOS.
 *
 * Each leaf device label also contains the following:
 *
 *	top_guid	Unique ID for top-level vdev in which this is contained
 *	guid		Unique ID for the leaf vdev
 *
 * The 'vs' configuration follows the format described in 'spa_config.c'.
 */
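
/*
 * As a purely illustrative sketch (abbreviated, not a byte-accurate dump),
 * the packed nvlist on a leaf of an active mirrored pool might unpack to
 * something like:
 *
 *	version:	5000
 *	name:		'tank'
 *	state:		POOL_STATE_ACTIVE
 *	txg:		4
 *	pool_guid:	<uint64 pool identifier>
 *	top_guid:	<uint64 guid of the containing top-level vdev>
 *	guid:		<uint64 guid of this leaf>
 *	vdev_tree:	{ type: 'mirror', children: [ ... ] }
 */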

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/zio.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>

/*
 * Basic routines to read and write from a vdev label.
 * Used throughout the rest of this file.
 */
uint64_t
vdev_label_offset(uint64_t psize, int l, uint64_t offset)
{
	ASSERT(offset < sizeof (vdev_label_t));
	ASSERT(P2PHASE_TYPED(psize, sizeof (vdev_label_t), uint64_t) == 0);

	return (offset + l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : psize - VDEV_LABELS * sizeof (vdev_label_t)));
}
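
/*
 * As an illustration (assuming the usual on-disk constants, VDEV_LABELS == 4
 * and sizeof (vdev_label_t) == 256K): labels 0 and 1 sit at the front of the
 * device and labels 2 and 3 at the end, so for a given intra-label 'offset'
 * the expression above evaluates to
 *
 *	l = 0:	offset
 *	l = 1:	256K + offset
 *	l = 2:	psize - 512K + offset
 *	l = 3:	psize - 256K + offset
 */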

/*
 * Returns the vdev label number associated with the given offset.
 */
int
vdev_label_number(uint64_t psize, uint64_t offset)
{
	int l;

	if (offset >= psize - VDEV_LABEL_END_SIZE) {
		offset -= psize - VDEV_LABEL_END_SIZE;
		offset += (VDEV_LABELS / 2) * sizeof (vdev_label_t);
	}
	l = offset / sizeof (vdev_label_t);
	return (l < VDEV_LABELS ? l : -1);
}
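
/*
 * Worked example (same assumed constants as above, with
 * VDEV_LABEL_END_SIZE == 2 * sizeof (vdev_label_t) == 512K): an offset that
 * falls 100 bytes into the last 256K of the device is first rebased to
 * 768K + 100 and therefore maps to label 3, while an offset in the middle of
 * the device, beyond all four label regions, yields l >= VDEV_LABELS and
 * returns -1.
 */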

static void
vdev_label_read(zio_t *zio, vdev_t *vd, int l, void *buf, uint64_t offset,
    uint64_t size, zio_done_func_t *done, void *private, int flags)
{
	ASSERT(spa_config_held(zio->io_spa, SCL_STATE_ALL, RW_WRITER) ==
	    SCL_STATE_ALL);
	ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);

	zio_nowait(zio_read_phys(zio, vd,
	    vdev_label_offset(vd->vdev_psize, l, offset),
	    size, buf, ZIO_CHECKSUM_LABEL, done, private,
	    ZIO_PRIORITY_SYNC_READ, flags, B_TRUE));
}

static void
vdev_label_write(zio_t *zio, vdev_t *vd, int l, void *buf, uint64_t offset,
    uint64_t size, zio_done_func_t *done, void *private, int flags)
{
	ASSERT(spa_config_held(zio->io_spa, SCL_ALL, RW_WRITER) == SCL_ALL ||
	    (spa_config_held(zio->io_spa, SCL_CONFIG | SCL_STATE, RW_READER) ==
	    (SCL_CONFIG | SCL_STATE) &&
	    dsl_pool_sync_context(spa_get_dsl(zio->io_spa))));
	ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);

	zio_nowait(zio_write_phys(zio, vd,
	    vdev_label_offset(vd->vdev_psize, l, offset),
	    size, buf, ZIO_CHECKSUM_LABEL, done, private,
	    ZIO_PRIORITY_SYNC_WRITE, flags, B_TRUE));
}
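
/*
 * Note that reads and writes both funnel through vdev_label_offset(), so
 * callers always name a label by (label number, offset within the label) and
 * never need to know whether that label lives at the front or the back of
 * the device.
 */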

/*
 * Generate the nvlist representing this vdev's stats
 */
static void
vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv)
{
	nvlist_t *nvx;
	vdev_stat_t *vs;
	vdev_stat_ex_t *vsx;

	vs = kmem_alloc(sizeof (*vs), KM_SLEEP);
	vsx = kmem_alloc(sizeof (*vsx), KM_SLEEP);

	vdev_get_stats_ex(vd, vs, vsx);

	fnvlist_add_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t *)vs, sizeof (*vs) / sizeof (uint64_t));

	kmem_free(vs, sizeof (*vs));

	/*
	 * Add extended stats into a special extended stats nvlist.  This keeps
	 * all the extended stats nicely grouped together.  The extended stats
	 * nvlist is then added to the main nvlist.
	 */
	nvx = fnvlist_alloc();

	/* ZIOs in flight to disk */
	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
	    vsx->vsx_active_queue[ZIO_PRIORITY_SYNC_READ]);

	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
	    vsx->vsx_active_queue[ZIO_PRIORITY_SYNC_WRITE]);

	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
	    vsx->vsx_active_queue[ZIO_PRIORITY_ASYNC_READ]);

	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
	    vsx->vsx_active_queue[ZIO_PRIORITY_ASYNC_WRITE]);

	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
	    vsx->vsx_active_queue[ZIO_PRIORITY_SCRUB]);

	/* ZIOs pending */
	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
	    vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_READ]);

	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
	    vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_WRITE]);

	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
	    vsx->vsx_pend_queue[ZIO_PRIORITY_ASYNC_READ]);

	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
	    vsx->vsx_pend_queue[ZIO_PRIORITY_ASYNC_WRITE]);

	fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
	    vsx->vsx_pend_queue[ZIO_PRIORITY_SCRUB]);

	/* Histograms */
	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    vsx->vsx_total_histo[ZIO_TYPE_READ],
	    ARRAY_SIZE(vsx->vsx_total_histo[ZIO_TYPE_READ]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    vsx->vsx_total_histo[ZIO_TYPE_WRITE],
	    ARRAY_SIZE(vsx->vsx_total_histo[ZIO_TYPE_WRITE]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    vsx->vsx_disk_histo[ZIO_TYPE_READ],
	    ARRAY_SIZE(vsx->vsx_disk_histo[ZIO_TYPE_READ]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    vsx->vsx_disk_histo[ZIO_TYPE_WRITE],
	    ARRAY_SIZE(vsx->vsx_disk_histo[ZIO_TYPE_WRITE]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
	    vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_READ],
	    ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_READ]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
	    vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_WRITE],
	    ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_WRITE]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
	    vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_READ],
	    ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_READ]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
	    vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_WRITE],
	    ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_WRITE]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
	    vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB],
	    ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB]));

	/* Request sizes */
	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
	    vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ],
	    ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
	    vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_WRITE],
	    ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_WRITE]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
	    vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_READ],
	    ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_READ]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
	    vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_WRITE],
	    ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_WRITE]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
	    vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB],
	    ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
	    vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ],
	    ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
	    vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_WRITE],
	    ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_WRITE]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
	    vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_READ],
	    ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_READ]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
	    vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_WRITE],
	    ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_WRITE]));

	fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
	    vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB],
	    ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB]));

	/* Add extended stats nvlist to main nvlist */
	fnvlist_add_nvlist(nv, ZPOOL_CONFIG_VDEV_STATS_EX, nvx);

	/*
	 * fnvlist_add_nvlist() copies nvx into nv, so free our copy to
	 * avoid leaking it.
	 */
	fnvlist_free(nvx);
	kmem_free(vsx, sizeof (*vsx));
}
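
/*
 * A sketch of the consumer side (illustrative only, not part of this file):
 * a userland reader would fetch the extended stats back out of the config
 * with the ordinary nvlist accessors, e.g.:
 *
 *	nvlist_t *nvx;
 *	uint64_t *histo;
 *	uint_t buckets;
 *
 *	if (nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_VDEV_STATS_EX, &nvx) == 0 &&
 *	    nvlist_lookup_uint64_array(nvx,
 *	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO, &histo, &buckets) == 0) {
 *		...each histo[i] counts reads whose latency fell in bucket i...
 *	}
 */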

/*
 * Generate the nvlist representing this vdev's config.
 */
nvlist_t *
vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
    vdev_config_flag_t flags)
{
	nvlist_t *nv = NULL;

	nv = fnvlist_alloc();

	fnvlist_add_string(nv, ZPOOL_CONFIG_TYPE, vd->vdev_ops->vdev_op_type);
	if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)))
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_ID, vd->vdev_id);
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, vd->vdev_guid);

	if (vd->vdev_path != NULL)
		fnvlist_add_string(nv, ZPOOL_CONFIG_PATH, vd->vdev_path);

	if (vd->vdev_devid != NULL)
		fnvlist_add_string(nv, ZPOOL_CONFIG_DEVID, vd->vdev_devid);

	if (vd->vdev_physpath != NULL)
		fnvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH,
		    vd->vdev_physpath);

	if (vd->vdev_fru != NULL)
		fnvlist_add_string(nv, ZPOOL_CONFIG_FRU, vd->vdev_fru);

	if (vd->vdev_nparity != 0) {
		ASSERT(strcmp(vd->vdev_ops->vdev_op_type,
		    VDEV_TYPE_RAIDZ) == 0);

		/*
		 * Make sure someone hasn't managed to sneak a fancy new vdev
		 * into a crufty old storage pool.
		 */
		ASSERT(vd->vdev_nparity == 1 ||
		    (vd->vdev_nparity <= 2 &&
		    spa_version(spa) >= SPA_VERSION_RAIDZ2) ||
		    (vd->vdev_nparity <= 3 &&
		    spa_version(spa) >= SPA_VERSION_RAIDZ3));

		/*
		 * Note that we'll add the nparity tag even on storage pools
		 * that only support a single parity device -- older software
		 * will just ignore it.
		 */
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, vd->vdev_nparity);
	}

	if (vd->vdev_wholedisk != -1ULL)
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    vd->vdev_wholedisk);

	if (vd->vdev_not_present)
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 1);

	if (vd->vdev_isspare)
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);

	if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)) &&
	    vd == vd->vdev_top) {
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    vd->vdev_ms_array);
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    vd->vdev_ms_shift);
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift);
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    vd->vdev_asize);
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_LOG, vd->vdev_islog);
		if (vd->vdev_removing)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVING,
			    vd->vdev_removing);
	}

	if (vd->vdev_dtl_sm != NULL) {
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_DTL,
		    space_map_object(vd->vdev_dtl_sm));
	}

	fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);

	if (flags & VDEV_CONFIG_MOS) {
		if (vd->vdev_leaf_zap != 0) {
			ASSERT(vd->vdev_ops->vdev_op_leaf);
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_LEAF_ZAP,
			    vd->vdev_leaf_zap);
		}

		if (vd->vdev_top_zap != 0) {
			ASSERT(vd == vd->vdev_top);
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
			    vd->vdev_top_zap);
		}
	}

	if (getstats) {
		pool_scan_stat_t ps;

		vdev_config_generate_stats(vd, nv);

		/* provide either current or previous scan information */
		if (spa_scan_get_stats(spa, &ps) == 0) {
			fnvlist_add_uint64_array(nv,
			    ZPOOL_CONFIG_SCAN_STATS, (uint64_t *)&ps,
			    sizeof (pool_scan_stat_t) / sizeof (uint64_t));
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf) {
		nvlist_t **child;
		int c, idx;

		ASSERT(!vd->vdev_ishole);

		child = kmem_alloc(vd->vdev_children * sizeof (nvlist_t *),
		    KM_SLEEP);

		for (c = 0, idx = 0; c < vd->vdev_children; c++) {
			vdev_t *cvd = vd->vdev_child[c];

			/*
			 * If we're generating an nvlist of removing
			 * vdevs then skip over any device which is
			 * not being removed.
			 */
			if ((flags & VDEV_CONFIG_REMOVING) &&
			    !cvd->vdev_removing)
				continue;

			child[idx++] = vdev_config_generate(spa, cvd,
			    getstats, flags);
		}

		if (idx) {
			fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
			    child, idx);
		}

		for (c = 0; c < idx; c++)
			nvlist_free(child[c]);

		kmem_free(child, vd->vdev_children * sizeof (nvlist_t *));

	} else {
		const char *aux = NULL;

		if (vd->vdev_offline && !vd->vdev_tmpoffline)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_OFFLINE, B_TRUE);
		if (vd->vdev_resilver_txg != 0)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
			    vd->vdev_resilver_txg);
		if (vd->vdev_faulted)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_FAULTED, B_TRUE);
		if (vd->vdev_degraded)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_DEGRADED, B_TRUE);
		if (vd->vdev_removed)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVED, B_TRUE);
		if (vd->vdev_unspare)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_UNSPARE, B_TRUE);
		if (vd->vdev_ishole)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_HOLE, B_TRUE);

		switch (vd->vdev_stat.vs_aux) {
		case VDEV_AUX_ERR_EXCEEDED:
			aux = "err_exceeded";
			break;

		case VDEV_AUX_EXTERNAL:
			aux = "external";
			break;
		}

		if (aux != NULL)
			fnvlist_add_string(nv, ZPOOL_CONFIG_AUX_STATE, aux);

		if (vd->vdev_splitting && vd->vdev_orig_guid != 0LL) {
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_ORIG_GUID,
			    vd->vdev_orig_guid);
		}
	}

	return (nv);
}

/*
 * Generate a view of the top-level vdevs.  If we currently have holes
 * in the namespace, then generate an array which contains a list of holey
 * vdevs.  Additionally, add the number of top-level children that currently
 * exist.
 */
void
vdev_top_config_generate(spa_t *spa, nvlist_t *config)
{
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t *array;
	uint_t c, idx;

	array = kmem_alloc(rvd->vdev_children * sizeof (uint64_t), KM_SLEEP);

	for (c = 0, idx = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];

		if (tvd->vdev_ishole)
			array[idx++] = c;
	}

	if (idx) {
		VERIFY(nvlist_add_uint64_array(config, ZPOOL_CONFIG_HOLE_ARRAY,
		    array, idx) == 0);
	}

	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
	    rvd->vdev_children) == 0);

	kmem_free(array, rvd->vdev_children * sizeof (uint64_t));
}
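
/*
 * For example, if the middle top-level vdev of a three-child pool has been
 * removed (say a log device), the config generated above would carry
 * hole_array = { 1 } and vdev_children = 3, allowing a later import to
 * recreate the hole in the same namespace slot.
 */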

/*
 * Returns the configuration from the label of the given vdev. For vdevs
 * which don't have a txg value stored on their label (i.e. spares/cache)
 * or have not been completely initialized (txg = 0) just return
 * the configuration from the first valid label we find. Otherwise,
 * find the most up-to-date label that does not exceed the specified
 * 'txg' value.
 */
nvlist_t *
vdev_label_read_config(vdev_t *vd, uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	nvlist_t *config = NULL;
	vdev_phys_t *vp;
	zio_t *zio;
	uint64_t best_txg = 0;
	int error = 0;
	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_SPECULATIVE;
	int l;

	ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

	if (!vdev_readable(vd))
		return (NULL);

	vp = zio_buf_alloc(sizeof (vdev_phys_t));

retry:
	for (l = 0; l < VDEV_LABELS; l++) {
		nvlist_t *label = NULL;

		zio = zio_root(spa, NULL, NULL, flags);

		vdev_label_read(zio, vd, l, vp,
		    offsetof(vdev_label_t, vl_vdev_phys),
		    sizeof (vdev_phys_t), NULL, NULL, flags);

		if (zio_wait(zio) == 0 &&
		    nvlist_unpack(vp->vp_nvlist, sizeof (vp->vp_nvlist),
		    &label, 0) == 0) {
			uint64_t label_txg = 0;

			/*
			 * Auxiliary vdevs won't have txg values in their
			 * labels and newly added vdevs may not have been
			 * completely initialized so just return the
			 * configuration from the first valid label we
			 * encounter.
			 */
			error = nvlist_lookup_uint64(label,
			    ZPOOL_CONFIG_POOL_TXG, &label_txg);
			if ((error || label_txg == 0) && !config) {
				config = label;
				break;
			} else if (label_txg <= txg && label_txg > best_txg) {
				best_txg = label_txg;
				nvlist_free(config);
				config = fnvlist_dup(label);
			}
		}

		if (label != NULL) {
			nvlist_free(label);
			label = NULL;
		}
	}

	if (config == NULL && !(flags & ZIO_FLAG_TRYHARD)) {
		flags |= ZIO_FLAG_TRYHARD;
		goto retry;
	}

	zio_buf_free(vp, sizeof (vdev_phys_t));

	return (config);
}
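
/*
 * Callers that simply want the newest valid label (e.g. vdev_inuse() below)
 * pass a txg of -1ULL, which no label_txg can exceed, so the search above
 * degenerates to "highest transaction group wins".
 */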

/*
 * Determine if a device is in use.  The 'spare_guid' parameter will be filled
 * in with the device guid if this spare is active elsewhere on the system.
 */
static boolean_t
vdev_inuse(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason,
    uint64_t *spare_guid, uint64_t *l2cache_guid)
{
	spa_t *spa = vd->vdev_spa;
	uint64_t state, pool_guid, device_guid, txg, spare_pool;
	uint64_t vdtxg = 0;
	nvlist_t *label;

	if (spare_guid)
		*spare_guid = 0ULL;
	if (l2cache_guid)
		*l2cache_guid = 0ULL;

	/*
	 * Read the label, if any, and perform some basic sanity checks.
	 */
	if ((label = vdev_label_read_config(vd, -1ULL)) == NULL)
		return (B_FALSE);

	(void) nvlist_lookup_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
	    &vdtxg);

	if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
	    &state) != 0 ||
	    nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
	    &device_guid) != 0) {
		nvlist_free(label);
		return (B_FALSE);
	}
!= POOL_STATE_SPARE
&& state
!= POOL_STATE_L2CACHE
&&
685 (nvlist_lookup_uint64(label
, ZPOOL_CONFIG_POOL_GUID
,
687 nvlist_lookup_uint64(label
, ZPOOL_CONFIG_POOL_TXG
,
696 * Check to see if this device indeed belongs to the pool it claims to
697 * be a part of. The only way this is allowed is if the device is a hot
698 * spare (which we check for later on).
700 if (state
!= POOL_STATE_SPARE
&& state
!= POOL_STATE_L2CACHE
&&
701 !spa_guid_exists(pool_guid
, device_guid
) &&
702 !spa_spare_exists(device_guid
, NULL
, NULL
) &&
703 !spa_l2cache_exists(device_guid
, NULL
))
707 * If the transaction group is zero, then this an initialized (but
708 * unused) label. This is only an error if the create transaction
709 * on-disk is the same as the one we're using now, in which case the
710 * user has attempted to add the same vdev multiple times in the same
713 if (state
!= POOL_STATE_SPARE
&& state
!= POOL_STATE_L2CACHE
&&
714 txg
== 0 && vdtxg
== crtxg
)

	/*
	 * Check to see if this is a spare device.  We do an explicit check for
	 * spa_has_spare() here because it may be on our pending list of spares
	 * to add.  We also check if it is an l2cache device.
	 */
	if (spa_spare_exists(device_guid, &spare_pool, NULL) ||
	    spa_has_spare(spa, device_guid)) {
		if (spare_guid)
			*spare_guid = device_guid;

		switch (reason) {
		case VDEV_LABEL_CREATE:
		case VDEV_LABEL_L2CACHE:
			return (B_TRUE);

		case VDEV_LABEL_REPLACE:
			return (!spa_has_spare(spa, device_guid) ||
			    spare_pool != 0ULL);

		case VDEV_LABEL_SPARE:
			return (spa_has_spare(spa, device_guid));
		default:
			break;
		}
	}

	/*
	 * Check to see if this is an l2cache device.
	 */
	if (spa_l2cache_exists(device_guid, NULL))
		return (B_TRUE);

	/*
	 * We can't rely on a pool's state if it's been imported
	 * read-only.  Instead we look to see if the pool is marked
	 * read-only in the namespace and set the state to active.
	 */
	if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
	    (spa = spa_by_guid(pool_guid, device_guid)) != NULL &&
	    spa_mode(spa) == FREAD)
		state = POOL_STATE_ACTIVE;

	/*
	 * If the device is marked ACTIVE, then this device is in use by another
	 * pool on the system.
	 */
	return (state == POOL_STATE_ACTIVE);
}

/*
 * Initialize a vdev label.  We check to make sure each leaf device is not in
 * use, and writable.  We put down an initial label which we will later
 * overwrite with a complete label.  Note that it's important to do this
 * sequentially, not in parallel, so that we catch cases of multiple use of the
 * same leaf vdev in the vdev we're creating -- e.g. mirroring a disk with
 * itself.
 */
int
vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
{
	spa_t *spa = vd->vdev_spa;
	nvlist_t *label;
	vdev_phys_t *vp;
	char *pad2;
	uberblock_t *ub;
	zio_t *zio;
	char *buf;
	size_t buflen;
	int error;
	uint64_t spare_guid = 0, l2cache_guid = 0;
	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
	int c, l;
	vdev_t *pvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	for (c = 0; c < vd->vdev_children; c++)
		if ((error = vdev_label_init(vd->vdev_child[c],
		    crtxg, reason)) != 0)
			return (error);

	/* Track the creation time for this vdev */
	vd->vdev_crtxg = crtxg;

	if (!vd->vdev_ops->vdev_op_leaf || !spa_writeable(spa))
		return (0);

	/*
	 * Dead vdevs cannot be initialized.
	 */
	if (vdev_is_dead(vd))
		return (SET_ERROR(EIO));

	/*
	 * Determine if the vdev is in use.
	 */
	if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPLIT &&
	    vdev_inuse(vd, crtxg, reason, &spare_guid, &l2cache_guid))
		return (SET_ERROR(EBUSY));

	/*
	 * If this is a request to add or replace a spare or l2cache device
	 * that is in use elsewhere on the system, then we must update the
	 * guid (which was initialized to a random value) to reflect the
	 * actual GUID (which is shared between multiple pools).
	 */
	if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_L2CACHE &&
	    spare_guid != 0ULL) {
		uint64_t guid_delta = spare_guid - vd->vdev_guid;

		vd->vdev_guid += guid_delta;

		for (pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
			pvd->vdev_guid_sum += guid_delta;

		/*
		 * If this is a replacement, then we want to fallthrough to the
		 * rest of the code.  If we're adding a spare, then it's already
		 * labeled appropriately and we can just return.
		 */
		if (reason == VDEV_LABEL_SPARE)
			return (0);
		ASSERT(reason == VDEV_LABEL_REPLACE ||
		    reason == VDEV_LABEL_SPLIT);
	}

	if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPARE &&
	    l2cache_guid != 0ULL) {
		uint64_t guid_delta = l2cache_guid - vd->vdev_guid;

		vd->vdev_guid += guid_delta;

		for (pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
			pvd->vdev_guid_sum += guid_delta;

		/*
		 * If this is a replacement, then we want to fallthrough to the
		 * rest of the code.  If we're adding an l2cache, then it's
		 * already labeled appropriately and we can just return.
		 */
		if (reason == VDEV_LABEL_L2CACHE)
			return (0);
		ASSERT(reason == VDEV_LABEL_REPLACE);
	}

	/*
	 * Initialize its label.
	 */
	vp = zio_buf_alloc(sizeof (vdev_phys_t));
	bzero(vp, sizeof (vdev_phys_t));

	/*
	 * Generate a label describing the pool and our top-level vdev.
	 * We mark it as being from txg 0 to indicate that it's not
	 * really part of an active pool just yet.  The labels will
	 * be written again with a meaningful txg by spa_sync().
	 */
	if (reason == VDEV_LABEL_SPARE ||
	    (reason == VDEV_LABEL_REMOVE && vd->vdev_isspare)) {
		/*
		 * For inactive hot spares, we generate a special label that
		 * identifies as a mutually shared hot spare.  We write the
		 * label if we are adding a hot spare, or if we are removing an
		 * active hot spare (in which case we want to revert the
		 * labels).
		 */
		VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);

		VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
		    spa_version(spa)) == 0);
		VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE,
		    POOL_STATE_SPARE) == 0);
		VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID,
		    vd->vdev_guid) == 0);
	} else if (reason == VDEV_LABEL_L2CACHE ||
	    (reason == VDEV_LABEL_REMOVE && vd->vdev_isl2cache)) {
		/*
		 * For level 2 ARC devices, add a special label.
		 */
		VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);

		VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
		    spa_version(spa)) == 0);
		VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE,
		    POOL_STATE_L2CACHE) == 0);
		VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID,
		    vd->vdev_guid) == 0);
	} else {
		uint64_t txg = 0ULL;

		if (reason == VDEV_LABEL_SPLIT)
			txg = spa->spa_uberblock.ub_txg;
		label = spa_config_generate(spa, vd, txg, B_FALSE);

		/*
		 * Add our creation time.  This allows us to detect multiple
		 * vdev uses as described above, and automatically expires if we
		 * fail.
		 */
		VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
		    crtxg) == 0);
	}

	buf = vp->vp_nvlist;
	buflen = sizeof (vp->vp_nvlist);

	error = nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP);
	if (error != 0) {
		nvlist_free(label);
		zio_buf_free(vp, sizeof (vdev_phys_t));
		/* EFAULT means nvlist_pack ran out of room */
		return (error == EFAULT ? ENAMETOOLONG : EINVAL);
	}

	/*
	 * Initialize uberblock template.
	 */
	ub = zio_buf_alloc(VDEV_UBERBLOCK_RING);
	bzero(ub, VDEV_UBERBLOCK_RING);
	*ub = spa->spa_uberblock;
	ub->ub_txg = 0;

	/* Initialize the 2nd padding area. */
	pad2 = zio_buf_alloc(VDEV_PAD_SIZE);
	bzero(pad2, VDEV_PAD_SIZE);

	/*
	 * Write everything in parallel.
	 */
retry:
	zio = zio_root(spa, NULL, NULL, flags);

	for (l = 0; l < VDEV_LABELS; l++) {

		vdev_label_write(zio, vd, l, vp,
		    offsetof(vdev_label_t, vl_vdev_phys),
		    sizeof (vdev_phys_t), NULL, NULL, flags);

		/*
		 * Skip the 1st padding area.
		 * Zero out the 2nd padding area where it might have
		 * left over data from previous filesystem format.
		 */
		vdev_label_write(zio, vd, l, pad2,
		    offsetof(vdev_label_t, vl_pad2),
		    VDEV_PAD_SIZE, NULL, NULL, flags);

		vdev_label_write(zio, vd, l, ub,
		    offsetof(vdev_label_t, vl_uberblock),
		    VDEV_UBERBLOCK_RING, NULL, NULL, flags);
	}

	error = zio_wait(zio);

	if (error != 0 && !(flags & ZIO_FLAG_TRYHARD)) {
		flags |= ZIO_FLAG_TRYHARD;
		goto retry;
	}

	nvlist_free(label);
	zio_buf_free(pad2, VDEV_PAD_SIZE);
	zio_buf_free(ub, VDEV_UBERBLOCK_RING);
	zio_buf_free(vp, sizeof (vdev_phys_t));

	/*
	 * If this vdev hasn't been previously identified as a spare, then we
	 * mark it as such only if a) we are labeling it as a spare, or b) it
	 * exists as a spare elsewhere in the system.  Do the same for
	 * level 2 ARC devices.
	 */
	if (error == 0 && !vd->vdev_isspare &&
	    (reason == VDEV_LABEL_SPARE ||
	    spa_spare_exists(vd->vdev_guid, NULL, NULL)))
		spa_spare_add(vd);

	if (error == 0 && !vd->vdev_isl2cache &&
	    (reason == VDEV_LABEL_L2CACHE ||
	    spa_l2cache_exists(vd->vdev_guid, NULL)))
		spa_l2cache_add(vd);

	return (error);
}

/*
 * ==========================================================================
 * uberblock load/sync
 * ==========================================================================
 */

/*
 * Consider the following situation: txg is safely synced to disk.  We've
 * written the first uberblock for txg + 1, and then we lose power.  When we
 * come back up, we fail to see the uberblock for txg + 1 because, say,
 * it was on a mirrored device and the replica to which we wrote txg + 1
 * is now offline.  If we then make some changes and sync txg + 1, and then
 * the missing replica comes back, then for a few seconds we'll have two
 * conflicting uberblocks on disk with the same txg.  The solution is simple:
 * among uberblocks with equal txg, choose the one with the latest timestamp.
 */
static int
vdev_uberblock_compare(uberblock_t *ub1, uberblock_t *ub2)
{
	if (ub1->ub_txg < ub2->ub_txg)
		return (-1);
	if (ub1->ub_txg > ub2->ub_txg)
		return (1);

	if (ub1->ub_timestamp < ub2->ub_timestamp)
		return (-1);
	if (ub1->ub_timestamp > ub2->ub_timestamp)
		return (1);

	return (0);
}
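
/*
 * In the two-uberblock scenario described above, for instance, both copies
 * carry txg 10, so the txg comparisons fall through and the copy with the
 * later ub_timestamp is the one chosen.
 */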

struct ubl_cbdata {
	uberblock_t	*ubl_ubbest;	/* Best uberblock */
	vdev_t		*ubl_vd;	/* vdev associated with the above */
};

static void
vdev_uberblock_load_done(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	spa_t *spa = zio->io_spa;
	zio_t *rio = zio->io_private;
	uberblock_t *ub = zio->io_data;
	struct ubl_cbdata *cbp = rio->io_private;

	ASSERT3U(zio->io_size, ==, VDEV_UBERBLOCK_SIZE(vd));

	if (zio->io_error == 0 && uberblock_verify(ub) == 0) {
		mutex_enter(&rio->io_lock);
		if (ub->ub_txg <= spa->spa_load_max_txg &&
		    vdev_uberblock_compare(ub, cbp->ubl_ubbest) > 0) {
			/*
			 * Keep track of the vdev in which this uberblock
			 * was found.  We will use this information later
			 * to obtain the config nvlist associated with
			 * this uberblock.
			 */
			*cbp->ubl_ubbest = *ub;
			cbp->ubl_vd = vd;
		}
		mutex_exit(&rio->io_lock);
	}

	zio_buf_free(zio->io_data, zio->io_size);
}

static void
vdev_uberblock_load_impl(zio_t *zio, vdev_t *vd, int flags,
    struct ubl_cbdata *cbp)
{
	int c, l, n;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_uberblock_load_impl(zio, vd->vdev_child[c], flags, cbp);

	if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
		for (l = 0; l < VDEV_LABELS; l++) {
			for (n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
				vdev_label_read(zio, vd, l,
				    zio_buf_alloc(VDEV_UBERBLOCK_SIZE(vd)),
				    VDEV_UBERBLOCK_OFFSET(vd, n),
				    VDEV_UBERBLOCK_SIZE(vd),
				    vdev_uberblock_load_done, zio, flags);
			}
		}
	}
}

/*
 * Reads the 'best' uberblock from disk along with its associated
 * configuration.  First, we read the uberblock array of each label of each
 * vdev, keeping track of the uberblock with the highest txg in each array.
 * Then, we read the configuration from the same vdev as the best uberblock.
 */
void
vdev_uberblock_load(vdev_t *rvd, uberblock_t *ub, nvlist_t **config)
{
	zio_t *zio;
	spa_t *spa = rvd->vdev_spa;
	struct ubl_cbdata cb;
	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_TRYHARD;

	ASSERT(ub);
	ASSERT(config);

	bzero(ub, sizeof (uberblock_t));
	*config = NULL;

	cb.ubl_ubbest = ub;
	cb.ubl_vd = NULL;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	zio = zio_root(spa, NULL, &cb, flags);
	vdev_uberblock_load_impl(zio, rvd, flags, &cb);
	(void) zio_wait(zio);

	/*
	 * It's possible that the best uberblock was discovered on a label
	 * that has a configuration which was written in a future txg.
	 * Search all labels on this vdev to find the configuration that
	 * matches the txg for our uberblock.
	 */
	if (cb.ubl_vd != NULL)
		*config = vdev_label_read_config(cb.ubl_vd, ub->ub_txg);
	spa_config_exit(spa, SCL_ALL, FTAG);
}

/*
 * On success, increment root zio's count of good writes.
 * We only get credit for writes to known-visible vdevs; see spa_vdev_add().
 */
static void
vdev_uberblock_sync_done(zio_t *zio)
{
	uint64_t *good_writes = zio->io_private;

	if (zio->io_error == 0 && zio->io_vd->vdev_top->vdev_ms_array != 0)
		atomic_inc_64(good_writes);
}

/*
 * Write the uberblock to all labels of all leaves of the specified vdev.
 */
static void
vdev_uberblock_sync(zio_t *zio, uberblock_t *ub, vdev_t *vd, int flags)
{
	uberblock_t *ubbuf;
	int c, l, n;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_uberblock_sync(zio, ub, vd->vdev_child[c], flags);

	if (!vd->vdev_ops->vdev_op_leaf)
		return;

	if (!vdev_writeable(vd))
		return;

	n = ub->ub_txg & (VDEV_UBERBLOCK_COUNT(vd) - 1);
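	/*
	 * VDEV_UBERBLOCK_COUNT(vd) is a power of two, so the mask above
	 * selects ring slot (txg % count); each new txg therefore overwrites
	 * the slot holding the oldest surviving uberblock.
	 */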
	ubbuf = zio_buf_alloc(VDEV_UBERBLOCK_SIZE(vd));
	bzero(ubbuf, VDEV_UBERBLOCK_SIZE(vd));
	*ubbuf = *ub;

	for (l = 0; l < VDEV_LABELS; l++)
		vdev_label_write(zio, vd, l, ubbuf,
		    VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd),
		    vdev_uberblock_sync_done, zio->io_private,
		    flags | ZIO_FLAG_DONT_PROPAGATE);

	zio_buf_free(ubbuf, VDEV_UBERBLOCK_SIZE(vd));
}

/* Sync the uberblocks to all vdevs in svd[] */
static int
vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags)
{
	spa_t *spa = svd[0]->vdev_spa;
	zio_t *zio;
	uint64_t good_writes = 0;
	int v;

	zio = zio_root(spa, NULL, &good_writes, flags);

	for (v = 0; v < svdcount; v++)
		vdev_uberblock_sync(zio, ub, svd[v], flags);

	(void) zio_wait(zio);

	/*
	 * Flush the uberblocks to disk.  This ensures that the odd labels
	 * are no longer needed (because the new uberblocks and the even
	 * labels are safely on disk), so it is safe to overwrite them.
	 */
	zio = zio_root(spa, NULL, NULL, flags);

	for (v = 0; v < svdcount; v++)
		zio_flush(zio, svd[v]);

	(void) zio_wait(zio);

	return (good_writes >= 1 ? 0 : EIO);
}
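
/*
 * Note that one successful uberblock write (good_writes >= 1) is enough for
 * the list sync to be declared good; the failure discussion in
 * vdev_config_sync() below explains why the surviving copies keep the pool
 * consistent until the lagging labels are rewritten.
 */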

/*
 * On success, increment the count of good writes for our top-level vdev.
 */
static void
vdev_label_sync_done(zio_t *zio)
{
	uint64_t *good_writes = zio->io_private;

	if (zio->io_error == 0)
		atomic_inc_64(good_writes);
}

/*
 * If there weren't enough good writes, indicate failure to the parent.
 */
static void
vdev_label_sync_top_done(zio_t *zio)
{
	uint64_t *good_writes = zio->io_private;

	if (*good_writes == 0)
		zio->io_error = SET_ERROR(EIO);

	kmem_free(good_writes, sizeof (uint64_t));
}

/*
 * We ignore errors for log and cache devices, simply free the private data.
 */
static void
vdev_label_sync_ignore_done(zio_t *zio)
{
	kmem_free(zio->io_private, sizeof (uint64_t));
}

/*
 * Write all even or odd labels to all leaves of the specified vdev.
 */
static void
vdev_label_sync(zio_t *zio, vdev_t *vd, int l, uint64_t txg, int flags)
{
	nvlist_t *label;
	vdev_phys_t *vp;
	char *buf;
	size_t buflen;
	int c;

	for (c = 0; c < vd->vdev_children; c++)
		vdev_label_sync(zio, vd->vdev_child[c], l, txg, flags);

	if (!vd->vdev_ops->vdev_op_leaf)
		return;

	if (!vdev_writeable(vd))
		return;

	/*
	 * Generate a label describing the top-level config to which we belong.
	 */
	label = spa_config_generate(vd->vdev_spa, vd, txg, B_FALSE);

	vp = zio_buf_alloc(sizeof (vdev_phys_t));
	bzero(vp, sizeof (vdev_phys_t));

	buf = vp->vp_nvlist;
	buflen = sizeof (vp->vp_nvlist);

	if (!nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP)) {
		for (; l < VDEV_LABELS; l += 2) {
			vdev_label_write(zio, vd, l, vp,
			    offsetof(vdev_label_t, vl_vdev_phys),
			    sizeof (vdev_phys_t),
			    vdev_label_sync_done, zio->io_private,
			    flags | ZIO_FLAG_DONT_PROPAGATE);
		}
	}

	zio_buf_free(vp, sizeof (vdev_phys_t));
	nvlist_free(label);
}

int
vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags)
{
	list_t *dl = &spa->spa_config_dirty_list;
	vdev_t *vd;
	zio_t *zio;
	int error;

	/*
	 * Write the new labels to disk.
	 */
	zio = zio_root(spa, NULL, NULL, flags);

	for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd)) {
		uint64_t *good_writes;
		zio_t *vio;

		ASSERT(!vd->vdev_ishole);

		good_writes = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
		vio = zio_null(zio, spa, NULL,
		    (vd->vdev_islog || vd->vdev_aux != NULL) ?
		    vdev_label_sync_ignore_done : vdev_label_sync_top_done,
		    good_writes, flags);
		vdev_label_sync(vio, vd, l, txg, flags);
		zio_nowait(vio);
	}

	error = zio_wait(zio);

	/*
	 * Flush the new labels to disk.
	 */
	zio = zio_root(spa, NULL, NULL, flags);

	for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd))
		zio_flush(zio, vd);

	(void) zio_wait(zio);

	return (error);
}

/*
 * Sync the uberblock and any changes to the vdev configuration.
 *
 * The order of operations is carefully crafted to ensure that
 * if the system panics or loses power at any time, the state on disk
 * is still transactionally consistent.  The in-line comments below
 * describe the failure semantics at each stage.
 *
 * Moreover, vdev_config_sync() is designed to be idempotent: if it fails
 * at any time, you can just call it again, and it will resume its work.
 */
1342 vdev_config_sync(vdev_t
**svd
, int svdcount
, uint64_t txg
)
1344 spa_t
*spa
= svd
[0]->vdev_spa
;
1345 uberblock_t
*ub
= &spa
->spa_uberblock
;
1349 int flags
= ZIO_FLAG_CONFIG_WRITER
| ZIO_FLAG_CANFAIL
;
1353 * Normally, we don't want to try too hard to write every label and
1354 * uberblock. If there is a flaky disk, we don't want the rest of the
1355 * sync process to block while we retry. But if we can't write a
1356 * single label out, we should retry with ZIO_FLAG_TRYHARD before
1357 * bailing out and declaring the pool faulted.
1360 if ((flags
& ZIO_FLAG_TRYHARD
) != 0)
1362 flags
|= ZIO_FLAG_TRYHARD
;
1365 ASSERT(ub
->ub_txg
<= txg
);
1368 * If this isn't a resync due to I/O errors,
1369 * and nothing changed in this transaction group,
1370 * and the vdev configuration hasn't changed,
1371 * then there's nothing to do.
1373 if (ub
->ub_txg
< txg
&&
1374 uberblock_update(ub
, spa
->spa_root_vdev
, txg
) == B_FALSE
&&
1375 list_is_empty(&spa
->spa_config_dirty_list
))
1378 if (txg
> spa_freeze_txg(spa
))
1381 ASSERT(txg
<= spa
->spa_final_txg
);
1384 * Flush the write cache of every disk that's been written to
1385 * in this transaction group. This ensures that all blocks
1386 * written in this txg will be committed to stable storage
1387 * before any uberblock that references them.
1389 zio
= zio_root(spa
, NULL
, NULL
, flags
);
1391 for (vd
= txg_list_head(&spa
->spa_vdev_txg_list
, TXG_CLEAN(txg
)); vd
;
1392 vd
= txg_list_next(&spa
->spa_vdev_txg_list
, vd
, TXG_CLEAN(txg
)))
1395 (void) zio_wait(zio
);
1398 * Sync out the even labels (L0, L2) for every dirty vdev. If the
1399 * system dies in the middle of this process, that's OK: all of the
1400 * even labels that made it to disk will be newer than any uberblock,
1401 * and will therefore be considered invalid. The odd labels (L1, L3),
1402 * which have not yet been touched, will still be valid. We flush
1403 * the new labels to disk to ensure that all even-label updates
1404 * are committed to stable storage before the uberblock update.
1406 if ((error
= vdev_label_sync_list(spa
, 0, txg
, flags
)) != 0)
1410 * Sync the uberblocks to all vdevs in svd[].
1411 * If the system dies in the middle of this step, there are two cases
1412 * to consider, and the on-disk state is consistent either way:
1414 * (1) If none of the new uberblocks made it to disk, then the
1415 * previous uberblock will be the newest, and the odd labels
1416 * (which had not yet been touched) will be valid with respect
1417 * to that uberblock.
1419 * (2) If one or more new uberblocks made it to disk, then they
1420 * will be the newest, and the even labels (which had all
1421 * been successfully committed) will be valid with respect
1422 * to the new uberblocks.
1424 if ((error
= vdev_uberblock_sync_list(svd
, svdcount
, ub
, flags
)) != 0)
1428 * Sync out odd labels for every dirty vdev. If the system dies
1429 * in the middle of this process, the even labels and the new
1430 * uberblocks will suffice to open the pool. The next time
1431 * the pool is opened, the first thing we'll do -- before any
1432 * user data is modified -- is mark every vdev dirty so that
1433 * all labels will be brought up to date. We flush the new labels
1434 * to disk to ensure that all odd-label updates are committed to
1435 * stable storage before the next transaction group begins.
1437 if ((error
= vdev_label_sync_list(spa
, 1, txg
, flags
)) != 0)