module/zfs/vdev_label.c
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
25 * Copyright (c) 2017, Intel Corporation.
26 */
27
28 /*
29 * Virtual Device Labels
30 * ---------------------
31 *
32 * The vdev label serves several distinct purposes:
33 *
34 * 1. Uniquely identify this device as part of a ZFS pool and confirm its
35 * identity within the pool.
36 *
37 * 2. Verify that all the devices given in a configuration are present
38 * within the pool.
39 *
40 * 3. Determine the uberblock for the pool.
41 *
42 * 4. In case of an import operation, determine the configuration of the
43 * toplevel vdev of which it is a part.
44 *
45 * 5. If an import operation cannot find all the devices in the pool,
46 * provide enough information to the administrator to determine which
47 * devices are missing.
48 *
49 * It is important to note that while the kernel is responsible for writing the
50 * label, it only consumes the information in the first three cases. The
51 * latter information is only consumed in userland when determining the
52 * configuration to import a pool.
53 *
54 *
55 * Label Organization
56 * ------------------
57 *
58 * Before describing the contents of the label, it's important to understand how
59 * the labels are written and updated with respect to the uberblock.
60 *
61 * When the pool configuration is altered, either because it was newly created
62 * or a device was added, we want to update all the labels such that we can deal
63 * with fatal failure at any point. To this end, each disk has two labels which
64 * are updated before and after the uberblock is synced. Assuming we have
65 * labels and an uberblock with the following transaction groups:
66 *
67 *              L1          UB          L2
68 *            +------+    +------+    +------+
69 *            |      |    |      |    |      |
70 *            | t10  |    | t10  |    | t10  |
71 *            |      |    |      |    |      |
72 *            +------+    +------+    +------+
73 *
74 * In this stable state, the labels and the uberblock were all updated within
75 * the same transaction group (10). Each label is mirrored and checksummed, so
76 * that we can detect when we fail partway through writing the label.
77 *
78 * In order to identify which labels are valid, the labels are written in the
79 * following manner:
80 *
81 * 1. For each vdev, update 'L1' to the new label
82 * 2. Update the uberblock
83 * 3. For each vdev, update 'L2' to the new label
84 *
85 * Given arbitrary failure, we can determine the correct label to use based on
86 * the transaction group. If we fail after updating L1 but before updating the
87 * UB, we will notice that L1's transaction group is greater than the uberblock,
88 * so L2 must be valid. If we fail after writing the uberblock but before
89 * writing L2, we will notice that L2's transaction group is less than L1, and
90 * therefore L1 is valid.
91 *
92 * Another added complexity is that not every label is updated when the config
93 * is synced. If we add a single device, we do not want to have to re-write
94 * every label for every device in the pool. This means that both L1 and L2 may
95 * be older than the pool uberblock, because the necessary information is stored
96 * on another vdev.
97 *
98 *
99 * On-disk Format
100 * --------------
101 *
102 * The vdev label consists of two distinct parts, and is wrapped within the
103 * vdev_label_t structure. The label includes 8k of padding to permit legacy
104 * VTOC disk labels; this padding is otherwise ignored.
105 *
106 * The first half of the label is a packed nvlist which contains pool wide
107 * properties, per-vdev properties, and configuration information. It is
108 * described in more detail below.
109 *
110 * The latter half of the label consists of a redundant array of uberblocks.
111 * These uberblocks are updated whenever a transaction group is committed,
112 * or when the configuration is updated. When a pool is loaded, we scan each
113 * vdev for the 'best' uberblock.
114 *
115 *
116 * Configuration Information
117 * -------------------------
118 *
119 * The nvlist describing the pool and vdev contains the following elements:
120 *
121 * version ZFS on-disk version
122 * name Pool name
123 * state Pool state
124 * txg Transaction group in which this label was written
125 * pool_guid Unique identifier for this pool
126 * vdev_tree An nvlist describing vdev tree.
127 * features_for_read
128 * An nvlist of the features necessary for reading the MOS.
129 *
130 * Each leaf device label also contains the following:
131 *
132 * top_guid Unique ID for top-level vdev in which this is contained
133 * guid Unique ID for the leaf vdev
134 *
135 * The 'vs' configuration follows the format described in 'spa_config.c'.
136 */
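/*
 * Illustration of the label-selection rule above (a standalone sketch, not
 * part of this module; the helper name is hypothetical).  Given the
 * transaction groups of the two label copies in the simplified scheme above
 * and of the pool uberblock, the copy that is guaranteed valid can be chosen
 * as follows.
 */
#include <stdint.h>

static int
example_pick_valid_label(uint64_t l1_txg, uint64_t l2_txg, uint64_t ub_txg)
{
        /* Crashed after step 1, before step 2: L1 is ahead of the UB; use L2. */
        if (l1_txg > ub_txg)
                return (2);
        /* Crashed after step 2, before step 3: L2 lags behind L1; use L1. */
        if (l2_txg < l1_txg)
                return (1);
        /* Stable state: both copies match the uberblock; either is valid. */
        return (1);
}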
137
138 #include <sys/zfs_context.h>
139 #include <sys/spa.h>
140 #include <sys/spa_impl.h>
141 #include <sys/dmu.h>
142 #include <sys/zap.h>
143 #include <sys/vdev.h>
144 #include <sys/vdev_impl.h>
145 #include <sys/vdev_draid.h>
146 #include <sys/uberblock_impl.h>
147 #include <sys/metaslab.h>
148 #include <sys/metaslab_impl.h>
149 #include <sys/zio.h>
150 #include <sys/dsl_scan.h>
151 #include <sys/abd.h>
152 #include <sys/fs/zfs.h>
153 #include <sys/byteorder.h>
154 #include <sys/zfs_bootenv.h>
155
156 /*
157 * Basic routines to read and write from a vdev label.
158 * Used throughout the rest of this file.
159 */
160 uint64_t
161 vdev_label_offset(uint64_t psize, int l, uint64_t offset)
162 {
163 ASSERT(offset < sizeof (vdev_label_t));
164 ASSERT(P2PHASE_TYPED(psize, sizeof (vdev_label_t), uint64_t) == 0);
165
166 return (offset + l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
167 0 : psize - VDEV_LABELS * sizeof (vdev_label_t)));
168 }
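/*
 * Worked example: with VDEV_LABELS == 4 and a 256 KiB vdev_label_t (the
 * usual on-disk label size), labels 0 and 1 sit at the front of the device
 * and labels 2 and 3 at the back, so for a given intra-label 'offset' the
 * function above returns:
 *
 *      l = 0:  offset
 *      l = 1:  offset + 256K
 *      l = 2:  offset + psize - 512K
 *      l = 3:  offset + psize - 256K
 */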
169
170 /*
171 * Returns the vdev label number associated with the given offset.
172 */
173 int
174 vdev_label_number(uint64_t psize, uint64_t offset)
175 {
176 int l;
177
178 if (offset >= psize - VDEV_LABEL_END_SIZE) {
179 offset -= psize - VDEV_LABEL_END_SIZE;
180 offset += (VDEV_LABELS / 2) * sizeof (vdev_label_t);
181 }
182 l = offset / sizeof (vdev_label_t);
183 return (l < VDEV_LABELS ? l : -1);
184 }
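/*
 * For example, assuming the same 256 KiB labels: on a large device an offset
 * 300 KiB from the start maps to label 1, while an offset 100 KiB before the
 * end of the device falls in the trailing VDEV_LABEL_END_SIZE region and
 * maps to label 3.
 */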
185
186 static void
187 vdev_label_read(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
188 uint64_t size, zio_done_func_t *done, void *private, int flags)
189 {
190 ASSERT(
191 spa_config_held(zio->io_spa, SCL_STATE, RW_READER) == SCL_STATE ||
192 spa_config_held(zio->io_spa, SCL_STATE, RW_WRITER) == SCL_STATE);
193 ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);
194
195 zio_nowait(zio_read_phys(zio, vd,
196 vdev_label_offset(vd->vdev_psize, l, offset),
197 size, buf, ZIO_CHECKSUM_LABEL, done, private,
198 ZIO_PRIORITY_SYNC_READ, flags, B_TRUE));
199 }
200
201 void
202 vdev_label_write(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
203 uint64_t size, zio_done_func_t *done, void *private, int flags)
204 {
205 ASSERT(
206 spa_config_held(zio->io_spa, SCL_STATE, RW_READER) == SCL_STATE ||
207 spa_config_held(zio->io_spa, SCL_STATE, RW_WRITER) == SCL_STATE);
208 ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);
209
210 zio_nowait(zio_write_phys(zio, vd,
211 vdev_label_offset(vd->vdev_psize, l, offset),
212 size, buf, ZIO_CHECKSUM_LABEL, done, private,
213 ZIO_PRIORITY_SYNC_WRITE, flags, B_TRUE));
214 }
215
216 /*
217 * Generate the nvlist representing this vdev's stats
218 */
219 void
220 vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv)
221 {
222 nvlist_t *nvx;
223 vdev_stat_t *vs;
224 vdev_stat_ex_t *vsx;
225
226 vs = kmem_alloc(sizeof (*vs), KM_SLEEP);
227 vsx = kmem_alloc(sizeof (*vsx), KM_SLEEP);
228
229 vdev_get_stats_ex(vd, vs, vsx);
230 fnvlist_add_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
231 (uint64_t *)vs, sizeof (*vs) / sizeof (uint64_t));
232
233 /*
234 * Add extended stats into a special extended stats nvlist. This keeps
235 * all the extended stats nicely grouped together. The extended stats
236 * nvlist is then added to the main nvlist.
237 */
238 nvx = fnvlist_alloc();
239
240 /* ZIOs in flight to disk */
241 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
242 vsx->vsx_active_queue[ZIO_PRIORITY_SYNC_READ]);
243
244 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
245 vsx->vsx_active_queue[ZIO_PRIORITY_SYNC_WRITE]);
246
247 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
248 vsx->vsx_active_queue[ZIO_PRIORITY_ASYNC_READ]);
249
250 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
251 vsx->vsx_active_queue[ZIO_PRIORITY_ASYNC_WRITE]);
252
253 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
254 vsx->vsx_active_queue[ZIO_PRIORITY_SCRUB]);
255
256 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
257 vsx->vsx_active_queue[ZIO_PRIORITY_TRIM]);
258
259 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
260 vsx->vsx_active_queue[ZIO_PRIORITY_REBUILD]);
261
262 /* ZIOs pending */
263 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
264 vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_READ]);
265
266 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
267 vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_WRITE]);
268
269 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
270 vsx->vsx_pend_queue[ZIO_PRIORITY_ASYNC_READ]);
271
272 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
273 vsx->vsx_pend_queue[ZIO_PRIORITY_ASYNC_WRITE]);
274
275 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
276 vsx->vsx_pend_queue[ZIO_PRIORITY_SCRUB]);
277
278 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
279 vsx->vsx_pend_queue[ZIO_PRIORITY_TRIM]);
280
281 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
282 vsx->vsx_pend_queue[ZIO_PRIORITY_REBUILD]);
283
284 /* Histograms */
285 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
286 vsx->vsx_total_histo[ZIO_TYPE_READ],
287 ARRAY_SIZE(vsx->vsx_total_histo[ZIO_TYPE_READ]));
288
289 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
290 vsx->vsx_total_histo[ZIO_TYPE_WRITE],
291 ARRAY_SIZE(vsx->vsx_total_histo[ZIO_TYPE_WRITE]));
292
293 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
294 vsx->vsx_disk_histo[ZIO_TYPE_READ],
295 ARRAY_SIZE(vsx->vsx_disk_histo[ZIO_TYPE_READ]));
296
297 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
298 vsx->vsx_disk_histo[ZIO_TYPE_WRITE],
299 ARRAY_SIZE(vsx->vsx_disk_histo[ZIO_TYPE_WRITE]));
300
301 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
302 vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_READ],
303 ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_READ]));
304
305 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
306 vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_WRITE],
307 ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_WRITE]));
308
309 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
310 vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_READ],
311 ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_READ]));
312
313 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
314 vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_WRITE],
315 ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_WRITE]));
316
317 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
318 vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB],
319 ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB]));
320
321 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
322 vsx->vsx_queue_histo[ZIO_PRIORITY_TRIM],
323 ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_TRIM]));
324
325 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
326 vsx->vsx_queue_histo[ZIO_PRIORITY_REBUILD],
327 ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_REBUILD]));
328
329 /* Request sizes */
330 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
331 vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ],
332 ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ]));
333
334 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
335 vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_WRITE],
336 ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_WRITE]));
337
338 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
339 vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_READ],
340 ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_READ]));
341
342 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
343 vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_WRITE],
344 ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_WRITE]));
345
346 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
347 vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB],
348 ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB]));
349
350 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
351 vsx->vsx_ind_histo[ZIO_PRIORITY_TRIM],
352 ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_TRIM]));
353
354 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
355 vsx->vsx_ind_histo[ZIO_PRIORITY_REBUILD],
356 ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_REBUILD]));
357
358 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
359 vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ],
360 ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ]));
361
362 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
363 vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_WRITE],
364 ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_WRITE]));
365
366 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
367 vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_READ],
368 ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_READ]));
369
370 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
371 vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_WRITE],
372 ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_WRITE]));
373
374 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
375 vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB],
376 ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB]));
377
378 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
379 vsx->vsx_agg_histo[ZIO_PRIORITY_TRIM],
380 ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_TRIM]));
381
382 fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
383 vsx->vsx_agg_histo[ZIO_PRIORITY_REBUILD],
384 ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_REBUILD]));
385
386 /* IO delays */
387 fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SLOW_IOS, vs->vs_slow_ios);
388
389 /* Add extended stats nvlist to main nvlist */
390 fnvlist_add_nvlist(nv, ZPOOL_CONFIG_VDEV_STATS_EX, nvx);
391
392 fnvlist_free(nvx);
393 kmem_free(vs, sizeof (*vs));
394 kmem_free(vsx, sizeof (*vsx));
395 }
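/*
 * Consumer-side sketch (illustrative only, not part of this module): the
 * extended stats packed above can be unpacked again in userland with
 * libnvpair.  The helper below is hypothetical and only shows the lookup
 * pattern.
 */
#include <libnvpair.h>
#include <sys/fs/zfs.h>

static uint64_t
example_sync_read_queue_depth(nvlist_t *nv)
{
        nvlist_t *nvx;
        uint64_t depth = 0;

        /* ZPOOL_CONFIG_VDEV_STATS_EX holds the nested extended-stats nvlist. */
        if (nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_VDEV_STATS_EX, &nvx) == 0)
                (void) nvlist_lookup_uint64(nvx,
                    ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE, &depth);

        return (depth);
}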
396
397 static void
398 root_vdev_actions_getprogress(vdev_t *vd, nvlist_t *nvl)
399 {
400 spa_t *spa = vd->vdev_spa;
401
402 if (vd != spa->spa_root_vdev)
403 return;
404
405 /* provide either current or previous scan information */
406 pool_scan_stat_t ps;
407 if (spa_scan_get_stats(spa, &ps) == 0) {
408 fnvlist_add_uint64_array(nvl,
409 ZPOOL_CONFIG_SCAN_STATS, (uint64_t *)&ps,
410 sizeof (pool_scan_stat_t) / sizeof (uint64_t));
411 }
412
413 pool_removal_stat_t prs;
414 if (spa_removal_get_stats(spa, &prs) == 0) {
415 fnvlist_add_uint64_array(nvl,
416 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t *)&prs,
417 sizeof (prs) / sizeof (uint64_t));
418 }
419
420 pool_checkpoint_stat_t pcs;
421 if (spa_checkpoint_get_stats(spa, &pcs) == 0) {
422 fnvlist_add_uint64_array(nvl,
423 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t *)&pcs,
424 sizeof (pcs) / sizeof (uint64_t));
425 }
426 }
427
428 static void
429 top_vdev_actions_getprogress(vdev_t *vd, nvlist_t *nvl)
430 {
431 if (vd == vd->vdev_top) {
432 vdev_rebuild_stat_t vrs;
433 if (vdev_rebuild_get_stats(vd, &vrs) == 0) {
434 fnvlist_add_uint64_array(nvl,
435 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t *)&vrs,
436 sizeof (vrs) / sizeof (uint64_t));
437 }
438 }
439 }
440
441 /*
442 * Generate the nvlist representing this vdev's config.
443 */
444 nvlist_t *
445 vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
446 vdev_config_flag_t flags)
447 {
448 nvlist_t *nv = NULL;
449 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
450
451 nv = fnvlist_alloc();
452
453 fnvlist_add_string(nv, ZPOOL_CONFIG_TYPE, vd->vdev_ops->vdev_op_type);
454 if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)))
455 fnvlist_add_uint64(nv, ZPOOL_CONFIG_ID, vd->vdev_id);
456 fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, vd->vdev_guid);
457
458 if (vd->vdev_path != NULL)
459 fnvlist_add_string(nv, ZPOOL_CONFIG_PATH, vd->vdev_path);
460
461 if (vd->vdev_devid != NULL)
462 fnvlist_add_string(nv, ZPOOL_CONFIG_DEVID, vd->vdev_devid);
463
464 if (vd->vdev_physpath != NULL)
465 fnvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH,
466 vd->vdev_physpath);
467
468 if (vd->vdev_enc_sysfs_path != NULL)
469 fnvlist_add_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
470 vd->vdev_enc_sysfs_path);
471
472 if (vd->vdev_fru != NULL)
473 fnvlist_add_string(nv, ZPOOL_CONFIG_FRU, vd->vdev_fru);
474
475 if (vd->vdev_ops->vdev_op_config_generate != NULL)
476 vd->vdev_ops->vdev_op_config_generate(vd, nv);
477
478 if (vd->vdev_wholedisk != -1ULL) {
479 fnvlist_add_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
480 vd->vdev_wholedisk);
481 }
482
483 if (vd->vdev_not_present && !(flags & VDEV_CONFIG_MISSING))
484 fnvlist_add_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 1);
485
486 if (vd->vdev_isspare)
487 fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);
488
489 if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)) &&
490 vd == vd->vdev_top) {
491 fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
492 vd->vdev_ms_array);
493 fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
494 vd->vdev_ms_shift);
495 fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift);
496 fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASIZE,
497 vd->vdev_asize);
498 fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_LOG, vd->vdev_islog);
499 if (vd->vdev_noalloc) {
500 fnvlist_add_uint64(nv, ZPOOL_CONFIG_NONALLOCATING,
501 vd->vdev_noalloc);
502 }
503 if (vd->vdev_removing) {
504 fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVING,
505 vd->vdev_removing);
506 }
507
508 /* zpool command expects alloc class data */
509 if (getstats && vd->vdev_alloc_bias != VDEV_BIAS_NONE) {
510 const char *bias = NULL;
511
512 switch (vd->vdev_alloc_bias) {
513 case VDEV_BIAS_LOG:
514 bias = VDEV_ALLOC_BIAS_LOG;
515 break;
516 case VDEV_BIAS_SPECIAL:
517 bias = VDEV_ALLOC_BIAS_SPECIAL;
518 break;
519 case VDEV_BIAS_DEDUP:
520 bias = VDEV_ALLOC_BIAS_DEDUP;
521 break;
522 default:
523 ASSERT3U(vd->vdev_alloc_bias, ==,
524 VDEV_BIAS_NONE);
525 }
526 fnvlist_add_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
527 bias);
528 }
529 }
530
531 if (vd->vdev_dtl_sm != NULL) {
532 fnvlist_add_uint64(nv, ZPOOL_CONFIG_DTL,
533 space_map_object(vd->vdev_dtl_sm));
534 }
535
536 if (vic->vic_mapping_object != 0) {
537 fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
538 vic->vic_mapping_object);
539 }
540
541 if (vic->vic_births_object != 0) {
542 fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
543 vic->vic_births_object);
544 }
545
546 if (vic->vic_prev_indirect_vdev != UINT64_MAX) {
547 fnvlist_add_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
548 vic->vic_prev_indirect_vdev);
549 }
550
551 if (vd->vdev_crtxg)
552 fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);
553
554 if (vd->vdev_expansion_time)
555 fnvlist_add_uint64(nv, ZPOOL_CONFIG_EXPANSION_TIME,
556 vd->vdev_expansion_time);
557
558 if (flags & VDEV_CONFIG_MOS) {
559 if (vd->vdev_leaf_zap != 0) {
560 ASSERT(vd->vdev_ops->vdev_op_leaf);
561 fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_LEAF_ZAP,
562 vd->vdev_leaf_zap);
563 }
564
565 if (vd->vdev_top_zap != 0) {
566 ASSERT(vd == vd->vdev_top);
567 fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
568 vd->vdev_top_zap);
569 }
570
571 if (vd->vdev_resilver_deferred) {
572 ASSERT(vd->vdev_ops->vdev_op_leaf);
573 ASSERT(spa->spa_resilver_deferred);
574 fnvlist_add_boolean(nv, ZPOOL_CONFIG_RESILVER_DEFER);
575 }
576 }
577
578 if (getstats) {
579 vdev_config_generate_stats(vd, nv);
580
581 root_vdev_actions_getprogress(vd, nv);
582 top_vdev_actions_getprogress(vd, nv);
583
584 /*
585 * Note: this can be called from open context
586 * (spa_get_stats()), so we need the rwlock to prevent
587 * the mapping from being changed by condensing.
588 */
589 rw_enter(&vd->vdev_indirect_rwlock, RW_READER);
590 if (vd->vdev_indirect_mapping != NULL) {
591 ASSERT(vd->vdev_indirect_births != NULL);
592 vdev_indirect_mapping_t *vim =
593 vd->vdev_indirect_mapping;
594 fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
595 vdev_indirect_mapping_size(vim));
596 }
597 rw_exit(&vd->vdev_indirect_rwlock);
598 if (vd->vdev_mg != NULL &&
599 vd->vdev_mg->mg_fragmentation != ZFS_FRAG_INVALID) {
600 /*
601 * Compute approximately how much memory would be used
602 * for the indirect mapping if this device were to
603 * be removed.
604 *
605 * Note: If the frag metric is invalid, then not
606 * enough metaslabs have been converted to have
607 * histograms.
608 */
609 uint64_t seg_count = 0;
610 uint64_t to_alloc = vd->vdev_stat.vs_alloc;
611
612 /*
613 * There are the same number of allocated segments
614 * as free segments, so we will have at least one
615 * entry per free segment. However, small free
616 * segments (smaller than vdev_removal_max_span)
617 * will be combined with adjacent allocated segments
618 * as a single mapping.
619 */
620 for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
621 if (i + 1 < highbit64(vdev_removal_max_span)
622 - 1) {
623 to_alloc +=
624 vd->vdev_mg->mg_histogram[i] <<
625 (i + 1);
626 } else {
627 seg_count +=
628 vd->vdev_mg->mg_histogram[i];
629 }
630 }
631
632 /*
633 * The maximum length of a mapping is
634 * zfs_remove_max_segment, so we need at least one entry
635 * per zfs_remove_max_segment of allocated data.
636 */
637 seg_count += to_alloc / spa_remove_max_segment(spa);
638
639 fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
640 seg_count *
641 sizeof (vdev_indirect_mapping_entry_phys_t));
642 }
643 }
644
645 if (!vd->vdev_ops->vdev_op_leaf) {
646 nvlist_t **child;
647 int c, idx;
648
649 ASSERT(!vd->vdev_ishole);
650
651 child = kmem_alloc(vd->vdev_children * sizeof (nvlist_t *),
652 KM_SLEEP);
653
654 for (c = 0, idx = 0; c < vd->vdev_children; c++) {
655 vdev_t *cvd = vd->vdev_child[c];
656
657 /*
658 * If we're generating an nvlist of removing
659 * vdevs then skip over any device which is
660 * not being removed.
661 */
662 if ((flags & VDEV_CONFIG_REMOVING) &&
663 !cvd->vdev_removing)
664 continue;
665
666 child[idx++] = vdev_config_generate(spa, cvd,
667 getstats, flags);
668 }
669
670 if (idx) {
671 fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
672 (const nvlist_t * const *)child, idx);
673 }
674
675 for (c = 0; c < idx; c++)
676 nvlist_free(child[c]);
677
678 kmem_free(child, vd->vdev_children * sizeof (nvlist_t *));
679
680 } else {
681 const char *aux = NULL;
682
683 if (vd->vdev_offline && !vd->vdev_tmpoffline)
684 fnvlist_add_uint64(nv, ZPOOL_CONFIG_OFFLINE, B_TRUE);
685 if (vd->vdev_resilver_txg != 0)
686 fnvlist_add_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
687 vd->vdev_resilver_txg);
688 if (vd->vdev_rebuild_txg != 0)
689 fnvlist_add_uint64(nv, ZPOOL_CONFIG_REBUILD_TXG,
690 vd->vdev_rebuild_txg);
691 if (vd->vdev_faulted)
692 fnvlist_add_uint64(nv, ZPOOL_CONFIG_FAULTED, B_TRUE);
693 if (vd->vdev_degraded)
694 fnvlist_add_uint64(nv, ZPOOL_CONFIG_DEGRADED, B_TRUE);
695 if (vd->vdev_removed)
696 fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVED, B_TRUE);
697 if (vd->vdev_unspare)
698 fnvlist_add_uint64(nv, ZPOOL_CONFIG_UNSPARE, B_TRUE);
699 if (vd->vdev_ishole)
700 fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_HOLE, B_TRUE);
701
702 /* Set the reason why we're FAULTED/DEGRADED. */
703 switch (vd->vdev_stat.vs_aux) {
704 case VDEV_AUX_ERR_EXCEEDED:
705 aux = "err_exceeded";
706 break;
707
708 case VDEV_AUX_EXTERNAL:
709 aux = "external";
710 break;
711 }
712
713 if (aux != NULL && !vd->vdev_tmpoffline) {
714 fnvlist_add_string(nv, ZPOOL_CONFIG_AUX_STATE, aux);
715 } else {
716 /*
717 * We're healthy - clear any previous AUX_STATE values.
718 */
719 if (nvlist_exists(nv, ZPOOL_CONFIG_AUX_STATE))
720 nvlist_remove_all(nv, ZPOOL_CONFIG_AUX_STATE);
721 }
722
723 if (vd->vdev_splitting && vd->vdev_orig_guid != 0LL) {
724 fnvlist_add_uint64(nv, ZPOOL_CONFIG_ORIG_GUID,
725 vd->vdev_orig_guid);
726 }
727 }
728
729 return (nv);
730 }
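/*
 * For reference, a leaf disk nvlist produced by the function above carries
 * at least 'type', 'id' and 'guid', plus (when known) 'path', 'devid',
 * 'phys_path', 'whole_disk' and 'create_txg', and any applicable state
 * flags (offline, faulted, degraded, removed, ...); interior vdevs instead
 * carry a 'children' array of recursively generated child configs.
 */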
731
732 /*
733 * Generate a view of the top-level vdevs. If we currently have holes
734 * in the namespace, then generate an array which contains a list of holey
735 * vdevs. Additionally, add the number of top-level children that currently
736 * exist.
737 */
738 void
739 vdev_top_config_generate(spa_t *spa, nvlist_t *config)
740 {
741 vdev_t *rvd = spa->spa_root_vdev;
742 uint64_t *array;
743 uint_t c, idx;
744
745 array = kmem_alloc(rvd->vdev_children * sizeof (uint64_t), KM_SLEEP);
746
747 for (c = 0, idx = 0; c < rvd->vdev_children; c++) {
748 vdev_t *tvd = rvd->vdev_child[c];
749
750 if (tvd->vdev_ishole) {
751 array[idx++] = c;
752 }
753 }
754
755 if (idx) {
756 VERIFY(nvlist_add_uint64_array(config, ZPOOL_CONFIG_HOLE_ARRAY,
757 array, idx) == 0);
758 }
759
760 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
761 rvd->vdev_children) == 0);
762
763 kmem_free(array, rvd->vdev_children * sizeof (uint64_t));
764 }
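/*
 * Example: a pool with four top-level slots whose third top-level vdev
 * (child id 2) has been removed and replaced by a hole results in
 * hole_array = { 2 } and vdev_children = 4 being added to the config.
 */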
765
766 /*
767 * Returns the configuration from the label of the given vdev. For vdevs
768 * which don't have a txg value stored on their label (i.e. spares/cache)
769 * or have not been completely initialized (txg = 0) just return
770 * the configuration from the first valid label we find. Otherwise,
771 * find the most up-to-date label that does not exceed the specified
772 * 'txg' value.
773 */
774 nvlist_t *
775 vdev_label_read_config(vdev_t *vd, uint64_t txg)
776 {
777 spa_t *spa = vd->vdev_spa;
778 nvlist_t *config = NULL;
779 vdev_phys_t *vp[VDEV_LABELS];
780 abd_t *vp_abd[VDEV_LABELS];
781 zio_t *zio[VDEV_LABELS];
782 uint64_t best_txg = 0;
783 uint64_t label_txg = 0;
784 int error = 0;
785 int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
786 ZIO_FLAG_SPECULATIVE;
787
788 ASSERT(vd->vdev_validate_thread == curthread ||
789 spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
790
791 if (!vdev_readable(vd))
792 return (NULL);
793
794 /*
795 * The label for a dRAID distributed spare is not stored on disk.
796 * Instead it is generated when needed which allows us to bypass
797 * the pipeline when reading the config from the label.
798 */
799 if (vd->vdev_ops == &vdev_draid_spare_ops)
800 return (vdev_draid_read_config_spare(vd));
801
802 for (int l = 0; l < VDEV_LABELS; l++) {
803 vp_abd[l] = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
804 vp[l] = abd_to_buf(vp_abd[l]);
805 }
806
807 retry:
808 for (int l = 0; l < VDEV_LABELS; l++) {
809 zio[l] = zio_root(spa, NULL, NULL, flags);
810
811 vdev_label_read(zio[l], vd, l, vp_abd[l],
812 offsetof(vdev_label_t, vl_vdev_phys), sizeof (vdev_phys_t),
813 NULL, NULL, flags);
814 }
815 for (int l = 0; l < VDEV_LABELS; l++) {
816 nvlist_t *label = NULL;
817
818 if (zio_wait(zio[l]) == 0 &&
819 nvlist_unpack(vp[l]->vp_nvlist, sizeof (vp[l]->vp_nvlist),
820 &label, 0) == 0) {
821 /*
822 * Auxiliary vdevs won't have txg values in their
823 * labels and newly added vdevs may not have been
824 * completely initialized so just return the
825 * configuration from the first valid label we
826 * encounter.
827 */
828 error = nvlist_lookup_uint64(label,
829 ZPOOL_CONFIG_POOL_TXG, &label_txg);
830 if ((error || label_txg == 0) && !config) {
831 config = label;
832 for (l++; l < VDEV_LABELS; l++)
833 zio_wait(zio[l]);
834 break;
835 } else if (label_txg <= txg && label_txg > best_txg) {
836 best_txg = label_txg;
837 nvlist_free(config);
838 config = fnvlist_dup(label);
839 }
840 }
841
842 if (label != NULL) {
843 nvlist_free(label);
844 label = NULL;
845 }
846 }
847
848 if (config == NULL && !(flags & ZIO_FLAG_TRYHARD)) {
849 flags |= ZIO_FLAG_TRYHARD;
850 goto retry;
851 }
852
853 /*
854 * We found a valid label but it didn't pass txg restrictions.
855 */
856 if (config == NULL && label_txg != 0) {
857 vdev_dbgmsg(vd, "label discarded as txg is too large "
858 "(%llu > %llu)", (u_longlong_t)label_txg,
859 (u_longlong_t)txg);
860 }
861
862 for (int l = 0; l < VDEV_LABELS; l++) {
863 abd_free(vp_abd[l]);
864 }
865
866 return (config);
867 }
868
869 /*
870 * Determine if a device is in use. The 'spare_guid' parameter will be filled
871 * in with the device guid if this spare is active elsewhere on the system.
872 */
873 static boolean_t
874 vdev_inuse(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason,
875 uint64_t *spare_guid, uint64_t *l2cache_guid)
876 {
877 spa_t *spa = vd->vdev_spa;
878 uint64_t state, pool_guid, device_guid, txg, spare_pool;
879 uint64_t vdtxg = 0;
880 nvlist_t *label;
881
882 if (spare_guid)
883 *spare_guid = 0ULL;
884 if (l2cache_guid)
885 *l2cache_guid = 0ULL;
886
887 /*
888 * Read the label, if any, and perform some basic sanity checks.
889 */
890 if ((label = vdev_label_read_config(vd, -1ULL)) == NULL)
891 return (B_FALSE);
892
893 (void) nvlist_lookup_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
894 &vdtxg);
895
896 if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
897 &state) != 0 ||
898 nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
899 &device_guid) != 0) {
900 nvlist_free(label);
901 return (B_FALSE);
902 }
903
904 if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
905 (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
906 &pool_guid) != 0 ||
907 nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
908 &txg) != 0)) {
909 nvlist_free(label);
910 return (B_FALSE);
911 }
912
913 nvlist_free(label);
914
915 /*
916 * Check to see if this device indeed belongs to the pool it claims to
917 * be a part of. The only way this is allowed is if the device is a hot
918 * spare (which we check for later on).
919 */
920 if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
921 !spa_guid_exists(pool_guid, device_guid) &&
922 !spa_spare_exists(device_guid, NULL, NULL) &&
923 !spa_l2cache_exists(device_guid, NULL))
924 return (B_FALSE);
925
926 /*
927 * If the transaction group is zero, then this is an initialized (but
928 * unused) label. This is only an error if the create transaction
929 * on-disk is the same as the one we're using now, in which case the
930 * user has attempted to add the same vdev multiple times in the same
931 * transaction.
932 */
933 if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
934 txg == 0 && vdtxg == crtxg)
935 return (B_TRUE);
936
937 /*
938 * Check to see if this is a spare device. We do an explicit check for
939 * spa_has_spare() here because it may be on our pending list of spares
940 * to add.
941 */
942 if (spa_spare_exists(device_guid, &spare_pool, NULL) ||
943 spa_has_spare(spa, device_guid)) {
944 if (spare_guid)
945 *spare_guid = device_guid;
946
947 switch (reason) {
948 case VDEV_LABEL_CREATE:
949 return (B_TRUE);
950
951 case VDEV_LABEL_REPLACE:
952 return (!spa_has_spare(spa, device_guid) ||
953 spare_pool != 0ULL);
954
955 case VDEV_LABEL_SPARE:
956 return (spa_has_spare(spa, device_guid));
957 default:
958 break;
959 }
960 }
961
962 /*
963 * Check to see if this is an l2cache device.
964 */
965 if (spa_l2cache_exists(device_guid, NULL) ||
966 spa_has_l2cache(spa, device_guid)) {
967 if (l2cache_guid)
968 *l2cache_guid = device_guid;
969
970 switch (reason) {
971 case VDEV_LABEL_CREATE:
972 return (B_TRUE);
973
974 case VDEV_LABEL_REPLACE:
975 return (!spa_has_l2cache(spa, device_guid));
976
977 case VDEV_LABEL_L2CACHE:
978 return (spa_has_l2cache(spa, device_guid));
979 default:
980 break;
981 }
982 }
983
984 /*
985 * We can't rely on a pool's state if it's been imported
986 * read-only. Instead we look to see if the pool is marked
987 * read-only in the namespace and set the state to active.
988 */
989 if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
990 (spa = spa_by_guid(pool_guid, device_guid)) != NULL &&
991 spa_mode(spa) == SPA_MODE_READ)
992 state = POOL_STATE_ACTIVE;
993
994 /*
995 * If the device is marked ACTIVE, then this device is in use by another
996 * pool on the system.
997 */
998 return (state == POOL_STATE_ACTIVE);
999 }
1000
1001 /*
1002 * Initialize a vdev label. We check to make sure each leaf device is not in
1003 * use, and writable. We put down an initial label which we will later
1004 * overwrite with a complete label. Note that it's important to do this
1005 * sequentially, not in parallel, so that we catch cases of multiple use of the
1006 * same leaf vdev in the vdev we're creating -- e.g. mirroring a disk with
1007 * itself.
1008 */
1009 int
1010 vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
1011 {
1012 spa_t *spa = vd->vdev_spa;
1013 nvlist_t *label;
1014 vdev_phys_t *vp;
1015 abd_t *vp_abd;
1016 abd_t *bootenv;
1017 uberblock_t *ub;
1018 abd_t *ub_abd;
1019 zio_t *zio;
1020 char *buf;
1021 size_t buflen;
1022 int error;
1023 uint64_t spare_guid = 0, l2cache_guid = 0;
1024 int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
1025
1026 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1027
1028 for (int c = 0; c < vd->vdev_children; c++)
1029 if ((error = vdev_label_init(vd->vdev_child[c],
1030 crtxg, reason)) != 0)
1031 return (error);
1032
1033 /* Track the creation time for this vdev */
1034 vd->vdev_crtxg = crtxg;
1035
1036 if (!vd->vdev_ops->vdev_op_leaf || !spa_writeable(spa))
1037 return (0);
1038
1039 /*
1040 * Dead vdevs cannot be initialized.
1041 */
1042 if (vdev_is_dead(vd))
1043 return (SET_ERROR(EIO));
1044
1045 /*
1046 * Determine if the vdev is in use.
1047 */
1048 if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPLIT &&
1049 vdev_inuse(vd, crtxg, reason, &spare_guid, &l2cache_guid))
1050 return (SET_ERROR(EBUSY));
1051
1052 /*
1053 * If this is a request to add or replace a spare or l2cache device
1054 * that is in use elsewhere on the system, then we must update the
1055 * guid (which was initialized to a random value) to reflect the
1056 * actual GUID (which is shared between multiple pools).
1057 */
1058 if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_L2CACHE &&
1059 spare_guid != 0ULL) {
1060 uint64_t guid_delta = spare_guid - vd->vdev_guid;
1061
1062 vd->vdev_guid += guid_delta;
1063
1064 for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
1065 pvd->vdev_guid_sum += guid_delta;
1066
1067 /*
1068 * If this is a replacement, then we want to fall through to the
1069 * rest of the code. If we're adding a spare, then it's already
1070 * labeled appropriately and we can just return.
1071 */
1072 if (reason == VDEV_LABEL_SPARE)
1073 return (0);
1074 ASSERT(reason == VDEV_LABEL_REPLACE ||
1075 reason == VDEV_LABEL_SPLIT);
1076 }
1077
1078 if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPARE &&
1079 l2cache_guid != 0ULL) {
1080 uint64_t guid_delta = l2cache_guid - vd->vdev_guid;
1081
1082 vd->vdev_guid += guid_delta;
1083
1084 for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
1085 pvd->vdev_guid_sum += guid_delta;
1086
1087 /*
1088 * If this is a replacement, then we want to fall through to the
1089 * rest of the code. If we're adding an l2cache, then it's
1090 * already labeled appropriately and we can just return.
1091 */
1092 if (reason == VDEV_LABEL_L2CACHE)
1093 return (0);
1094 ASSERT(reason == VDEV_LABEL_REPLACE);
1095 }
1096
1097 /*
1098 * Initialize its label.
1099 */
1100 vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
1101 abd_zero(vp_abd, sizeof (vdev_phys_t));
1102 vp = abd_to_buf(vp_abd);
1103
1104 /*
1105 * Generate a label describing the pool and our top-level vdev.
1106 * We mark it as being from txg 0 to indicate that it's not
1107 * really part of an active pool just yet. The labels will
1108 * be written again with a meaningful txg by spa_sync().
1109 */
1110 if (reason == VDEV_LABEL_SPARE ||
1111 (reason == VDEV_LABEL_REMOVE && vd->vdev_isspare)) {
1112 /*
1113 * For inactive hot spares, we generate a special label that
1114 * identifies as a mutually shared hot spare. We write the
1115 * label if we are adding a hot spare, or if we are removing an
1116 * active hot spare (in which case we want to revert the
1117 * labels).
1118 */
1119 VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1120
1121 VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
1122 spa_version(spa)) == 0);
1123 VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE,
1124 POOL_STATE_SPARE) == 0);
1125 VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID,
1126 vd->vdev_guid) == 0);
1127 } else if (reason == VDEV_LABEL_L2CACHE ||
1128 (reason == VDEV_LABEL_REMOVE && vd->vdev_isl2cache)) {
1129 /*
1130 * For level 2 ARC devices, add a special label.
1131 */
1132 VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1133
1134 VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
1135 spa_version(spa)) == 0);
1136 VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE,
1137 POOL_STATE_L2CACHE) == 0);
1138 VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID,
1139 vd->vdev_guid) == 0);
1140 } else {
1141 uint64_t txg = 0ULL;
1142
1143 if (reason == VDEV_LABEL_SPLIT)
1144 txg = spa->spa_uberblock.ub_txg;
1145 label = spa_config_generate(spa, vd, txg, B_FALSE);
1146
1147 /*
1148 * Add our creation time. This allows us to detect multiple
1149 * vdev uses as described above, and automatically expires if we
1150 * fail.
1151 */
1152 VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
1153 crtxg) == 0);
1154 }
1155
1156 buf = vp->vp_nvlist;
1157 buflen = sizeof (vp->vp_nvlist);
1158
1159 error = nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP);
1160 if (error != 0) {
1161 nvlist_free(label);
1162 abd_free(vp_abd);
1163 /* EFAULT means nvlist_pack ran out of room */
1164 return (SET_ERROR(error == EFAULT ? ENAMETOOLONG : EINVAL));
1165 }
1166
1167 /*
1168 * Initialize uberblock template.
1169 */
1170 ub_abd = abd_alloc_linear(VDEV_UBERBLOCK_RING, B_TRUE);
1171 abd_zero(ub_abd, VDEV_UBERBLOCK_RING);
1172 abd_copy_from_buf(ub_abd, &spa->spa_uberblock, sizeof (uberblock_t));
1173 ub = abd_to_buf(ub_abd);
1174 ub->ub_txg = 0;
1175
1176 /* Initialize the 2nd padding area. */
1177 bootenv = abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE);
1178 abd_zero(bootenv, VDEV_PAD_SIZE);
1179
1180 /*
1181 * Write everything in parallel.
1182 */
1183 retry:
1184 zio = zio_root(spa, NULL, NULL, flags);
1185
1186 for (int l = 0; l < VDEV_LABELS; l++) {
1187
1188 vdev_label_write(zio, vd, l, vp_abd,
1189 offsetof(vdev_label_t, vl_vdev_phys),
1190 sizeof (vdev_phys_t), NULL, NULL, flags);
1191
1192 /*
1193 * Skip the 1st padding area.
1194 * Zero out the 2nd padding area where it might have
1195 * left over data from previous filesystem format.
1196 */
1197 vdev_label_write(zio, vd, l, bootenv,
1198 offsetof(vdev_label_t, vl_be),
1199 VDEV_PAD_SIZE, NULL, NULL, flags);
1200
1201 vdev_label_write(zio, vd, l, ub_abd,
1202 offsetof(vdev_label_t, vl_uberblock),
1203 VDEV_UBERBLOCK_RING, NULL, NULL, flags);
1204 }
1205
1206 error = zio_wait(zio);
1207
1208 if (error != 0 && !(flags & ZIO_FLAG_TRYHARD)) {
1209 flags |= ZIO_FLAG_TRYHARD;
1210 goto retry;
1211 }
1212
1213 nvlist_free(label);
1214 abd_free(bootenv);
1215 abd_free(ub_abd);
1216 abd_free(vp_abd);
1217
1218 /*
1219 * If this vdev hasn't been previously identified as a spare, then we
1220 * mark it as such only if a) we are labeling it as a spare, or b) it
1221 * exists as a spare elsewhere in the system. Do the same for
1222 * level 2 ARC devices.
1223 */
1224 if (error == 0 && !vd->vdev_isspare &&
1225 (reason == VDEV_LABEL_SPARE ||
1226 spa_spare_exists(vd->vdev_guid, NULL, NULL)))
1227 spa_spare_add(vd);
1228
1229 if (error == 0 && !vd->vdev_isl2cache &&
1230 (reason == VDEV_LABEL_L2CACHE ||
1231 spa_l2cache_exists(vd->vdev_guid, NULL)))
1232 spa_l2cache_add(vd);
1233
1234 return (error);
1235 }
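/*
 * To summarize the initial layout written above: each label receives the
 * packed config nvlist at vl_vdev_phys, a zeroed bootenv block at vl_be,
 * and an uberblock ring seeded from the current in-core uberblock with
 * ub_txg forced to 0, so the device is not yet considered part of an
 * active pool.
 */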
1236
1237 /*
1238 * Done callback for vdev_label_read_bootenv_impl. If this is the first
1239 * callback to finish, store our abd in the callback pointer. Otherwise, we
1240 * just free our abd and return.
1241 */
1242 static void
1243 vdev_label_read_bootenv_done(zio_t *zio)
1244 {
1245 zio_t *rio = zio->io_private;
1246 abd_t **cbp = rio->io_private;
1247
1248 ASSERT3U(zio->io_size, ==, VDEV_PAD_SIZE);
1249
1250 if (zio->io_error == 0) {
1251 mutex_enter(&rio->io_lock);
1252 if (*cbp == NULL) {
1253 /* Will free this buffer in vdev_label_read_bootenv. */
1254 *cbp = zio->io_abd;
1255 } else {
1256 abd_free(zio->io_abd);
1257 }
1258 mutex_exit(&rio->io_lock);
1259 } else {
1260 abd_free(zio->io_abd);
1261 }
1262 }
1263
1264 static void
1265 vdev_label_read_bootenv_impl(zio_t *zio, vdev_t *vd, int flags)
1266 {
1267 for (int c = 0; c < vd->vdev_children; c++)
1268 vdev_label_read_bootenv_impl(zio, vd->vdev_child[c], flags);
1269
1270 /*
1271 * We just use the first label that has a correct checksum; the
1272 * bootloader should have rewritten them all to be the same on boot,
1273 * and any changes we made since boot have been the same across all
1274 * labels.
1275 */
1276 if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
1277 for (int l = 0; l < VDEV_LABELS; l++) {
1278 vdev_label_read(zio, vd, l,
1279 abd_alloc_linear(VDEV_PAD_SIZE, B_FALSE),
1280 offsetof(vdev_label_t, vl_be), VDEV_PAD_SIZE,
1281 vdev_label_read_bootenv_done, zio, flags);
1282 }
1283 }
1284 }
1285
1286 int
1287 vdev_label_read_bootenv(vdev_t *rvd, nvlist_t *bootenv)
1288 {
1289 nvlist_t *config;
1290 spa_t *spa = rvd->vdev_spa;
1291 abd_t *abd = NULL;
1292 int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
1293 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_TRYHARD;
1294
1295 ASSERT(bootenv);
1296 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1297
1298 zio_t *zio = zio_root(spa, NULL, &abd, flags);
1299 vdev_label_read_bootenv_impl(zio, rvd, flags);
1300 int err = zio_wait(zio);
1301
1302 if (abd != NULL) {
1303 char *buf;
1304 vdev_boot_envblock_t *vbe = abd_to_buf(abd);
1305
1306 vbe->vbe_version = ntohll(vbe->vbe_version);
1307 switch (vbe->vbe_version) {
1308 case VB_RAW:
1309 /*
1310 * if we have textual data in vbe_bootenv, create nvlist
1311 * with key "envmap".
1312 */
1313 fnvlist_add_uint64(bootenv, BOOTENV_VERSION, VB_RAW);
1314 vbe->vbe_bootenv[sizeof (vbe->vbe_bootenv) - 1] = '\0';
1315 fnvlist_add_string(bootenv, GRUB_ENVMAP,
1316 vbe->vbe_bootenv);
1317 break;
1318
1319 case VB_NVLIST:
1320 err = nvlist_unpack(vbe->vbe_bootenv,
1321 sizeof (vbe->vbe_bootenv), &config, 0);
1322 if (err == 0) {
1323 fnvlist_merge(bootenv, config);
1324 nvlist_free(config);
1325 break;
1326 }
1327 zfs_fallthrough;
1328 default:
1329 /* Check for FreeBSD zfs bootonce command string */
1330 buf = abd_to_buf(abd);
1331 if (*buf == '\0') {
1332 fnvlist_add_uint64(bootenv, BOOTENV_VERSION,
1333 VB_NVLIST);
1334 break;
1335 }
1336 fnvlist_add_string(bootenv, FREEBSD_BOOTONCE, buf);
1337 }
1338
1339 /*
1340 * abd was allocated in vdev_label_read_bootenv_impl()
1341 */
1342 abd_free(abd);
1343 /*
1344 * If we managed to read any successfully,
1345 * return success.
1346 */
1347 return (0);
1348 }
1349 return (err);
1350 }
1351
1352 int
1353 vdev_label_write_bootenv(vdev_t *vd, nvlist_t *env)
1354 {
1355 zio_t *zio;
1356 spa_t *spa = vd->vdev_spa;
1357 vdev_boot_envblock_t *bootenv;
1358 int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
1359 int error;
1360 size_t nvsize;
1361 char *nvbuf;
1362
1363 error = nvlist_size(env, &nvsize, NV_ENCODE_XDR);
1364 if (error != 0)
1365 return (SET_ERROR(error));
1366
1367 if (nvsize >= sizeof (bootenv->vbe_bootenv)) {
1368 return (SET_ERROR(E2BIG));
1369 }
1370
1371 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1372
1373 error = ENXIO;
1374 for (int c = 0; c < vd->vdev_children; c++) {
1375 int child_err;
1376
1377 child_err = vdev_label_write_bootenv(vd->vdev_child[c], env);
1378 /*
1379 * As long as any of the disks managed to write all of their
1380 * labels successfully, return success.
1381 */
1382 if (child_err == 0)
1383 error = child_err;
1384 }
1385
1386 if (!vd->vdev_ops->vdev_op_leaf || vdev_is_dead(vd) ||
1387 !vdev_writeable(vd)) {
1388 return (error);
1389 }
1390 ASSERT3U(sizeof (*bootenv), ==, VDEV_PAD_SIZE);
1391 abd_t *abd = abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE);
1392 abd_zero(abd, VDEV_PAD_SIZE);
1393
1394 bootenv = abd_borrow_buf_copy(abd, VDEV_PAD_SIZE);
1395 nvbuf = bootenv->vbe_bootenv;
1396 nvsize = sizeof (bootenv->vbe_bootenv);
1397
1398 bootenv->vbe_version = fnvlist_lookup_uint64(env, BOOTENV_VERSION);
1399 switch (bootenv->vbe_version) {
1400 case VB_RAW:
1401 if (nvlist_lookup_string(env, GRUB_ENVMAP, &nvbuf) == 0) {
1402 (void) strlcpy(bootenv->vbe_bootenv, nvbuf, nvsize);
1403 }
1404 error = 0;
1405 break;
1406
1407 case VB_NVLIST:
1408 error = nvlist_pack(env, &nvbuf, &nvsize, NV_ENCODE_XDR,
1409 KM_SLEEP);
1410 break;
1411
1412 default:
1413 error = EINVAL;
1414 break;
1415 }
1416
1417 if (error == 0) {
1418 bootenv->vbe_version = htonll(bootenv->vbe_version);
1419 abd_return_buf_copy(abd, bootenv, VDEV_PAD_SIZE);
1420 } else {
1421 abd_free(abd);
1422 return (SET_ERROR(error));
1423 }
1424
1425 retry:
1426 zio = zio_root(spa, NULL, NULL, flags);
1427 for (int l = 0; l < VDEV_LABELS; l++) {
1428 vdev_label_write(zio, vd, l, abd,
1429 offsetof(vdev_label_t, vl_be),
1430 VDEV_PAD_SIZE, NULL, NULL, flags);
1431 }
1432
1433 error = zio_wait(zio);
1434 if (error != 0 && !(flags & ZIO_FLAG_TRYHARD)) {
1435 flags |= ZIO_FLAG_TRYHARD;
1436 goto retry;
1437 }
1438
1439 abd_free(abd);
1440 return (error);
1441 }
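/*
 * Caller-side sketch (illustrative only; example_store_grub_envmap() is
 * hypothetical): building a VB_RAW bootenv nvlist before handing it to
 * vdev_label_write_bootenv().  Assumes the caller already holds the SCL_ALL
 * config locks required by the function above; the payload string is only
 * an example.
 */
static int
example_store_grub_envmap(vdev_t *vd)
{
        nvlist_t *env = fnvlist_alloc();
        int err;

        fnvlist_add_uint64(env, BOOTENV_VERSION, VB_RAW);
        fnvlist_add_string(env, GRUB_ENVMAP, "next_entry=recovery\n");

        err = vdev_label_write_bootenv(vd, env);
        fnvlist_free(env);

        return (err);
}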
1442
1443 /*
1444 * ==========================================================================
1445 * uberblock load/sync
1446 * ==========================================================================
1447 */
1448
1449 /*
1450 * Consider the following situation: txg is safely synced to disk. We've
1451 * written the first uberblock for txg + 1, and then we lose power. When we
1452 * come back up, we fail to see the uberblock for txg + 1 because, say,
1453 * it was on a mirrored device and the replica to which we wrote txg + 1
1454 * is now offline. If we then make some changes and sync txg + 1, and then
1455 * the missing replica comes back, then for a few seconds we'll have two
1456 * conflicting uberblocks on disk with the same txg. The solution is simple:
1457 * among uberblocks with equal txg, choose the one with the latest timestamp.
1458 */
1459 static int
1460 vdev_uberblock_compare(const uberblock_t *ub1, const uberblock_t *ub2)
1461 {
1462 int cmp = TREE_CMP(ub1->ub_txg, ub2->ub_txg);
1463
1464 if (likely(cmp))
1465 return (cmp);
1466
1467 cmp = TREE_CMP(ub1->ub_timestamp, ub2->ub_timestamp);
1468 if (likely(cmp))
1469 return (cmp);
1470
1471 /*
1472 * If MMP_VALID(ub) && MMP_SEQ_VALID(ub) then the host has an MMP-aware
1473 * ZFS, e.g. OpenZFS >= 0.7.
1474 *
1475 * If one ub has MMP and the other does not, they were written by
1476 * different hosts, which matters for MMP. So we treat no MMP/no SEQ as
1477 * a 0 value.
1478 *
1479 * Since timestamp and txg are the same if we get this far, either is
1480 * acceptable for importing the pool.
1481 */
1482 unsigned int seq1 = 0;
1483 unsigned int seq2 = 0;
1484
1485 if (MMP_VALID(ub1) && MMP_SEQ_VALID(ub1))
1486 seq1 = MMP_SEQ(ub1);
1487
1488 if (MMP_VALID(ub2) && MMP_SEQ_VALID(ub2))
1489 seq2 = MMP_SEQ(ub2);
1490
1491 return (TREE_CMP(seq1, seq2));
1492 }
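/*
 * For example, two uberblocks both at txg 100 but with timestamps 5000 and
 * 5007 compare in favor of the one stamped 5007; only when txg, timestamp
 * and MMP sequence all match are they considered equal.
 */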
1493
1494 struct ubl_cbdata {
1495 uberblock_t *ubl_ubbest; /* Best uberblock */
1496 vdev_t *ubl_vd; /* vdev associated with the above */
1497 };
1498
1499 static void
1500 vdev_uberblock_load_done(zio_t *zio)
1501 {
1502 vdev_t *vd = zio->io_vd;
1503 spa_t *spa = zio->io_spa;
1504 zio_t *rio = zio->io_private;
1505 uberblock_t *ub = abd_to_buf(zio->io_abd);
1506 struct ubl_cbdata *cbp = rio->io_private;
1507
1508 ASSERT3U(zio->io_size, ==, VDEV_UBERBLOCK_SIZE(vd));
1509
1510 if (zio->io_error == 0 && uberblock_verify(ub) == 0) {
1511 mutex_enter(&rio->io_lock);
1512 if (ub->ub_txg <= spa->spa_load_max_txg &&
1513 vdev_uberblock_compare(ub, cbp->ubl_ubbest) > 0) {
1514 /*
1515 * Keep track of the vdev in which this uberblock
1516 * was found. We will use this information later
1517 * to obtain the config nvlist associated with
1518 * this uberblock.
1519 */
1520 *cbp->ubl_ubbest = *ub;
1521 cbp->ubl_vd = vd;
1522 }
1523 mutex_exit(&rio->io_lock);
1524 }
1525
1526 abd_free(zio->io_abd);
1527 }
1528
1529 static void
1530 vdev_uberblock_load_impl(zio_t *zio, vdev_t *vd, int flags,
1531 struct ubl_cbdata *cbp)
1532 {
1533 for (int c = 0; c < vd->vdev_children; c++)
1534 vdev_uberblock_load_impl(zio, vd->vdev_child[c], flags, cbp);
1535
1536 if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd) &&
1537 vd->vdev_ops != &vdev_draid_spare_ops) {
1538 for (int l = 0; l < VDEV_LABELS; l++) {
1539 for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
1540 vdev_label_read(zio, vd, l,
1541 abd_alloc_linear(VDEV_UBERBLOCK_SIZE(vd),
1542 B_TRUE), VDEV_UBERBLOCK_OFFSET(vd, n),
1543 VDEV_UBERBLOCK_SIZE(vd),
1544 vdev_uberblock_load_done, zio, flags);
1545 }
1546 }
1547 }
1548 }
1549
1550 /*
1551 * Reads the 'best' uberblock from disk along with its associated
1552 * configuration. First, we read the uberblock array of each label of each
1553 * vdev, keeping track of the uberblock with the highest txg in each array.
1554 * Then, we read the configuration from the same vdev as the best uberblock.
1555 */
1556 void
1557 vdev_uberblock_load(vdev_t *rvd, uberblock_t *ub, nvlist_t **config)
1558 {
1559 zio_t *zio;
1560 spa_t *spa = rvd->vdev_spa;
1561 struct ubl_cbdata cb;
1562 int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
1563 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_TRYHARD;
1564
1565 ASSERT(ub);
1566 ASSERT(config);
1567
1568 bzero(ub, sizeof (uberblock_t));
1569 *config = NULL;
1570
1571 cb.ubl_ubbest = ub;
1572 cb.ubl_vd = NULL;
1573
1574 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1575 zio = zio_root(spa, NULL, &cb, flags);
1576 vdev_uberblock_load_impl(zio, rvd, flags, &cb);
1577 (void) zio_wait(zio);
1578
1579 /*
1580 * It's possible that the best uberblock was discovered on a label
1581 * that has a configuration which was written in a future txg.
1582 * Search all labels on this vdev to find the configuration that
1583 * matches the txg for our uberblock.
1584 */
1585 if (cb.ubl_vd != NULL) {
1586 vdev_dbgmsg(cb.ubl_vd, "best uberblock found for spa %s. "
1587 "txg %llu", spa->spa_name, (u_longlong_t)ub->ub_txg);
1588
1589 *config = vdev_label_read_config(cb.ubl_vd, ub->ub_txg);
1590 if (*config == NULL && spa->spa_extreme_rewind) {
1591 vdev_dbgmsg(cb.ubl_vd, "failed to read label config. "
1592 "Trying again without txg restrictions.");
1593 *config = vdev_label_read_config(cb.ubl_vd, UINT64_MAX);
1594 }
1595 if (*config == NULL) {
1596 vdev_dbgmsg(cb.ubl_vd, "failed to read label config");
1597 }
1598 }
1599 spa_config_exit(spa, SCL_ALL, FTAG);
1600 }
1601
1602 /*
1603 * For use when a leaf vdev is expanded.
1604 * The location of labels 2 and 3 changed, and at the new location the
1605 * uberblock rings are either empty or contain garbage. The sync will write
1606 * new configs there because the vdev is dirty, but expansion also needs the
1607 * uberblock rings copied. Read them from label 0 which did not move.
1608 *
1609 * Since the point is to populate labels {2,3} with valid uberblocks,
1610 * we zero uberblocks we fail to read or which are not valid.
1611 */
1612
1613 static void
1614 vdev_copy_uberblocks(vdev_t *vd)
1615 {
1616 abd_t *ub_abd;
1617 zio_t *write_zio;
1618 int locks = (SCL_L2ARC | SCL_ZIO);
1619 int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
1620 ZIO_FLAG_SPECULATIVE;
1621
1622 ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_READER) ==
1623 SCL_STATE);
1624 ASSERT(vd->vdev_ops->vdev_op_leaf);
1625
1626 /*
1627 * No uberblocks are stored on distributed spares, they may be
1628 * safely skipped when expanding a leaf vdev.
1629 */
1630 if (vd->vdev_ops == &vdev_draid_spare_ops)
1631 return;
1632
1633 spa_config_enter(vd->vdev_spa, locks, FTAG, RW_READER);
1634
1635 ub_abd = abd_alloc_linear(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
1636
1637 write_zio = zio_root(vd->vdev_spa, NULL, NULL, flags);
1638 for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
1639 const int src_label = 0;
1640 zio_t *zio;
1641
1642 zio = zio_root(vd->vdev_spa, NULL, NULL, flags);
1643 vdev_label_read(zio, vd, src_label, ub_abd,
1644 VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd),
1645 NULL, NULL, flags);
1646
1647 if (zio_wait(zio) || uberblock_verify(abd_to_buf(ub_abd)))
1648 abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
1649
1650 for (int l = 2; l < VDEV_LABELS; l++)
1651 vdev_label_write(write_zio, vd, l, ub_abd,
1652 VDEV_UBERBLOCK_OFFSET(vd, n),
1653 VDEV_UBERBLOCK_SIZE(vd), NULL, NULL,
1654 flags | ZIO_FLAG_DONT_PROPAGATE);
1655 }
1656 (void) zio_wait(write_zio);
1657
1658 spa_config_exit(vd->vdev_spa, locks, FTAG);
1659
1660 abd_free(ub_abd);
1661 }
1662
1663 /*
1664 * On success, increment root zio's count of good writes.
1665 * We only get credit for writes to known-visible vdevs; see spa_vdev_add().
1666 */
1667 static void
1668 vdev_uberblock_sync_done(zio_t *zio)
1669 {
1670 uint64_t *good_writes = zio->io_private;
1671
1672 if (zio->io_error == 0 && zio->io_vd->vdev_top->vdev_ms_array != 0)
1673 atomic_inc_64(good_writes);
1674 }
1675
1676 /*
1677 * Write the uberblock to all labels of all leaves of the specified vdev.
1678 */
1679 static void
1680 vdev_uberblock_sync(zio_t *zio, uint64_t *good_writes,
1681 uberblock_t *ub, vdev_t *vd, int flags)
1682 {
1683 for (uint64_t c = 0; c < vd->vdev_children; c++) {
1684 vdev_uberblock_sync(zio, good_writes,
1685 ub, vd->vdev_child[c], flags);
1686 }
1687
1688 if (!vd->vdev_ops->vdev_op_leaf)
1689 return;
1690
1691 if (!vdev_writeable(vd))
1692 return;
1693
1694 /*
1695 * There's no need to write uberblocks to a distributed spare, they
1696 * are already stored on all the leaves of the parent dRAID. For
1697 * this same reason vdev_uberblock_load_impl() skips distributed
1698 * spares when reading uberblocks.
1699 */
1700 if (vd->vdev_ops == &vdev_draid_spare_ops)
1701 return;
1702
1703 /* If the vdev was expanded, need to copy uberblock rings. */
1704 if (vd->vdev_state == VDEV_STATE_HEALTHY &&
1705 vd->vdev_copy_uberblocks == B_TRUE) {
1706 vdev_copy_uberblocks(vd);
1707 vd->vdev_copy_uberblocks = B_FALSE;
1708 }
1709
1710 int m = spa_multihost(vd->vdev_spa) ? MMP_BLOCKS_PER_LABEL : 0;
1711 int n = ub->ub_txg % (VDEV_UBERBLOCK_COUNT(vd) - m);
1712
1713 /* Copy the uberblock_t into the ABD */
1714 abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
1715 abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
1716 abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));
1717
1718 for (int l = 0; l < VDEV_LABELS; l++)
1719 vdev_label_write(zio, vd, l, ub_abd,
1720 VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd),
1721 vdev_uberblock_sync_done, good_writes,
1722 flags | ZIO_FLAG_DONT_PROPAGATE);
1723
1724 abd_free(ub_abd);
1725 }
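/*
 * Slot-selection example (sizes depend on ashift): on a 4 KiB-sector leaf
 * the 128 KiB uberblock ring holds 32 slots, so txg N normally lands in
 * slot N % 32; with multihost enabled one slot per label is reserved for
 * MMP writes and the modulus shrinks accordingly.
 */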
1726
1727 /* Sync the uberblocks to all vdevs in svd[] */
1728 static int
1729 vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags)
1730 {
1731 spa_t *spa = svd[0]->vdev_spa;
1732 zio_t *zio;
1733 uint64_t good_writes = 0;
1734
1735 zio = zio_root(spa, NULL, NULL, flags);
1736
1737 for (int v = 0; v < svdcount; v++)
1738 vdev_uberblock_sync(zio, &good_writes, ub, svd[v], flags);
1739
1740 (void) zio_wait(zio);
1741
1742 /*
1743 * Flush the uberblocks to disk. This ensures that the odd labels
1744 * are no longer needed (because the new uberblocks and the even
1745 * labels are safely on disk), so it is safe to overwrite them.
1746 */
1747 zio = zio_root(spa, NULL, NULL, flags);
1748
1749 for (int v = 0; v < svdcount; v++) {
1750 if (vdev_writeable(svd[v])) {
1751 zio_flush(zio, svd[v]);
1752 }
1753 }
1754
1755 (void) zio_wait(zio);
1756
1757 return (good_writes >= 1 ? 0 : EIO);
1758 }
1759
1760 /*
1761 * On success, increment the count of good writes for our top-level vdev.
1762 */
1763 static void
1764 vdev_label_sync_done(zio_t *zio)
1765 {
1766 uint64_t *good_writes = zio->io_private;
1767
1768 if (zio->io_error == 0)
1769 atomic_inc_64(good_writes);
1770 }
1771
1772 /*
1773 * If there weren't enough good writes, indicate failure to the parent.
1774 */
1775 static void
1776 vdev_label_sync_top_done(zio_t *zio)
1777 {
1778 uint64_t *good_writes = zio->io_private;
1779
1780 if (*good_writes == 0)
1781 zio->io_error = SET_ERROR(EIO);
1782
1783 kmem_free(good_writes, sizeof (uint64_t));
1784 }
1785
1786 /*
1787 * We ignore errors for log and cache devices; simply free the private data.
1788 */
1789 static void
1790 vdev_label_sync_ignore_done(zio_t *zio)
1791 {
1792 kmem_free(zio->io_private, sizeof (uint64_t));
1793 }
1794
1795 /*
1796 * Write all even or odd labels to all leaves of the specified vdev.
1797 */
1798 static void
1799 vdev_label_sync(zio_t *zio, uint64_t *good_writes,
1800 vdev_t *vd, int l, uint64_t txg, int flags)
1801 {
1802 nvlist_t *label;
1803 vdev_phys_t *vp;
1804 abd_t *vp_abd;
1805 char *buf;
1806 size_t buflen;
1807
1808 for (int c = 0; c < vd->vdev_children; c++) {
1809 vdev_label_sync(zio, good_writes,
1810 vd->vdev_child[c], l, txg, flags);
1811 }
1812
1813 if (!vd->vdev_ops->vdev_op_leaf)
1814 return;
1815
1816 if (!vdev_writeable(vd))
1817 return;
1818
1819 /*
1820 * The top-level config never needs to be written to a distributed
1821 * spare. When read, vdev_dspare_label_read_config() generates the
1822 * config that vdev_label_read_config() returns.
1823 */
1824 if (vd->vdev_ops == &vdev_draid_spare_ops)
1825 return;
1826
1827 /*
1828 * Generate a label describing the top-level config to which we belong.
1829 */
1830 label = spa_config_generate(vd->vdev_spa, vd, txg, B_FALSE);
1831
1832 vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
1833 abd_zero(vp_abd, sizeof (vdev_phys_t));
1834 vp = abd_to_buf(vp_abd);
1835
1836 buf = vp->vp_nvlist;
1837 buflen = sizeof (vp->vp_nvlist);
1838
1839 if (!nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP)) {
1840 for (; l < VDEV_LABELS; l += 2) {
1841 vdev_label_write(zio, vd, l, vp_abd,
1842 offsetof(vdev_label_t, vl_vdev_phys),
1843 sizeof (vdev_phys_t),
1844 vdev_label_sync_done, good_writes,
1845 flags | ZIO_FLAG_DONT_PROPAGATE);
1846 }
1847 }
1848
1849 abd_free(vp_abd);
1850 nvlist_free(label);
1851 }
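/*
 * Illustrative only -- how the "l += 2" parity loop above maps onto the
 * four on-disk labels.  Two 256 KiB labels sit at the front of the
 * device and two at the end, so each pass touches one label in each
 * region.  Offsets below ignore end-of-device alignment; psize stands
 * for the usable device size.
 */
#if 0
uint64_t label_size = 256 << 10;            /* sizeof (vdev_label_t)   */
uint64_t off_l0 = 0 * label_size;           /* front; written when l=0 */
uint64_t off_l1 = 1 * label_size;           /* front; written when l=1 */
uint64_t off_l2 = psize - 2 * label_size;   /* back;  written when l=0 */
uint64_t off_l3 = psize - 1 * label_size;   /* back;  written when l=1 */
/*
 * Losing power during the even (l = 0) pass leaves L1 and L3 untouched,
 * and vice versa, so one self-consistent label always survives in both
 * regions of the device.
 */
#endif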
1852
1853 static int
1854 vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags)
1855 {
1856 list_t *dl = &spa->spa_config_dirty_list;
1857 vdev_t *vd;
1858 zio_t *zio;
1859 int error;
1860
1861 /*
1862 * Write the new labels to disk.
1863 */
1864 zio = zio_root(spa, NULL, NULL, flags);
1865
1866 for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd)) {
1867 uint64_t *good_writes;
1868
1869 ASSERT(!vd->vdev_ishole);
1870
1871 good_writes = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
1872 zio_t *vio = zio_null(zio, spa, NULL,
1873 (vd->vdev_islog || vd->vdev_aux != NULL) ?
1874 vdev_label_sync_ignore_done : vdev_label_sync_top_done,
1875 good_writes, flags);
1876 vdev_label_sync(vio, good_writes, vd, l, txg, flags);
1877 zio_nowait(vio);
1878 }
1879
1880 error = zio_wait(zio);
1881
1882 /*
1883 * Flush the new labels to disk.
1884 */
1885 zio = zio_root(spa, NULL, NULL, flags);
1886
1887 for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd))
1888 zio_flush(zio, vd);
1889
1890 (void) zio_wait(zio);
1891
1892 return (error);
1893 }
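/*
 * Illustrative only -- the per-top-level-vdev accounting pattern used
 * above, restated as a sketch.  Each dirty top-level vdev gets its own
 * good-write counter and its own interlock zio, so a healthy top-level
 * vdev cannot mask one whose labels all failed to write; log and aux
 * devices substitute the "ignore" callback because their label errors
 * are tolerated.
 */
#if 0
uint64_t *good_writes = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);

/* Interlock: completes after every label write issued under it. */
zio_t *vio = zio_null(zio, spa, NULL,
    vdev_label_sync_top_done,       /* sets EIO if *good_writes == 0 */
    good_writes, flags);

/* Each successful leaf write bumps the counter via _sync_done(). */
vdev_label_sync(vio, good_writes, vd, l, txg, flags);
zio_nowait(vio);
#endif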
1894
1895 /*
1896 * Sync the uberblock and any changes to the vdev configuration.
1897 *
1898 * The order of operations is carefully crafted to ensure that
1899 * if the system panics or loses power at any time, the state on disk
1900 * is still transactionally consistent. The in-line comments below
1901 * describe the failure semantics at each stage.
1902 *
1903 * Moreover, vdev_config_sync() is designed to be idempotent: if it fails
1904 * at any time, you can just call it again, and it will resume its work.
1905 */
1906 int
1907 vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg)
1908 {
1909 spa_t *spa = svd[0]->vdev_spa;
1910 uberblock_t *ub = &spa->spa_uberblock;
1911 int error = 0;
1912 int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
1913
1914 ASSERT(svdcount != 0);
1915 retry:
1916 /*
1917 * Normally, we don't want to try too hard to write every label and
1918 * uberblock. If there is a flaky disk, we don't want the rest of the
1919 * sync process to block while we retry. But if we can't write a
1920 * single label out, we should retry with ZIO_FLAG_TRYHARD before
1921 * bailing out and declaring the pool faulted.
1922 */
1923 if (error != 0) {
1924 if ((flags & ZIO_FLAG_TRYHARD) != 0)
1925 return (error);
1926 flags |= ZIO_FLAG_TRYHARD;
1927 }
1928
1929 ASSERT(ub->ub_txg <= txg);
1930
1931 /*
1932 * If this isn't a resync due to I/O errors,
1933 * and nothing changed in this transaction group,
1934 * and the vdev configuration hasn't changed,
1935 * then there's nothing to do.
1936 */
1937 if (ub->ub_txg < txg) {
1938 boolean_t changed = uberblock_update(ub, spa->spa_root_vdev,
1939 txg, spa->spa_mmp.mmp_delay);
1940
1941 if (!changed && list_is_empty(&spa->spa_config_dirty_list))
1942 return (0);
1943 }
1944
1945 if (txg > spa_freeze_txg(spa))
1946 return (0);
1947
1948 ASSERT(txg <= spa->spa_final_txg);
1949
1950 /*
1951 * Flush the write cache of every disk that's been written to
1952 * in this transaction group. This ensures that all blocks
1953 * written in this txg will be committed to stable storage
1954 * before any uberblock that references them.
1955 */
1956 zio_t *zio = zio_root(spa, NULL, NULL, flags);
1957
1958 for (vdev_t *vd =
1959 txg_list_head(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)); vd != NULL;
1960 vd = txg_list_next(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)))
1961 zio_flush(zio, vd);
1962
1963 (void) zio_wait(zio);
1964
1965 /*
1966 * Sync out the even labels (L0, L2) for every dirty vdev. If the
1967 * system dies in the middle of this process, that's OK: all of the
1968 * even labels that made it to disk will be newer than any uberblock,
1969 * and will therefore be considered invalid. The odd labels (L1, L3),
1970 * which have not yet been touched, will still be valid. We flush
1971 * the new labels to disk to ensure that all even-label updates
1972 * are committed to stable storage before the uberblock update.
1973 */
1974 if ((error = vdev_label_sync_list(spa, 0, txg, flags)) != 0) {
1975 if ((flags & ZIO_FLAG_TRYHARD) != 0) {
1976 zfs_dbgmsg("vdev_label_sync_list() returned error %d "
1977 "for pool '%s' when syncing out the even labels "
1978 "of dirty vdevs", error, spa_name(spa));
1979 }
1980 goto retry;
1981 }
1982
1983 /*
1984 * Sync the uberblocks to all vdevs in svd[].
1985 * If the system dies in the middle of this step, there are two cases
1986 * to consider, and the on-disk state is consistent either way:
1987 *
1988 * (1) If none of the new uberblocks made it to disk, then the
1989 * previous uberblock will be the newest, and the odd labels
1990 * (which had not yet been touched) will be valid with respect
1991 * to that uberblock.
1992 *
1993 * (2) If one or more new uberblocks made it to disk, then they
1994 * will be the newest, and the even labels (which had all
1995 * been successfully committed) will be valid with respect
1996 * to the new uberblocks.
1997 */
1998 if ((error = vdev_uberblock_sync_list(svd, svdcount, ub, flags)) != 0) {
1999 if ((flags & ZIO_FLAG_TRYHARD) != 0) {
2000 zfs_dbgmsg("vdev_uberblock_sync_list() returned error "
2001 "%d for pool '%s'", error, spa_name(spa));
2002 }
2003 goto retry;
2004 }
2005
2006 if (spa_multihost(spa))
2007 mmp_update_uberblock(spa, ub);
2008
2009 /*
2010 * Sync out odd labels for every dirty vdev. If the system dies
2011 * in the middle of this process, the even labels and the new
2012 * uberblocks will suffice to open the pool. The next time
2013 * the pool is opened, the first thing we'll do -- before any
2014 * user data is modified -- is mark every vdev dirty so that
2015 * all labels will be brought up to date. We flush the new labels
2016 * to disk to ensure that all odd-label updates are committed to
2017 * stable storage before the next transaction group begins.
2018 */
2019 if ((error = vdev_label_sync_list(spa, 1, txg, flags)) != 0) {
2020 if ((flags & ZIO_FLAG_TRYHARD) != 0) {
2021 zfs_dbgmsg("vdev_label_sync_list() returned error %d "
2022 "for pool '%s' when syncing out the odd labels of "
2023 "dirty vdevs", error, spa_name(spa));
2024 }
2025 goto retry;
2026 }
2027
2028 return (0);
2029 }
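/*
 * Illustrative only -- the ordering enforced by vdev_config_sync(),
 * condensed.  Each step (including the flushes done inside the helper
 * functions) completes before the next begins, so a crash between any
 * two steps leaves either the old or the new uberblock with a matching,
 * fully written set of labels.
 */
#if 0
zio_flush(zio, vd);                                 /* 1. dirty vdev caches   */
vdev_label_sync_list(spa, 0, txg, flags);           /* 2. even labels (L0,L2) */
vdev_uberblock_sync_list(svd, svdcount, ub, flags); /* 3. uberblocks          */
vdev_label_sync_list(spa, 1, txg, flags);           /* 4. odd labels (L1,L3)  */
/* On any failure, retry the whole sequence once with ZIO_FLAG_TRYHARD. */
#endif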