/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2021 by Delphix. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016 Toomas Soome <tsoome@me.com>
 * Copyright 2017 Joyent, Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, Datto Inc. All rights reserved.
 * Copyright (c) 2021, Klara Inc.
 * Copyright [2021] Hewlett Packard Enterprise Development LP
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dir.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_draid.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/space_reftree.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_raidz.h>
#include <sys/abd.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/zvol.h>
#include <sys/zfs_ratelimit.h>
#include "zfs_prop.h"

/*
 * One metaslab from each (normal-class) vdev is used by the ZIL.  These are
 * called "embedded slog metaslabs", are referenced by vdev_log_mg, and are
 * part of the spa_embedded_log_class.  The metaslab with the most free space
 * in each vdev is selected for this purpose when the pool is opened (or a
 * vdev is added).  See vdev_metaslab_init().
 *
 * Log blocks can be allocated from the following locations.  Each one is
 * tried in order until the allocation succeeds:
 * 1. dedicated log vdevs, aka "slog" (spa_log_class)
 * 2. embedded slog metaslabs (spa_embedded_log_class)
 * 3. other metaslabs in normal vdevs (spa_normal_class)
 *
 * zfs_embedded_slog_min_ms disables the embedded slog if there are fewer
 * than this number of metaslabs in the vdev.  This ensures that we don't set
 * aside an unreasonable amount of space for the ZIL.  If set to less than
 * 1 << (spa_slop_shift + 1), on small pools the usable space may be reduced
 * (by more than 1<<spa_slop_shift) due to the embedded slog metaslab.
 */
static int zfs_embedded_slog_min_ms = 64;

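/*
 * Illustrative arithmetic (editor's note, not in the original source):
 * with the default of 64 and the default 512 MiB metaslab size below, a
 * vdev must span at least 64 * 512 MiB = 32 GiB before one metaslab is
 * set aside for the embedded slog, so at most 1/64 (~1.6%) of a small
 * vdev's space is dedicated to it.
 */
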
/* default target for number of metaslabs per top-level vdev */
static int zfs_vdev_default_ms_count = 200;

/* minimum number of metaslabs per top-level vdev */
static int zfs_vdev_min_ms_count = 16;

/* practical upper limit of total metaslabs per top-level vdev */
static int zfs_vdev_ms_count_limit = 1ULL << 17;

/* lower limit for metaslab size (512M) */
static int zfs_vdev_default_ms_shift = 29;

/* upper limit for metaslab size (16G) */
static const int zfs_vdev_max_ms_shift = 34;

int vdev_validate_skip = B_FALSE;

/*
 * Since the DTL space map of a vdev is not expected to have a lot of
 * entries, we default its block size to 4K.
 */
int zfs_vdev_dtl_sm_blksz = (1 << 12);

/*
 * Rate limit slow IO (delay) events to this many per second.
 */
static unsigned int zfs_slow_io_events_per_second = 20;

/*
 * Rate limit checksum events after this many checksum errors per second.
 */
static unsigned int zfs_checksum_events_per_second = 20;

/*
 * Ignore errors during scrub/resilver.  Allows working around a failing
 * resilver upon import when there are pool errors.
 */
static int zfs_scan_ignore_errors = 0;

/*
 * vdev-wide space maps that have lots of entries written to them at
 * the end of each transaction can benefit from a higher I/O bandwidth
 * (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
 */
int zfs_vdev_standard_sm_blksz = (1 << 17);

/*
 * Tunable parameter for debugging or performance analysis.  Setting this
 * will cause pool corruption on power loss if a volatile out-of-order
 * write cache is enabled.
 */
int zfs_nocacheflush = 0;

uint64_t zfs_vdev_max_auto_ashift = ASHIFT_MAX;
uint64_t zfs_vdev_min_auto_ashift = ASHIFT_MIN;

void
vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	if (vd->vdev_path != NULL) {
		zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
		    vd->vdev_path, buf);
	} else {
		zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
		    vd->vdev_ops->vdev_op_type,
		    (u_longlong_t)vd->vdev_id,
		    (u_longlong_t)vd->vdev_guid, buf);
	}
}

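/*
 * Example call site (editor's note; this mirrors the actual usage in
 * vdev_metaslab_init() later in this file):
 *
 *	vdev_dbgmsg(vd, "metaslab_init failed [error=%d]", error);
 *
 * which, for a disk leaf vdev, produces a debug message like
 * "disk vdev '/dev/sda1': metaslab_init failed [error=5]".
 */
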
void
vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
{
	char state[20];

	if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
		zfs_dbgmsg("%*svdev %llu: %s", indent, "",
		    (u_longlong_t)vd->vdev_id,
		    vd->vdev_ops->vdev_op_type);
		return;
	}

	switch (vd->vdev_state) {
	case VDEV_STATE_UNKNOWN:
		(void) snprintf(state, sizeof (state), "unknown");
		break;
	case VDEV_STATE_CLOSED:
		(void) snprintf(state, sizeof (state), "closed");
		break;
	case VDEV_STATE_OFFLINE:
		(void) snprintf(state, sizeof (state), "offline");
		break;
	case VDEV_STATE_REMOVED:
		(void) snprintf(state, sizeof (state), "removed");
		break;
	case VDEV_STATE_CANT_OPEN:
		(void) snprintf(state, sizeof (state), "can't open");
		break;
	case VDEV_STATE_FAULTED:
		(void) snprintf(state, sizeof (state), "faulted");
		break;
	case VDEV_STATE_DEGRADED:
		(void) snprintf(state, sizeof (state), "degraded");
		break;
	case VDEV_STATE_HEALTHY:
		(void) snprintf(state, sizeof (state), "healthy");
		break;
	default:
		(void) snprintf(state, sizeof (state), "<state %u>",
		    (uint_t)vd->vdev_state);
	}

	zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
	    "", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type,
	    vd->vdev_islog ? " (log)" : "",
	    (u_longlong_t)vd->vdev_guid,
	    vd->vdev_path ? vd->vdev_path : "N/A", state);

	for (uint64_t i = 0; i < vd->vdev_children; i++)
		vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
}

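/*
 * For illustration (editor's note, not in the original source), a healthy
 * two-way mirror printed by vdev_dbgmsg_print_tree() with the format
 * string above would look roughly like:
 *
 *	vdev 0: mirror, guid: 12345, path: N/A, healthy
 *	  vdev 0: disk, guid: 23456, path: /dev/sda1, healthy
 *	  vdev 1: disk, guid: 34567, path: /dev/sdb1, healthy
 */
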
/*
 * Virtual device management.
 */

static const vdev_ops_t *const vdev_ops_table[] = {
	&vdev_root_ops,
	&vdev_raidz_ops,
	&vdev_draid_ops,
	&vdev_draid_spare_ops,
	&vdev_mirror_ops,
	&vdev_replacing_ops,
	&vdev_spare_ops,
	&vdev_disk_ops,
	&vdev_file_ops,
	&vdev_missing_ops,
	&vdev_hole_ops,
	&vdev_indirect_ops,
	NULL
};

/*
 * Given a vdev type, return the appropriate ops vector.
 */
static vdev_ops_t *
vdev_getops(const char *type)
{
	const vdev_ops_t *ops, *const *opspp;

	for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
		if (strcmp(ops->vdev_op_type, type) == 0)
			break;

	return (ops);
}

/*
 * Given a vdev and a metaslab class, find which metaslab group we're
 * interested in.  All vdevs may belong to two different metaslab classes.
 * Dedicated slog devices use only the primary metaslab group, rather than a
 * separate log group.  For embedded slogs, the vdev_log_mg will be non-NULL.
 */
metaslab_group_t *
vdev_get_mg(vdev_t *vd, metaslab_class_t *mc)
{
	if (mc == spa_embedded_log_class(vd->vdev_spa) &&
	    vd->vdev_log_mg != NULL)
		return (vd->vdev_log_mg);
	else
		return (vd->vdev_mg);
}

void
vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
    range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
	(void) vd, (void) remain_rs;

	physical_rs->rs_start = logical_rs->rs_start;
	physical_rs->rs_end = logical_rs->rs_end;
}

/*
 * Derive the enumerated allocation bias from string input.
 * String origin is either the per-vdev zap or zpool(8).
 */
static vdev_alloc_bias_t
vdev_derive_alloc_bias(const char *bias)
{
	vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;

	if (strcmp(bias, VDEV_ALLOC_BIAS_LOG) == 0)
		alloc_bias = VDEV_BIAS_LOG;
	else if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
		alloc_bias = VDEV_BIAS_SPECIAL;
	else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
		alloc_bias = VDEV_BIAS_DEDUP;

	return (alloc_bias);
}

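/*
 * Illustrative note (editor's addition): a top-level vdev added with,
 * e.g., "zpool add tank special mirror sdc sdd" should arrive here with
 * the allocation-bias string "special" in its config, which this
 * function maps to VDEV_BIAS_SPECIAL.
 */
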
/*
 * Default asize function: return the MAX of psize with the asize of
 * all children.  This is what's used by anything other than RAID-Z.
 */
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
	uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
	uint64_t csize;

	for (int c = 0; c < vd->vdev_children; c++) {
		csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
		asize = MAX(asize, csize);
	}

	return (asize);
}

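/*
 * Worked example (editor's note, illustrative only): with ashift = 12
 * (4 KiB sectors), P2ROUNDUP(6000, 4096) yields 8192, i.e. a 6000-byte
 * psize occupies two 4 KiB sectors of allocated space.
 */
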
uint64_t
vdev_default_min_asize(vdev_t *vd)
{
	return (vd->vdev_min_asize);
}

/*
 * Get the minimum allocatable size.  We define the allocatable size as
 * the vdev's asize rounded to the nearest metaslab.  This allows us to
 * replace or attach devices which don't have the same physical size but
 * can still satisfy the same number of allocations.
 */
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
	vdev_t *pvd = vd->vdev_parent;

	/*
	 * If our parent is NULL (inactive spare or cache) or is the root,
	 * just return our own asize.
	 */
	if (pvd == NULL)
		return (vd->vdev_asize);

	/*
	 * The top-level vdev just returns the allocatable size rounded
	 * to the nearest metaslab.
	 */
	if (vd == vd->vdev_top)
		return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));

	return (pvd->vdev_ops->vdev_op_min_asize(pvd));
}

void
vdev_set_min_asize(vdev_t *vd)
{
	vd->vdev_min_asize = vdev_get_min_asize(vd);

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_set_min_asize(vd->vdev_child[c]);
}

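/*
 * Illustrative arithmetic (editor's note, not in the original source):
 * with vdev_ms_shift = 29 (512 MiB metaslabs), P2ALIGN rounds a 1.3 GiB
 * asize down to 1 GiB, i.e. exactly two metaslabs' worth of space.
 */
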
/*
 * Get the minimal allocation size for the top-level vdev.
 */
uint64_t
vdev_get_min_alloc(vdev_t *vd)
{
	uint64_t min_alloc = 1ULL << vd->vdev_ashift;

	if (vd->vdev_ops->vdev_op_min_alloc != NULL)
		min_alloc = vd->vdev_ops->vdev_op_min_alloc(vd);

	return (min_alloc);
}

/*
 * Get the parity level for a top-level vdev.
 */
uint64_t
vdev_get_nparity(vdev_t *vd)
{
	uint64_t nparity = 0;

	if (vd->vdev_ops->vdev_op_nparity != NULL)
		nparity = vd->vdev_ops->vdev_op_nparity(vd);

	return (nparity);
}

/*
 * Get the number of data disks for a top-level vdev.
 */
uint64_t
vdev_get_ndisks(vdev_t *vd)
{
	uint64_t ndisks = 1;

	if (vd->vdev_ops->vdev_op_ndisks != NULL)
		ndisks = vd->vdev_ops->vdev_op_ndisks(vd);

	return (ndisks);
}

vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (vdev < rvd->vdev_children) {
		ASSERT(rvd->vdev_child[vdev] != NULL);
		return (rvd->vdev_child[vdev]);
	}

	return (NULL);
}

vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
	vdev_t *mvd;

	if (vd->vdev_guid == guid)
		return (vd);

	for (int c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
		    NULL)
			return (mvd);

	return (NULL);
}

static int
vdev_count_leaves_impl(vdev_t *vd)
{
	int n = 0;

	if (vd->vdev_ops->vdev_op_leaf)
		return (1);

	for (int c = 0; c < vd->vdev_children; c++)
		n += vdev_count_leaves_impl(vd->vdev_child[c]);

	return (n);
}

int
vdev_count_leaves(spa_t *spa)
{
	int rc;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	rc = vdev_count_leaves_impl(spa->spa_root_vdev);
	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (rc);
}

void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
	size_t oldsize, newsize;
	uint64_t id = cvd->vdev_id;
	vdev_t **newchild;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
	ASSERT(cvd->vdev_parent == NULL);

	cvd->vdev_parent = pvd;

	if (pvd == NULL)
		return;

	ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);

	oldsize = pvd->vdev_children * sizeof (vdev_t *);
	pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
	newsize = pvd->vdev_children * sizeof (vdev_t *);

	newchild = kmem_alloc(newsize, KM_SLEEP);
	if (pvd->vdev_child != NULL) {
		memcpy(newchild, pvd->vdev_child, oldsize);
		kmem_free(pvd->vdev_child, oldsize);
	}

	pvd->vdev_child = newchild;
	pvd->vdev_child[id] = cvd;

	cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top : cvd);
	ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum += cvd->vdev_guid_sum;

	if (cvd->vdev_ops->vdev_op_leaf) {
		list_insert_head(&cvd->vdev_spa->spa_leaf_list, cvd);
		cvd->vdev_spa->spa_leaf_list_gen++;
	}
}

void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
	int c;
	uint_t id = cvd->vdev_id;

	ASSERT(cvd->vdev_parent == pvd);

	if (pvd == NULL)
		return;

	ASSERT(id < pvd->vdev_children);
	ASSERT(pvd->vdev_child[id] == cvd);

	pvd->vdev_child[id] = NULL;
	cvd->vdev_parent = NULL;

	for (c = 0; c < pvd->vdev_children; c++)
		if (pvd->vdev_child[c])
			break;

	if (c == pvd->vdev_children) {
		kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
		pvd->vdev_child = NULL;
		pvd->vdev_children = 0;
	}

	if (cvd->vdev_ops->vdev_op_leaf) {
		spa_t *spa = cvd->vdev_spa;
		list_remove(&spa->spa_leaf_list, cvd);
		spa->spa_leaf_list_gen++;
	}

	/*
	 * Walk up all ancestors to update guid sum.
	 */
	for (; pvd != NULL; pvd = pvd->vdev_parent)
		pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
}

/*
 * Remove any holes in the child array.
 */
void
vdev_compact_children(vdev_t *pvd)
{
	vdev_t **newchild, *cvd;
	int oldc = pvd->vdev_children;
	int newc;

	ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (oldc == 0)
		return;

	for (int c = newc = 0; c < oldc; c++)
		if (pvd->vdev_child[c])
			newc++;

	if (newc > 0) {
		newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP);

		for (int c = newc = 0; c < oldc; c++) {
			if ((cvd = pvd->vdev_child[c]) != NULL) {
				newchild[newc] = cvd;
				cvd->vdev_id = newc++;
			}
		}
	} else {
		newchild = NULL;
	}

	kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
	pvd->vdev_child = newchild;
	pvd->vdev_children = newc;
}

/*
 * Allocate and minimally initialize a vdev_t.
 */
vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
	vdev_t *vd;
	vdev_indirect_config_t *vic;

	vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
	vic = &vd->vdev_indirect_config;

	if (spa->spa_root_vdev == NULL) {
		ASSERT(ops == &vdev_root_ops);
		spa->spa_root_vdev = vd;
		spa->spa_load_guid = spa_generate_guid(NULL);
	}

	if (guid == 0 && ops != &vdev_hole_ops) {
		if (spa->spa_root_vdev == vd) {
			/*
			 * The root vdev's guid will also be the pool guid,
			 * which must be unique among all pools.
			 */
			guid = spa_generate_guid(NULL);
		} else {
			/*
			 * Any other vdev's guid must be unique within the pool.
			 */
			guid = spa_generate_guid(spa);
		}
		ASSERT(!spa_guid_exists(spa_guid(spa), guid));
	}

	vd->vdev_spa = spa;
	vd->vdev_id = id;
	vd->vdev_guid = guid;
	vd->vdev_guid_sum = guid;
	vd->vdev_ops = ops;
	vd->vdev_state = VDEV_STATE_CLOSED;
	vd->vdev_ishole = (ops == &vdev_hole_ops);
	vic->vic_prev_indirect_vdev = UINT64_MAX;

	rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
	vd->vdev_obsolete_segments = range_tree_create(NULL, RANGE_SEG64, NULL,
	    0, 0);

	/*
	 * Initialize rate limit structs for events.  We rate limit ZIO delay
	 * and checksum events so that we don't overwhelm ZED with thousands
	 * of events when a disk is acting up.
	 */
	zfs_ratelimit_init(&vd->vdev_delay_rl, &zfs_slow_io_events_per_second,
	    1);
	zfs_ratelimit_init(&vd->vdev_deadman_rl, &zfs_slow_io_events_per_second,
	    1);
	zfs_ratelimit_init(&vd->vdev_checksum_rl,
	    &zfs_checksum_events_per_second, 1);

	list_link_init(&vd->vdev_config_dirty_node);
	list_link_init(&vd->vdev_state_dirty_node);
	list_link_init(&vd->vdev_initialize_node);
	list_link_init(&vd->vdev_leaf_node);
	list_link_init(&vd->vdev_trim_node);

	mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL);
	mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_init(&vd->vdev_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_initialize_io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&vd->vdev_initialize_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&vd->vdev_initialize_io_cv, NULL, CV_DEFAULT, NULL);

	mutex_init(&vd->vdev_trim_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_autotrim_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&vd->vdev_trim_io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&vd->vdev_trim_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&vd->vdev_autotrim_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&vd->vdev_trim_io_cv, NULL, CV_DEFAULT, NULL);

	mutex_init(&vd->vdev_rebuild_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&vd->vdev_rebuild_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < DTL_TYPES; t++) {
		vd->vdev_dtl[t] = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
		    0);
	}

	txg_list_create(&vd->vdev_ms_list, spa,
	    offsetof(struct metaslab, ms_txg_node));
	txg_list_create(&vd->vdev_dtl_list, spa,
	    offsetof(struct vdev, vdev_dtl_node));
	vd->vdev_stat.vs_timestamp = gethrtime();
	vdev_queue_init(vd);
	vdev_cache_init(vd);

	return (vd);
}

/*
 * Allocate a new vdev.  The 'alloctype' is used to control whether we are
 * creating a new vdev or loading an existing one - the behavior is slightly
 * different for each case.
 */
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
    int alloctype)
{
	vdev_ops_t *ops;
	char *type;
	uint64_t guid = 0, islog;
	vdev_t *vd;
	vdev_indirect_config_t *vic;
	char *tmp = NULL;
	int rc;
	vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
	boolean_t top_level = (parent && !parent->vdev_parent);

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (SET_ERROR(EINVAL));

	if ((ops = vdev_getops(type)) == NULL)
		return (SET_ERROR(EINVAL));

	/*
	 * If this is a load, get the vdev guid from the nvlist.
	 * Otherwise, vdev_alloc_common() will generate one for us.
	 */
	if (alloctype == VDEV_ALLOC_LOAD) {
		uint64_t label_id;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
		    label_id != id)
			return (SET_ERROR(EINVAL));

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	} else if (alloctype == VDEV_ALLOC_SPARE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	} else if (alloctype == VDEV_ALLOC_L2CACHE) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
			return (SET_ERROR(EINVAL));
	}

	/*
	 * The first allocated vdev must be of type 'root'.
	 */
	if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
		return (SET_ERROR(EINVAL));

	/*
	 * Determine whether we're a log vdev.
	 */
	islog = 0;
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
	if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
		return (SET_ERROR(ENOTSUP));

	if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
		return (SET_ERROR(ENOTSUP));

	if (top_level && alloctype == VDEV_ALLOC_ADD) {
		char *bias;

		/*
		 * If creating a top-level vdev, check for allocation
		 * classes input.
		 */
		if (nvlist_lookup_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
		    &bias) == 0) {
			alloc_bias = vdev_derive_alloc_bias(bias);

			/* spa_vdev_add() expects feature to be enabled */
			if (spa->spa_load_state != SPA_LOAD_CREATE &&
			    !spa_feature_is_enabled(spa,
			    SPA_FEATURE_ALLOCATION_CLASSES)) {
				return (SET_ERROR(ENOTSUP));
			}
		}

		/* spa_vdev_add() expects feature to be enabled */
		if (ops == &vdev_draid_ops &&
		    spa->spa_load_state != SPA_LOAD_CREATE &&
		    !spa_feature_is_enabled(spa, SPA_FEATURE_DRAID)) {
			return (SET_ERROR(ENOTSUP));
		}
	}

	/*
	 * Initialize the vdev specific data.  This is done before calling
	 * vdev_alloc_common() since it may fail and this simplifies the
	 * error reporting and cleanup code paths.
	 */
	void *tsd = NULL;
	if (ops->vdev_op_init != NULL) {
		rc = ops->vdev_op_init(spa, nv, &tsd);
		if (rc != 0) {
			return (rc);
		}
	}

	vd = vdev_alloc_common(spa, id, guid, ops);
	vd->vdev_tsd = tsd;
	vd->vdev_islog = islog;

	if (top_level && alloc_bias != VDEV_BIAS_NONE)
		vd->vdev_alloc_bias = alloc_bias;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
		vd->vdev_path = spa_strdup(vd->vdev_path);

	/*
	 * ZPOOL_CONFIG_AUX_STATE = "external" means we previously forced a
	 * fault on a vdev and want it to persist across imports (like with
	 * zpool offline -f).
	 */
	rc = nvlist_lookup_string(nv, ZPOOL_CONFIG_AUX_STATE, &tmp);
	if (rc == 0 && tmp != NULL && strcmp(tmp, "external") == 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
		vd->vdev_faulted = 1;
		vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
	}

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
		vd->vdev_devid = spa_strdup(vd->vdev_devid);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
	    &vd->vdev_physpath) == 0)
		vd->vdev_physpath = spa_strdup(vd->vdev_physpath);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
	    &vd->vdev_enc_sysfs_path) == 0)
		vd->vdev_enc_sysfs_path = spa_strdup(vd->vdev_enc_sysfs_path);

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
		vd->vdev_fru = spa_strdup(vd->vdev_fru);

	/*
	 * Set the whole_disk property.  If it's not specified, leave the value
	 * as -1.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
	    &vd->vdev_wholedisk) != 0)
		vd->vdev_wholedisk = -1ULL;

	vic = &vd->vdev_indirect_config;

	ASSERT0(vic->vic_mapping_object);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
	    &vic->vic_mapping_object);
	ASSERT0(vic->vic_births_object);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
	    &vic->vic_births_object);
	ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
	    &vic->vic_prev_indirect_vdev);

	/*
	 * Look for the 'not present' flag.  This will only be set if the device
	 * was not present at the time of import.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &vd->vdev_not_present);

	/*
	 * Get the alignment requirement.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);

	/*
	 * Retrieve the vdev creation time.
	 */
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
	    &vd->vdev_crtxg);

	/*
	 * If we're a top-level vdev, try to load the allocation parameters.
	 */
	if (top_level &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    &vd->vdev_ms_array);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    &vd->vdev_ms_shift);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    &vd->vdev_asize);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NONALLOCATING,
		    &vd->vdev_noalloc);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
		    &vd->vdev_removing);
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
		    &vd->vdev_top_zap);
	} else {
		ASSERT0(vd->vdev_top_zap);
	}

	if (top_level && alloctype != VDEV_ALLOC_ATTACH) {
		ASSERT(alloctype == VDEV_ALLOC_LOAD ||
		    alloctype == VDEV_ALLOC_ADD ||
		    alloctype == VDEV_ALLOC_SPLIT ||
		    alloctype == VDEV_ALLOC_ROOTPOOL);
		/* Note: metaslab_group_create() is now deferred */
	}

	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
		(void) nvlist_lookup_uint64(nv,
		    ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
	} else {
		ASSERT0(vd->vdev_leaf_zap);
	}

	/*
	 * If we're a leaf vdev, try to load the DTL object and other state.
	 */

	if (vd->vdev_ops->vdev_op_leaf &&
	    (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
	    alloctype == VDEV_ALLOC_ROOTPOOL)) {
		if (alloctype == VDEV_ALLOC_LOAD) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
			    &vd->vdev_dtl_object);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
			    &vd->vdev_unspare);
		}

		if (alloctype == VDEV_ALLOC_ROOTPOOL) {
			uint64_t spare = 0;

			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare) == 0 && spare)
				spa_spare_add(vd);
		}

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
		    &vd->vdev_offline);

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
		    &vd->vdev_resilver_txg);

		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REBUILD_TXG,
		    &vd->vdev_rebuild_txg);

		if (nvlist_exists(nv, ZPOOL_CONFIG_RESILVER_DEFER))
			vdev_defer_resilver(vd);

		/*
		 * In general, when importing a pool we want to ignore the
		 * persistent fault state, as the diagnosis made on another
		 * system may not be valid in the current context.  The only
		 * exception is if we forced a vdev to a persistently faulted
		 * state with 'zpool offline -f'.  The persistent fault will
		 * remain across imports until cleared.
		 *
		 * Local vdevs will remain in the faulted state.
		 */
		if (spa_load_state(spa) == SPA_LOAD_OPEN ||
		    spa_load_state(spa) == SPA_LOAD_IMPORT) {
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
			    &vd->vdev_faulted);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
			    &vd->vdev_degraded);
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
			    &vd->vdev_removed);

			if (vd->vdev_faulted || vd->vdev_degraded) {
				char *aux;

				vd->vdev_label_aux =
				    VDEV_AUX_ERR_EXCEEDED;
				if (nvlist_lookup_string(nv,
				    ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
				    strcmp(aux, "external") == 0)
					vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
				else
					vd->vdev_faulted = 0ULL;
			}
		}
	}

	/*
	 * Add ourselves to the parent's list of children.
	 */
	vdev_add_child(parent, vd);

	*vdp = vd;

	return (0);
}

void
vdev_free(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	ASSERT3P(vd->vdev_trim_thread, ==, NULL);
	ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
	ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);

	/*
	 * Scan queues are normally destroyed at the end of a scan.  If the
	 * queue exists here, that implies the vdev is being removed while
	 * the scan is still running.
	 */
	if (vd->vdev_scan_io_queue != NULL) {
		mutex_enter(&vd->vdev_scan_io_queue_lock);
		dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue);
		vd->vdev_scan_io_queue = NULL;
		mutex_exit(&vd->vdev_scan_io_queue_lock);
	}

	/*
	 * vdev_free() implies closing the vdev first.  This is simpler than
	 * trying to ensure complicated semantics for all callers.
	 */
	vdev_close(vd);

	ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
	ASSERT(!list_link_active(&vd->vdev_state_dirty_node));

	/*
	 * Free all children.
	 */
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_free(vd->vdev_child[c]);

	ASSERT(vd->vdev_child == NULL);
	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);

	if (vd->vdev_ops->vdev_op_fini != NULL)
		vd->vdev_ops->vdev_op_fini(vd);

	/*
	 * Discard allocation state.
	 */
	if (vd->vdev_mg != NULL) {
		vdev_metaslab_fini(vd);
		metaslab_group_destroy(vd->vdev_mg);
		vd->vdev_mg = NULL;
	}
	if (vd->vdev_log_mg != NULL) {
		ASSERT0(vd->vdev_ms_count);
		metaslab_group_destroy(vd->vdev_log_mg);
		vd->vdev_log_mg = NULL;
	}

	ASSERT0(vd->vdev_stat.vs_space);
	ASSERT0(vd->vdev_stat.vs_dspace);
	ASSERT0(vd->vdev_stat.vs_alloc);

	/*
	 * Remove this vdev from its parent's child list.
	 */
	vdev_remove_child(vd->vdev_parent, vd);

	ASSERT(vd->vdev_parent == NULL);
	ASSERT(!list_link_active(&vd->vdev_leaf_node));

	/*
	 * Clean up vdev structure.
	 */
	vdev_queue_fini(vd);
	vdev_cache_fini(vd);

	if (vd->vdev_path)
		spa_strfree(vd->vdev_path);
	if (vd->vdev_devid)
		spa_strfree(vd->vdev_devid);
	if (vd->vdev_physpath)
		spa_strfree(vd->vdev_physpath);

	if (vd->vdev_enc_sysfs_path)
		spa_strfree(vd->vdev_enc_sysfs_path);

	if (vd->vdev_fru)
		spa_strfree(vd->vdev_fru);

	if (vd->vdev_isspare)
		spa_spare_remove(vd);
	if (vd->vdev_isl2cache)
		spa_l2cache_remove(vd);

	txg_list_destroy(&vd->vdev_ms_list);
	txg_list_destroy(&vd->vdev_dtl_list);

	mutex_enter(&vd->vdev_dtl_lock);
	space_map_close(vd->vdev_dtl_sm);
	for (int t = 0; t < DTL_TYPES; t++) {
		range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
		range_tree_destroy(vd->vdev_dtl[t]);
	}
	mutex_exit(&vd->vdev_dtl_lock);

	EQUIV(vd->vdev_indirect_births != NULL,
	    vd->vdev_indirect_mapping != NULL);
	if (vd->vdev_indirect_births != NULL) {
		vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
		vdev_indirect_births_close(vd->vdev_indirect_births);
	}

	if (vd->vdev_obsolete_sm != NULL) {
		ASSERT(vd->vdev_removing ||
		    vd->vdev_ops == &vdev_indirect_ops);
		space_map_close(vd->vdev_obsolete_sm);
		vd->vdev_obsolete_sm = NULL;
	}
	range_tree_destroy(vd->vdev_obsolete_segments);
	rw_destroy(&vd->vdev_indirect_rwlock);
	mutex_destroy(&vd->vdev_obsolete_lock);

	mutex_destroy(&vd->vdev_dtl_lock);
	mutex_destroy(&vd->vdev_stat_lock);
	mutex_destroy(&vd->vdev_probe_lock);
	mutex_destroy(&vd->vdev_scan_io_queue_lock);

	mutex_destroy(&vd->vdev_initialize_lock);
	mutex_destroy(&vd->vdev_initialize_io_lock);
	cv_destroy(&vd->vdev_initialize_io_cv);
	cv_destroy(&vd->vdev_initialize_cv);

	mutex_destroy(&vd->vdev_trim_lock);
	mutex_destroy(&vd->vdev_autotrim_lock);
	mutex_destroy(&vd->vdev_trim_io_lock);
	cv_destroy(&vd->vdev_trim_cv);
	cv_destroy(&vd->vdev_autotrim_cv);
	cv_destroy(&vd->vdev_trim_io_cv);

	mutex_destroy(&vd->vdev_rebuild_lock);
	cv_destroy(&vd->vdev_rebuild_cv);

	zfs_ratelimit_fini(&vd->vdev_delay_rl);
	zfs_ratelimit_fini(&vd->vdev_deadman_rl);
	zfs_ratelimit_fini(&vd->vdev_checksum_rl);

	if (vd == spa->spa_root_vdev)
		spa->spa_root_vdev = NULL;

	kmem_free(vd, sizeof (vdev_t));
}

/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
	spa_t *spa = svd->vdev_spa;
	metaslab_t *msp;
	vdev_t *vd;
	int t;

	ASSERT(tvd == tvd->vdev_top);

	tvd->vdev_pending_fastwrite = svd->vdev_pending_fastwrite;
	tvd->vdev_ms_array = svd->vdev_ms_array;
	tvd->vdev_ms_shift = svd->vdev_ms_shift;
	tvd->vdev_ms_count = svd->vdev_ms_count;
	tvd->vdev_top_zap = svd->vdev_top_zap;

	svd->vdev_ms_array = 0;
	svd->vdev_ms_shift = 0;
	svd->vdev_ms_count = 0;
	svd->vdev_top_zap = 0;

	if (tvd->vdev_mg)
		ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
	if (tvd->vdev_log_mg)
		ASSERT3P(tvd->vdev_log_mg, ==, svd->vdev_log_mg);
	tvd->vdev_mg = svd->vdev_mg;
	tvd->vdev_log_mg = svd->vdev_log_mg;
	tvd->vdev_ms = svd->vdev_ms;

	svd->vdev_mg = NULL;
	svd->vdev_log_mg = NULL;
	svd->vdev_ms = NULL;

	if (tvd->vdev_mg != NULL)
		tvd->vdev_mg->mg_vd = tvd;
	if (tvd->vdev_log_mg != NULL)
		tvd->vdev_log_mg->mg_vd = tvd;

	tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm;
	svd->vdev_checkpoint_sm = NULL;

	tvd->vdev_alloc_bias = svd->vdev_alloc_bias;
	svd->vdev_alloc_bias = VDEV_BIAS_NONE;

	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

	svd->vdev_stat.vs_alloc = 0;
	svd->vdev_stat.vs_space = 0;
	svd->vdev_stat.vs_dspace = 0;

	/*
	 * State which may be set on a top-level vdev that's in the
	 * process of being removed.
	 */
	ASSERT0(tvd->vdev_indirect_config.vic_births_object);
	ASSERT0(tvd->vdev_indirect_config.vic_mapping_object);
	ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL);
	ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL);
	ASSERT3P(tvd->vdev_indirect_births, ==, NULL);
	ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL);
	ASSERT0(tvd->vdev_noalloc);
	ASSERT0(tvd->vdev_removing);
	ASSERT0(tvd->vdev_rebuilding);
	tvd->vdev_noalloc = svd->vdev_noalloc;
	tvd->vdev_removing = svd->vdev_removing;
	tvd->vdev_rebuilding = svd->vdev_rebuilding;
	tvd->vdev_rebuild_config = svd->vdev_rebuild_config;
	tvd->vdev_indirect_config = svd->vdev_indirect_config;
	tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping;
	tvd->vdev_indirect_births = svd->vdev_indirect_births;
	range_tree_swap(&svd->vdev_obsolete_segments,
	    &tvd->vdev_obsolete_segments);
	tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm;
	svd->vdev_indirect_config.vic_mapping_object = 0;
	svd->vdev_indirect_config.vic_births_object = 0;
	svd->vdev_indirect_config.vic_prev_indirect_vdev = -1ULL;
	svd->vdev_indirect_mapping = NULL;
	svd->vdev_indirect_births = NULL;
	svd->vdev_obsolete_sm = NULL;
	svd->vdev_noalloc = 0;
	svd->vdev_removing = 0;
	svd->vdev_rebuilding = 0;

	for (t = 0; t < TXG_SIZE; t++) {
		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
	}

	if (list_link_active(&svd->vdev_config_dirty_node)) {
		vdev_config_clean(svd);
		vdev_config_dirty(tvd);
	}

	if (list_link_active(&svd->vdev_state_dirty_node)) {
		vdev_state_clean(svd);
		vdev_state_dirty(tvd);
	}

	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
	svd->vdev_deflate_ratio = 0;

	tvd->vdev_islog = svd->vdev_islog;
	svd->vdev_islog = 0;

	dsl_scan_io_queue_vdev_xfer(svd, tvd);
}

static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
	if (vd == NULL)
		return;

	vd->vdev_top = tvd;

	for (int c = 0; c < vd->vdev_children; c++)
		vdev_top_update(tvd, vd->vdev_child[c]);
}

/*
 * Add a mirror/replacing vdev above an existing vdev.  There is no need to
 * call .vdev_op_init() since mirror/replacing vdevs do not have private state.
 */
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
	spa_t *spa = cvd->vdev_spa;
	vdev_t *pvd = cvd->vdev_parent;
	vdev_t *mvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);

	mvd->vdev_asize = cvd->vdev_asize;
	mvd->vdev_min_asize = cvd->vdev_min_asize;
	mvd->vdev_max_asize = cvd->vdev_max_asize;
	mvd->vdev_psize = cvd->vdev_psize;
	mvd->vdev_ashift = cvd->vdev_ashift;
	mvd->vdev_logical_ashift = cvd->vdev_logical_ashift;
	mvd->vdev_physical_ashift = cvd->vdev_physical_ashift;
	mvd->vdev_state = cvd->vdev_state;
	mvd->vdev_crtxg = cvd->vdev_crtxg;

	vdev_remove_child(pvd, cvd);
	vdev_add_child(pvd, mvd);
	cvd->vdev_id = mvd->vdev_children;
	vdev_add_child(mvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (mvd == mvd->vdev_top)
		vdev_top_transfer(cvd, mvd);

	return (mvd);
}

/*
 * Remove a 1-way mirror/replacing vdev from the tree.
 */
void
vdev_remove_parent(vdev_t *cvd)
{
	vdev_t *mvd = cvd->vdev_parent;
	vdev_t *pvd = mvd->vdev_parent;

	ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	ASSERT(mvd->vdev_children == 1);
	ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
	    mvd->vdev_ops == &vdev_replacing_ops ||
	    mvd->vdev_ops == &vdev_spare_ops);
	cvd->vdev_ashift = mvd->vdev_ashift;
	cvd->vdev_logical_ashift = mvd->vdev_logical_ashift;
	cvd->vdev_physical_ashift = mvd->vdev_physical_ashift;
	vdev_remove_child(mvd, cvd);
	vdev_remove_child(pvd, mvd);

	/*
	 * If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
	 * Otherwise, we could have detached an offline device, and when we
	 * go to import the pool we'll think we have two top-level vdevs,
	 * instead of a different version of the same top-level vdev.
	 */
	if (mvd->vdev_top == mvd) {
		uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
		cvd->vdev_orig_guid = cvd->vdev_guid;
		cvd->vdev_guid += guid_delta;
		cvd->vdev_guid_sum += guid_delta;

		/*
		 * If pool not set for autoexpand, we need to also preserve
		 * mvd's asize to prevent automatic expansion of cvd.
		 * Otherwise if we are adjusting the mirror by attaching and
		 * detaching children of non-uniform sizes, the mirror could
		 * autoexpand, unexpectedly requiring larger devices to
		 * re-establish the mirror.
		 */
		if (!cvd->vdev_spa->spa_autoexpand)
			cvd->vdev_asize = mvd->vdev_asize;
	}
	cvd->vdev_id = mvd->vdev_id;
	vdev_add_child(pvd, cvd);
	vdev_top_update(cvd->vdev_top, cvd->vdev_top);

	if (cvd == cvd->vdev_top)
		vdev_top_transfer(mvd, cvd);

	ASSERT(mvd->vdev_children == 0);
	vdev_free(mvd);
}

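/*
 * Illustrative note (editor's addition): this path is taken, e.g., when
 * "zpool detach" shrinks a two-way mirror to a single disk, or when a
 * completed "replacing" vdev collapses back down to the new leaf once
 * resilvering finishes.
 */
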
void
vdev_metaslab_group_create(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	/*
	 * metaslab_group_create was delayed until allocation bias was available
	 */
	if (vd->vdev_mg == NULL) {
		metaslab_class_t *mc;

		if (vd->vdev_islog && vd->vdev_alloc_bias == VDEV_BIAS_NONE)
			vd->vdev_alloc_bias = VDEV_BIAS_LOG;

		ASSERT3U(vd->vdev_islog, ==,
		    (vd->vdev_alloc_bias == VDEV_BIAS_LOG));

		switch (vd->vdev_alloc_bias) {
		case VDEV_BIAS_LOG:
			mc = spa_log_class(spa);
			break;
		case VDEV_BIAS_SPECIAL:
			mc = spa_special_class(spa);
			break;
		case VDEV_BIAS_DEDUP:
			mc = spa_dedup_class(spa);
			break;
		default:
			mc = spa_normal_class(spa);
		}

		vd->vdev_mg = metaslab_group_create(mc, vd,
		    spa->spa_alloc_count);

		if (!vd->vdev_islog) {
			vd->vdev_log_mg = metaslab_group_create(
			    spa_embedded_log_class(spa), vd, 1);
		}

		/*
		 * The spa ashift min/max only apply for the normal metaslab
		 * class.  Class destination is late binding so ashift boundary
		 * setting had to wait until now.
		 */
		if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
		    mc == spa_normal_class(spa) && vd->vdev_aux == NULL) {
			if (vd->vdev_ashift > spa->spa_max_ashift)
				spa->spa_max_ashift = vd->vdev_ashift;
			if (vd->vdev_ashift < spa->spa_min_ashift)
				spa->spa_min_ashift = vd->vdev_ashift;

			uint64_t min_alloc = vdev_get_min_alloc(vd);
			if (min_alloc < spa->spa_min_alloc)
				spa->spa_min_alloc = min_alloc;
		}
	}
}

34dc7c2f BB |
1404 | int |
1405 | vdev_metaslab_init(vdev_t *vd, uint64_t txg) | |
1406 | { | |
1407 | spa_t *spa = vd->vdev_spa; | |
34dc7c2f BB |
1408 | uint64_t oldc = vd->vdev_ms_count; |
1409 | uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift; | |
1410 | metaslab_t **mspp; | |
1411 | int error; | |
cc99f275 | 1412 | boolean_t expanding = (oldc != 0); |
34dc7c2f | 1413 | |
428870ff BB |
1414 | ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER)); |
1415 | ||
1416 | /* | |
1417 | * This vdev is not being allocated from yet or is a hole. | |
1418 | */ | |
1419 | if (vd->vdev_ms_shift == 0) | |
34dc7c2f BB |
1420 | return (0); |
1421 | ||
428870ff BB |
1422 | ASSERT(!vd->vdev_ishole); |
1423 | ||
34dc7c2f BB |
1424 | ASSERT(oldc <= newc); |
1425 | ||
bffb68a2 | 1426 | mspp = vmem_zalloc(newc * sizeof (*mspp), KM_SLEEP); |
34dc7c2f | 1427 | |
cc99f275 | 1428 | if (expanding) { |
861166b0 | 1429 | memcpy(mspp, vd->vdev_ms, oldc * sizeof (*mspp)); |
bffb68a2 | 1430 | vmem_free(vd->vdev_ms, oldc * sizeof (*mspp)); |
34dc7c2f BB |
1431 | } |
1432 | ||
1433 | vd->vdev_ms = mspp; | |
1434 | vd->vdev_ms_count = newc; | |
93cf2076 | 1435 | |
aa755b35 MA |
1436 | for (uint64_t m = oldc; m < newc; m++) { |
1437 | uint64_t object = 0; | |
a1d477c2 MA |
1438 | /* |
1439 | * vdev_ms_array may be 0 if we are creating the "fake" | |
1440 | * metaslabs for an indirect vdev for zdb's leak detection. | |
1441 | * See zdb_leak_init(). | |
1442 | */ | |
1443 | if (txg == 0 && vd->vdev_ms_array != 0) { | |
aa755b35 MA |
1444 | error = dmu_read(spa->spa_meta_objset, |
1445 | vd->vdev_ms_array, | |
9babb374 BB |
1446 | m * sizeof (uint64_t), sizeof (uint64_t), &object, |
1447 | DMU_READ_PREFETCH); | |
4a0ee12a PZ |
1448 | if (error != 0) { |
1449 | vdev_dbgmsg(vd, "unable to read the metaslab " | |
1450 | "array [error=%d]", error); | |
34dc7c2f | 1451 | return (error); |
4a0ee12a | 1452 | } |
34dc7c2f | 1453 | } |
fb42a493 PS |
1454 | |
1455 | error = metaslab_init(vd->vdev_mg, m, object, txg, | |
1456 | &(vd->vdev_ms[m])); | |
4a0ee12a PZ |
1457 | if (error != 0) { |
1458 | vdev_dbgmsg(vd, "metaslab_init failed [error=%d]", | |
1459 | error); | |
fb42a493 | 1460 | return (error); |
4a0ee12a | 1461 | } |
34dc7c2f BB |
1462 | } |
1463 | ||
aa755b35 MA |
1464 | /* |
1465 | * Find the emptiest metaslab on the vdev and mark it for use as | |
1466 | * the embedded slog by moving it from the regular to the log metaslab | |
1467 | * group. | |
1468 | */ | |
1469 | if (vd->vdev_mg->mg_class == spa_normal_class(spa) && | |
1470 | vd->vdev_ms_count > zfs_embedded_slog_min_ms && | |
1471 | avl_is_empty(&vd->vdev_log_mg->mg_metaslab_tree)) { | |
1472 | uint64_t slog_msid = 0; | |
1473 | uint64_t smallest = UINT64_MAX; | |
1474 | ||
1475 | /* | |
1476 | * Note that we only search the new metaslabs, because the old | |
1477 | * (pre-existing) ones may be active (e.g. have non-empty | |
1478 | * range_tree's), and we don't move them to the new | |
1479 | * metaslab_t. | |
1480 | */ | |
1481 | for (uint64_t m = oldc; m < newc; m++) { | |
1482 | uint64_t alloc = | |
1483 | space_map_allocated(vd->vdev_ms[m]->ms_sm); | |
1484 | if (alloc < smallest) { | |
1485 | slog_msid = m; | |
1486 | smallest = alloc; | |
1487 | } | |
1488 | } | |
1489 | metaslab_t *slog_ms = vd->vdev_ms[slog_msid]; | |
1490 | /* | |
1491 | * The metaslab was marked as dirty at the end of | |
1492 | * metaslab_init(). Remove it from the dirty list so that we | |
1493 | * can uninitialize and reinitialize it to the new class. | |
1494 | */ | |
1495 | if (txg != 0) { | |
1496 | (void) txg_list_remove_this(&vd->vdev_ms_list, | |
1497 | slog_ms, txg); | |
1498 | } | |
1499 | uint64_t sm_obj = space_map_object(slog_ms->ms_sm); | |
1500 | metaslab_fini(slog_ms); | |
1501 | VERIFY0(metaslab_init(vd->vdev_log_mg, slog_msid, sm_obj, txg, | |
1502 | &vd->vdev_ms[slog_msid])); | |
1503 | } | |
1504 | ||
428870ff BB |
1505 | if (txg == 0) |
1506 | spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER); | |
1507 | ||
1508 | /* | |
2a673e76 AJ |
1509 | * If the vdev is marked as non-allocating then don't |
1510 | * activate the metaslabs since we want to ensure that | |
1511 | * no allocations are performed on this device. | |
428870ff | 1512 | */ |
2a673e76 AJ |
1513 | if (vd->vdev_noalloc) { |
1514 | /* track non-allocating vdev space */ | |
1515 | spa->spa_nonallocating_dspace += spa_deflate(spa) ? | |
1516 | vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space; | |
1517 | } else if (!expanding) { | |
428870ff | 1518 | metaslab_group_activate(vd->vdev_mg); |
aa755b35 MA |
1519 | if (vd->vdev_log_mg != NULL) |
1520 | metaslab_group_activate(vd->vdev_log_mg); | |
cc99f275 | 1521 | } |
428870ff BB |
1522 | |
1523 | if (txg == 0) | |
1524 | spa_config_exit(spa, SCL_ALLOC, FTAG); | |
1525 | ||
34dc7c2f BB |
1526 | return (0); |
1527 | } | |
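
/*
 * Illustrative walk-through of the embedded-slog selection above
 * (hypothetical numbers, not taken from the code): suppose the newly
 * added metaslabs m = oldc..oldc+3 report space_map_allocated() values
 * of { 5G, 0, 2G, 0 }. The loop keeps the first strictly smaller value
 * it sees, so slog_msid settles on the first zero (m = oldc + 1) and
 * later ties do not displace it. That metaslab is then fini'd and
 * re-inited under vd->vdev_log_mg, moving it into the
 * spa_embedded_log_class.
 */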
1528 | ||
1529 | void | |
1530 | vdev_metaslab_fini(vdev_t *vd) | |
1531 | { | |
d2734cce SD |
1532 | if (vd->vdev_checkpoint_sm != NULL) { |
1533 | ASSERT(spa_feature_is_active(vd->vdev_spa, | |
1534 | SPA_FEATURE_POOL_CHECKPOINT)); | |
1535 | space_map_close(vd->vdev_checkpoint_sm); | |
1536 | /* | |
1537 | * Even though we close the space map, we need to set its | |
1538 | * pointer to NULL. The reason is that vdev_metaslab_fini() | |
1539 | * may be called multiple times for certain operations | |
1540 | * (e.g. when destroying a pool), so we need to ensure that | |
1541 | * this clause never executes twice. This logic is similar | |
1542 | * to the one used for the vdev_ms clause below. | |
1543 | */ | |
1544 | vd->vdev_checkpoint_sm = NULL; | |
1545 | } | |
1546 | ||
34dc7c2f | 1547 | if (vd->vdev_ms != NULL) { |
928e8ad4 | 1548 | metaslab_group_t *mg = vd->vdev_mg; |
aa755b35 | 1549 | |
928e8ad4 | 1550 | metaslab_group_passivate(mg); |
aa755b35 MA |
1551 | if (vd->vdev_log_mg != NULL) { |
1552 | ASSERT(!vd->vdev_islog); | |
1553 | metaslab_group_passivate(vd->vdev_log_mg); | |
1554 | } | |
a1d477c2 | 1555 | |
928e8ad4 | 1556 | uint64_t count = vd->vdev_ms_count; |
a1d477c2 | 1557 | for (uint64_t m = 0; m < count; m++) { |
93cf2076 | 1558 | metaslab_t *msp = vd->vdev_ms[m]; |
93cf2076 GW |
1559 | if (msp != NULL) |
1560 | metaslab_fini(msp); | |
1561 | } | |
bffb68a2 | 1562 | vmem_free(vd->vdev_ms, count * sizeof (metaslab_t *)); |
34dc7c2f | 1563 | vd->vdev_ms = NULL; |
a1d477c2 | 1564 | vd->vdev_ms_count = 0; |
928e8ad4 | 1565 | |
aa755b35 | 1566 | for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) { |
928e8ad4 | 1567 | ASSERT0(mg->mg_histogram[i]); |
aa755b35 MA |
1568 | if (vd->vdev_log_mg != NULL) |
1569 | ASSERT0(vd->vdev_log_mg->mg_histogram[i]); | |
1570 | } | |
a1d477c2 MA |
1571 | } |
1572 | ASSERT0(vd->vdev_ms_count); | |
920dd524 | 1573 | ASSERT3U(vd->vdev_pending_fastwrite, ==, 0); |
34dc7c2f BB |
1574 | } |
1575 | ||
b128c09f BB |
1576 | typedef struct vdev_probe_stats { |
1577 | boolean_t vps_readable; | |
1578 | boolean_t vps_writeable; | |
1579 | int vps_flags; | |
b128c09f BB |
1580 | } vdev_probe_stats_t; |
1581 | ||
1582 | static void | |
1583 | vdev_probe_done(zio_t *zio) | |
34dc7c2f | 1584 | { |
fb5f0bc8 | 1585 | spa_t *spa = zio->io_spa; |
d164b209 | 1586 | vdev_t *vd = zio->io_vd; |
b128c09f | 1587 | vdev_probe_stats_t *vps = zio->io_private; |
d164b209 BB |
1588 | |
1589 | ASSERT(vd->vdev_probe_zio != NULL); | |
b128c09f BB |
1590 | |
1591 | if (zio->io_type == ZIO_TYPE_READ) { | |
b128c09f BB |
1592 | if (zio->io_error == 0) |
1593 | vps->vps_readable = 1; | |
fb5f0bc8 | 1594 | if (zio->io_error == 0 && spa_writeable(spa)) { |
d164b209 | 1595 | zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd, |
a6255b7f | 1596 | zio->io_offset, zio->io_size, zio->io_abd, |
b128c09f BB |
1597 | ZIO_CHECKSUM_OFF, vdev_probe_done, vps, |
1598 | ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE)); | |
1599 | } else { | |
a6255b7f | 1600 | abd_free(zio->io_abd); |
b128c09f BB |
1601 | } |
1602 | } else if (zio->io_type == ZIO_TYPE_WRITE) { | |
b128c09f BB |
1603 | if (zio->io_error == 0) |
1604 | vps->vps_writeable = 1; | |
a6255b7f | 1605 | abd_free(zio->io_abd); |
b128c09f | 1606 | } else if (zio->io_type == ZIO_TYPE_NULL) { |
d164b209 | 1607 | zio_t *pio; |
3dfb57a3 | 1608 | zio_link_t *zl; |
b128c09f BB |
1609 | |
1610 | vd->vdev_cant_read |= !vps->vps_readable; | |
1611 | vd->vdev_cant_write |= !vps->vps_writeable; | |
1612 | ||
1613 | if (vdev_readable(vd) && | |
fb5f0bc8 | 1614 | (vdev_writeable(vd) || !spa_writeable(spa))) { |
b128c09f BB |
1615 | zio->io_error = 0; |
1616 | } else { | |
1617 | ASSERT(zio->io_error != 0); | |
4a0ee12a | 1618 | vdev_dbgmsg(vd, "failed probe"); |
1144586b | 1619 | (void) zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE, |
4f072827 | 1620 | spa, vd, NULL, NULL, 0); |
2e528b49 | 1621 | zio->io_error = SET_ERROR(ENXIO); |
b128c09f | 1622 | } |
d164b209 BB |
1623 | |
1624 | mutex_enter(&vd->vdev_probe_lock); | |
1625 | ASSERT(vd->vdev_probe_zio == zio); | |
1626 | vd->vdev_probe_zio = NULL; | |
1627 | mutex_exit(&vd->vdev_probe_lock); | |
1628 | ||
3dfb57a3 DB |
1629 | zl = NULL; |
1630 | while ((pio = zio_walk_parents(zio, &zl)) != NULL) | |
d164b209 | 1631 | if (!vdev_accessible(vd, pio)) |
2e528b49 | 1632 | pio->io_error = SET_ERROR(ENXIO); |
d164b209 | 1633 | |
b128c09f BB |
1634 | kmem_free(vps, sizeof (*vps)); |
1635 | } | |
1636 | } | |
34dc7c2f | 1637 | |
b128c09f | 1638 | /* |
d3cc8b15 WA |
1639 | * Determine whether this device is accessible. |
1640 | * | |
1641 | * Read and write to several known locations: the pad regions of each | |
1642 | * vdev label but the first, which we leave alone in case it contains | |
1643 | * a VTOC. | |
b128c09f BB |
1644 | */ |
1645 | zio_t * | |
d164b209 | 1646 | vdev_probe(vdev_t *vd, zio_t *zio) |
b128c09f BB |
1647 | { |
1648 | spa_t *spa = vd->vdev_spa; | |
d164b209 BB |
1649 | vdev_probe_stats_t *vps = NULL; |
1650 | zio_t *pio; | |
1651 | ||
1652 | ASSERT(vd->vdev_ops->vdev_op_leaf); | |
34dc7c2f | 1653 | |
d164b209 BB |
1654 | /* |
1655 | * Don't probe the probe. | |
1656 | */ | |
1657 | if (zio && (zio->io_flags & ZIO_FLAG_PROBE)) | |
1658 | return (NULL); | |
b128c09f | 1659 | |
d164b209 BB |
1660 | /* |
1661 | * To prevent 'probe storms' when a device fails, we create | |
1662 | * just one probe i/o at a time. All zios that want to probe | |
1663 | * this vdev will become parents of the probe io. | |
1664 | */ | |
1665 | mutex_enter(&vd->vdev_probe_lock); | |
b128c09f | 1666 | |
d164b209 | 1667 | if ((pio = vd->vdev_probe_zio) == NULL) { |
79c76d5b | 1668 | vps = kmem_zalloc(sizeof (*vps), KM_SLEEP); |
d164b209 BB |
1669 | |
1670 | vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE | | |
1671 | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE | | |
9babb374 | 1672 | ZIO_FLAG_TRYHARD; |
d164b209 BB |
1673 | |
1674 | if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) { | |
1675 | /* | |
1676 | * vdev_cant_read and vdev_cant_write can only | |
1677 | * transition from TRUE to FALSE when we have the | |
1678 | * SCL_ZIO lock as writer; otherwise they can only | |
1679 | * transition from FALSE to TRUE. This ensures that | |
1680 | * any zio looking at these values can assume that | |
1681 | * failures persist for the life of the I/O. That's | |
1682 | * important because when a device has intermittent | |
1683 | * connectivity problems, we want to ensure that | |
1684 | * they're ascribed to the device (ENXIO) and not | |
1685 | * the zio (EIO). | |
1686 | * | |
1687 | * Since we hold SCL_ZIO as writer here, clear both | |
1688 | * values so the probe can reevaluate from first | |
1689 | * principles. | |
1690 | */ | |
1691 | vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER; | |
1692 | vd->vdev_cant_read = B_FALSE; | |
1693 | vd->vdev_cant_write = B_FALSE; | |
1694 | } | |
1695 | ||
1696 | vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd, | |
1697 | vdev_probe_done, vps, | |
1698 | vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE); | |
1699 | ||
428870ff BB |
1700 | /* |
1701 | * We can't change the vdev state in this context, so we | |
1702 | * kick off an async task to do it on our behalf. | |
1703 | */ | |
d164b209 BB |
1704 | if (zio != NULL) { |
1705 | vd->vdev_probe_wanted = B_TRUE; | |
1706 | spa_async_request(spa, SPA_ASYNC_PROBE); | |
1707 | } | |
b128c09f BB |
1708 | } |
1709 | ||
d164b209 BB |
1710 | if (zio != NULL) |
1711 | zio_add_child(zio, pio); | |
b128c09f | 1712 | |
d164b209 | 1713 | mutex_exit(&vd->vdev_probe_lock); |
b128c09f | 1714 | |
d164b209 BB |
1715 | if (vps == NULL) { |
1716 | ASSERT(zio != NULL); | |
1717 | return (NULL); | |
1718 | } | |
b128c09f | 1719 | |
1c27024e | 1720 | for (int l = 1; l < VDEV_LABELS; l++) { |
d164b209 | 1721 | zio_nowait(zio_read_phys(pio, vd, |
b128c09f | 1722 | vdev_label_offset(vd->vdev_psize, l, |
108a454a | 1723 | offsetof(vdev_label_t, vl_be)), VDEV_PAD_SIZE, |
a6255b7f | 1724 | abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE), |
b128c09f BB |
1725 | ZIO_CHECKSUM_OFF, vdev_probe_done, vps, |
1726 | ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE)); | |
1727 | } | |
1728 | ||
d164b209 BB |
1729 | if (zio == NULL) |
1730 | return (pio); | |
1731 | ||
1732 | zio_nowait(pio); | |
1733 | return (NULL); | |
34dc7c2f BB |
1734 | } |
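
/*
 * Sketch of the two calling conventions for vdev_probe() (illustrative
 * only; error handling elided). A synchronous caller such as
 * vdev_open() passes a NULL zio and waits on the returned probe:
 *
 *	error = zio_wait(vdev_probe(vd, NULL));
 *
 * An I/O-path caller passes its own zio, which becomes a parent of the
 * shared probe zio, and vdev_probe() returns NULL:
 *
 *	(void) vdev_probe(vd, zio);
 *
 * Either way at most one probe zio exists per vdev at a time, which is
 * what prevents the "probe storms" described above.
 */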
1735 | ||
a0e01997 AS |
1736 | static void |
1737 | vdev_load_child(void *arg) | |
1738 | { | |
1739 | vdev_t *vd = arg; | |
1740 | ||
1741 | vd->vdev_load_error = vdev_load(vd); | |
1742 | } | |
1743 | ||
45d1cae3 BB |
1744 | static void |
1745 | vdev_open_child(void *arg) | |
1746 | { | |
1747 | vdev_t *vd = arg; | |
1748 | ||
1749 | vd->vdev_open_thread = curthread; | |
1750 | vd->vdev_open_error = vdev_open(vd); | |
1751 | vd->vdev_open_thread = NULL; | |
1752 | } | |
1753 | ||
6c285672 | 1754 | static boolean_t |
428870ff BB |
1755 | vdev_uses_zvols(vdev_t *vd) |
1756 | { | |
6c285672 JL |
1757 | #ifdef _KERNEL |
1758 | if (zvol_is_zvol(vd->vdev_path)) | |
428870ff | 1759 | return (B_TRUE); |
6c285672 JL |
1760 | #endif |
1761 | ||
1c27024e | 1762 | for (int c = 0; c < vd->vdev_children; c++) |
428870ff BB |
1763 | if (vdev_uses_zvols(vd->vdev_child[c])) |
1764 | return (B_TRUE); | |
6c285672 | 1765 | |
428870ff BB |
1766 | return (B_FALSE); |
1767 | } | |
1768 | ||
b2255edc BB |
1769 | /* |
1770 | * Returns B_TRUE if the passed child should be opened. | |
1771 | */ | |
1772 | static boolean_t | |
1773 | vdev_default_open_children_func(vdev_t *vd) | |
1774 | { | |
14e4e3cb | 1775 | (void) vd; |
b2255edc BB |
1776 | return (B_TRUE); |
1777 | } | |
1778 | ||
1779 | /* | |
1780 | * Open the requested child vdevs. If any of the leaf vdevs are using | |
1781 | * a ZFS volume then do the opens in a single thread. This avoids a | |
1782 | * deadlock when the current thread is holding the spa_namespace_lock. | |
1783 | */ | |
1784 | static void | |
1785 | vdev_open_children_impl(vdev_t *vd, vdev_open_children_func_t *open_func) | |
45d1cae3 | 1786 | { |
45d1cae3 BB |
1787 | int children = vd->vdev_children; |
1788 | ||
b2255edc BB |
1789 | taskq_t *tq = taskq_create("vdev_open", children, minclsyspri, |
1790 | children, children, TASKQ_PREPOPULATE); | |
1791 | vd->vdev_nonrot = B_TRUE; | |
45d1cae3 | 1792 | |
b2255edc BB |
1793 | for (int c = 0; c < children; c++) { |
1794 | vdev_t *cvd = vd->vdev_child[c]; | |
1795 | ||
1796 | if (open_func(cvd) == B_FALSE) | |
1797 | continue; | |
1798 | ||
1799 | if (tq == NULL || vdev_uses_zvols(vd)) { | |
1800 | cvd->vdev_open_error = vdev_open(cvd); | |
1801 | } else { | |
4770aa06 | 1802 | VERIFY(taskq_dispatch(tq, vdev_open_child, |
b2255edc BB |
1803 | cvd, TQ_SLEEP) != TASKQID_INVALID); |
1804 | } | |
45d1cae3 | 1805 | |
b2255edc BB |
1806 | vd->vdev_nonrot &= cvd->vdev_nonrot; |
1807 | } | |
1808 | ||
1809 | if (tq != NULL) { | |
1810 | taskq_wait(tq); | |
4770aa06 HJ |
1811 | taskq_destroy(tq); |
1812 | } | |
b2255edc | 1813 | } |
4770aa06 | 1814 | |
b2255edc BB |
1815 | /* |
1816 | * Open all child vdevs. | |
1817 | */ | |
1818 | void | |
1819 | vdev_open_children(vdev_t *vd) | |
1820 | { | |
1821 | vdev_open_children_impl(vd, vdev_default_open_children_func); | |
1822 | } | |
fb40095f | 1823 | |
b2255edc BB |
1824 | /* |
1825 | * Conditionally open a subset of child vdevs. | |
1826 | */ | |
1827 | void | |
1828 | vdev_open_children_subset(vdev_t *vd, vdev_open_children_func_t *open_func) | |
1829 | { | |
1830 | vdev_open_children_impl(vd, open_func); | |
45d1cae3 BB |
1831 | } |
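
/*
 * A minimal sketch of a vdev_open_children_func_t predicate for use
 * with vdev_open_children_subset(). The function below is hypothetical
 * and not part of this file; it would open only those children that
 * are leaves with a known device path:
 *
 *	static boolean_t
 *	vdev_open_leaf_with_path(vdev_t *vd)
 *	{
 *		return (vd->vdev_ops->vdev_op_leaf &&
 *		    vd->vdev_path != NULL);
 *	}
 *
 *	vdev_open_children_subset(tvd, vdev_open_leaf_with_path);
 */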
1832 | ||
a1d477c2 MA |
1833 | /* |
1834 | * Compute the raidz-deflation ratio. Note that we hard-code | |
1835 | * 128k (1 << 17) because it is the "typical" blocksize. | |
1836 | * Even though SPA_MAXBLOCKSIZE changed, this algorithm cannot change; | |
1837 | * otherwise it would inconsistently account for existing bp's. | |
1838 | */ | |
1839 | static void | |
1840 | vdev_set_deflate_ratio(vdev_t *vd) | |
1841 | { | |
1842 | if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) { | |
1843 | vd->vdev_deflate_ratio = (1 << 17) / | |
1844 | (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT); | |
1845 | } | |
1846 | } | |
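
/*
 * Worked example of the ratio above (illustrative): for a top-level
 * vdev where vdev_psize_to_asize() is the identity (e.g. a plain disk
 * or mirror), the ratio is
 *
 *	(1 << 17) / ((1 << 17) >> SPA_MINBLOCKSHIFT) = 131072 / 256 = 512
 *
 * For raidz, vdev_psize_to_asize(vd, 1 << 17) also charges parity and
 * padding, so the divisor grows and the ratio drops below 512 in
 * proportion to that overhead.
 */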
1847 | ||
c494aa7f GW |
1848 | /* |
1849 | * Maximize performance by inflating the configured ashift for top-level | |
1850 | * vdevs to be as close to the physical ashift as possible while maintaining | |
1851 | * administrator-defined limits and ensuring it doesn't go below the | |
1852 | * logical ashift. | |
1853 | */ | |
1854 | static void | |
1855 | vdev_ashift_optimize(vdev_t *vd) | |
1856 | { | |
1857 | ASSERT(vd == vd->vdev_top); | |
1858 | ||
1859 | if (vd->vdev_ashift < vd->vdev_physical_ashift) { | |
1860 | vd->vdev_ashift = MIN( | |
1861 | MAX(zfs_vdev_max_auto_ashift, vd->vdev_ashift), | |
1862 | MAX(zfs_vdev_min_auto_ashift, | |
1863 | vd->vdev_physical_ashift)); | |
1864 | } else { | |
1865 | /* | |
1866 | * If the logical and physical ashifts are the same, then | |
1867 | * we ensure that the top-level vdev's ashift is not smaller | |
1868 | * than our minimum ashift value. For the unusual case | |
1869 | * where logical ashift > physical ashift, we can't cap | |
1870 | * the calculated ashift based on max ashift as that | |
1871 | * would cause failures. | |
1872 | * We still check if we need to increase it to match | |
1873 | * the min ashift. | |
1874 | */ | |
1875 | vd->vdev_ashift = MAX(zfs_vdev_min_auto_ashift, | |
1876 | vd->vdev_ashift); | |
1877 | } | |
1878 | } | |
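
/*
 * Example of the optimization above, with illustrative tunable values
 * (assume zfs_vdev_min_auto_ashift = 9, zfs_vdev_max_auto_ashift = 14):
 * a drive reporting 512-byte logical and 4K physical sectors opens with
 * vdev_ashift = 9 and vdev_physical_ashift = 12, so the first branch
 * computes MIN(MAX(14, 9), MAX(9, 12)) = 12 and allocations align to
 * the physical sector size.
 */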
1879 | ||
34dc7c2f BB |
1880 | /* |
1881 | * Prepare a virtual device for access. | |
1882 | */ | |
1883 | int | |
1884 | vdev_open(vdev_t *vd) | |
1885 | { | |
fb5f0bc8 | 1886 | spa_t *spa = vd->vdev_spa; |
34dc7c2f | 1887 | int error; |
34dc7c2f | 1888 | uint64_t osize = 0; |
1bd201e7 CS |
1889 | uint64_t max_osize = 0; |
1890 | uint64_t asize, max_asize, psize; | |
6fe3498c RM |
1891 | uint64_t logical_ashift = 0; |
1892 | uint64_t physical_ashift = 0; | |
34dc7c2f | 1893 | |
45d1cae3 BB |
1894 | ASSERT(vd->vdev_open_thread == curthread || |
1895 | spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); | |
34dc7c2f BB |
1896 | ASSERT(vd->vdev_state == VDEV_STATE_CLOSED || |
1897 | vd->vdev_state == VDEV_STATE_CANT_OPEN || | |
1898 | vd->vdev_state == VDEV_STATE_OFFLINE); | |
1899 | ||
34dc7c2f | 1900 | vd->vdev_stat.vs_aux = VDEV_AUX_NONE; |
9babb374 BB |
1901 | vd->vdev_cant_read = B_FALSE; |
1902 | vd->vdev_cant_write = B_FALSE; | |
1903 | vd->vdev_min_asize = vdev_get_min_asize(vd); | |
34dc7c2f | 1904 | |
428870ff BB |
1905 | /* |
1906 | * If this vdev is not removed, check its fault status. If it's | |
1907 | * faulted, bail out of the open. | |
1908 | */ | |
34dc7c2f BB |
1909 | if (!vd->vdev_removed && vd->vdev_faulted) { |
1910 | ASSERT(vd->vdev_children == 0); | |
428870ff BB |
1911 | ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED || |
1912 | vd->vdev_label_aux == VDEV_AUX_EXTERNAL); | |
34dc7c2f | 1913 | vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED, |
428870ff | 1914 | vd->vdev_label_aux); |
2e528b49 | 1915 | return (SET_ERROR(ENXIO)); |
34dc7c2f BB |
1916 | } else if (vd->vdev_offline) { |
1917 | ASSERT(vd->vdev_children == 0); | |
1918 | vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE); | |
2e528b49 | 1919 | return (SET_ERROR(ENXIO)); |
34dc7c2f BB |
1920 | } |
1921 | ||
6fe3498c RM |
1922 | error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, |
1923 | &logical_ashift, &physical_ashift); | |
0c637f31 | 1924 | /* |
1925 | * Physical volume size should never be larger than its max size, unless | |
1926 | * the disk has shrunk while we were reading it or the device is buggy | |
1927 | * or damaged. Either way it is not safe for use; bail out of the open. | |
1928 | */ | |
1929 | if (osize > max_osize) { | |
1930 | vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, | |
1931 | VDEV_AUX_OPEN_FAILED); | |
1932 | return (SET_ERROR(ENXIO)); | |
1933 | } | |
1934 | ||
428870ff BB |
1935 | /* |
1936 | * Reset the vdev_reopening flag so that we actually close | |
1937 | * the vdev on error. | |
1938 | */ | |
1939 | vd->vdev_reopening = B_FALSE; | |
34dc7c2f | 1940 | if (zio_injection_enabled && error == 0) |
28caa74b | 1941 | error = zio_handle_device_injection(vd, NULL, SET_ERROR(ENXIO)); |
34dc7c2f BB |
1942 | |
1943 | if (error) { | |
1944 | if (vd->vdev_removed && | |
1945 | vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED) | |
1946 | vd->vdev_removed = B_FALSE; | |
1947 | ||
6cb8e530 PZ |
1948 | if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) { |
1949 | vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, | |
1950 | vd->vdev_stat.vs_aux); | |
1951 | } else { | |
1952 | vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, | |
1953 | vd->vdev_stat.vs_aux); | |
1954 | } | |
34dc7c2f BB |
1955 | return (error); |
1956 | } | |
1957 | ||
1958 | vd->vdev_removed = B_FALSE; | |
1959 | ||
428870ff BB |
1960 | /* |
1961 | * Recheck the faulted flag now that we have confirmed that | |
1962 | * the vdev is accessible. If we're faulted, bail. | |
1963 | */ | |
1964 | if (vd->vdev_faulted) { | |
1965 | ASSERT(vd->vdev_children == 0); | |
1966 | ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED || | |
1967 | vd->vdev_label_aux == VDEV_AUX_EXTERNAL); | |
1968 | vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED, | |
1969 | vd->vdev_label_aux); | |
2e528b49 | 1970 | return (SET_ERROR(ENXIO)); |
428870ff BB |
1971 | } |
1972 | ||
34dc7c2f BB |
1973 | if (vd->vdev_degraded) { |
1974 | ASSERT(vd->vdev_children == 0); | |
1975 | vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED, | |
1976 | VDEV_AUX_ERR_EXCEEDED); | |
1977 | } else { | |
428870ff | 1978 | vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0); |
34dc7c2f BB |
1979 | } |
1980 | ||
428870ff BB |
1981 | /* |
1982 | * For hole or missing vdevs we just return success. | |
1983 | */ | |
1984 | if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) | |
1985 | return (0); | |
1986 | ||
1c27024e | 1987 | for (int c = 0; c < vd->vdev_children; c++) { |
34dc7c2f BB |
1988 | if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) { |
1989 | vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED, | |
1990 | VDEV_AUX_NONE); | |
1991 | break; | |
1992 | } | |
9babb374 | 1993 | } |
34dc7c2f BB |
1994 | |
1995 | osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t)); | |
1bd201e7 | 1996 | max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t)); |
34dc7c2f BB |
1997 | |
1998 | if (vd->vdev_children == 0) { | |
1999 | if (osize < SPA_MINDEVSIZE) { | |
2000 | vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, | |
2001 | VDEV_AUX_TOO_SMALL); | |
2e528b49 | 2002 | return (SET_ERROR(EOVERFLOW)); |
34dc7c2f BB |
2003 | } |
2004 | psize = osize; | |
2005 | asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE); | |
1bd201e7 CS |
2006 | max_asize = max_osize - (VDEV_LABEL_START_SIZE + |
2007 | VDEV_LABEL_END_SIZE); | |
34dc7c2f BB |
2008 | } else { |
2009 | if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE - | |
2010 | (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) { | |
2011 | vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, | |
2012 | VDEV_AUX_TOO_SMALL); | |
2e528b49 | 2013 | return (SET_ERROR(EOVERFLOW)); |
34dc7c2f BB |
2014 | } |
2015 | psize = 0; | |
2016 | asize = osize; | |
1bd201e7 | 2017 | max_asize = max_osize; |
34dc7c2f BB |
2018 | } |
2019 | ||
9d3f7b87 OF |
2020 | /* |
2021 | * If the vdev was expanded, record this so that we can re-create the | |
2022 | * uberblock rings in labels {2,3} during the next sync. | |
2023 | */ | |
2024 | if ((psize > vd->vdev_psize) && (vd->vdev_psize != 0)) | |
2025 | vd->vdev_copy_uberblocks = B_TRUE; | |
2026 | ||
34dc7c2f BB |
2027 | vd->vdev_psize = psize; |
2028 | ||
9babb374 | 2029 | /* |
2e215fec | 2030 | * Make sure the allocatable size hasn't shrunk too much. |
9babb374 BB |
2031 | */ |
2032 | if (asize < vd->vdev_min_asize) { | |
2033 | vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, | |
2034 | VDEV_AUX_BAD_LABEL); | |
2e528b49 | 2035 | return (SET_ERROR(EINVAL)); |
9babb374 BB |
2036 | } |
2037 | ||
c494aa7f GW |
2038 | /* |
2039 | * We can always set the logical/physical ashift members since | |
2040 | * their values are only used to calculate the vdev_ashift when | |
2041 | * the device is first added to the config. These values should | |
2042 | * not be used for anything else since they may change whenever | |
2043 | * the device is reopened and we don't store them in the label. | |
2044 | */ | |
6fe3498c RM |
2045 | vd->vdev_physical_ashift = |
2046 | MAX(physical_ashift, vd->vdev_physical_ashift); | |
c494aa7f GW |
2047 | vd->vdev_logical_ashift = MAX(logical_ashift, |
2048 | vd->vdev_logical_ashift); | |
6fe3498c | 2049 | |
34dc7c2f BB |
2050 | if (vd->vdev_asize == 0) { |
2051 | /* | |
2052 | * This is the first-ever open, so use the computed values. | |
b28e57cb | 2053 | * For compatibility, a different ashift can be requested. |
34dc7c2f BB |
2054 | */ |
2055 | vd->vdev_asize = asize; | |
1bd201e7 | 2056 | vd->vdev_max_asize = max_asize; |
c494aa7f GW |
2057 | |
2058 | /* | |
bf169e9f | 2059 | * If the vdev_ashift was not overridden at creation time, |
c494aa7f GW |
2060 | * then set it to the logical ashift and optimize the ashift. | |
2061 | */ | |
2062 | if (vd->vdev_ashift == 0) { | |
2063 | vd->vdev_ashift = vd->vdev_logical_ashift; | |
2064 | ||
2065 | if (vd->vdev_logical_ashift > ASHIFT_MAX) { | |
2066 | vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, | |
2067 | VDEV_AUX_ASHIFT_TOO_BIG); | |
2068 | return (SET_ERROR(EDOM)); | |
2069 | } | |
2070 | ||
2071 | if (vd->vdev_top == vd) { | |
2072 | vdev_ashift_optimize(vd); | |
2073 | } | |
2074 | } | |
ff61d1a4 | 2075 | if (vd->vdev_ashift != 0 && (vd->vdev_ashift < ASHIFT_MIN || |
2076 | vd->vdev_ashift > ASHIFT_MAX)) { | |
2077 | vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, | |
2078 | VDEV_AUX_BAD_ASHIFT); | |
2079 | return (SET_ERROR(EDOM)); | |
2080 | } | |
34dc7c2f BB |
2081 | } else { |
2082 | /* | |
6fe3498c | 2083 | * Make sure the required alignment hasn't increased. | |
34dc7c2f | 2084 | */ |
6fe3498c | 2085 | if (vd->vdev_ashift > vd->vdev_top->vdev_ashift && |
32a9872b | 2086 | vd->vdev_ops->vdev_op_leaf) { |
1144586b TS |
2087 | (void) zfs_ereport_post( |
2088 | FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT, | |
4f072827 | 2089 | spa, vd, NULL, NULL, 0); |
6fe3498c RM |
2090 | vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, |
2091 | VDEV_AUX_BAD_LABEL); | |
2092 | return (SET_ERROR(EDOM)); | |
6fe3498c | 2093 | } |
1bd201e7 | 2094 | vd->vdev_max_asize = max_asize; |
9babb374 | 2095 | } |
34dc7c2f | 2096 | |
9babb374 | 2097 | /* |
2e215fec SH |
2098 | * If all children are healthy we update asize if either: | |
2099 | * The asize has increased, due to a device expansion caused by dynamic | |
2100 | * LUN growth or vdev replacement, and automatic expansion is enabled, | |
2101 | * making the additional space available. | |
2102 | * | |
2103 | * The asize has decreased, due to a device shrink usually caused by a | |
2104 | * vdev replace with a smaller device. This ensures that calculations | |
2105 | * based on max_asize and asize, e.g. esize, are always valid. It's safe | |
2106 | * to do this as we've already validated that asize is greater than | |
2107 | * vdev_min_asize. | |
9babb374 | 2108 | */ |
2e215fec SH |
2109 | if (vd->vdev_state == VDEV_STATE_HEALTHY && |
2110 | ((asize > vd->vdev_asize && | |
2111 | (vd->vdev_expanding || spa->spa_autoexpand)) || | |
2112 | (asize < vd->vdev_asize))) | |
9babb374 | 2113 | vd->vdev_asize = asize; |
34dc7c2f | 2114 | |
9babb374 | 2115 | vdev_set_min_asize(vd); |
34dc7c2f BB |
2116 | |
2117 | /* | |
2118 | * Ensure we can issue some IO before declaring the | |
2119 | * vdev open for business. | |
2120 | */ | |
b128c09f BB |
2121 | if (vd->vdev_ops->vdev_op_leaf && |
2122 | (error = zio_wait(vdev_probe(vd, NULL))) != 0) { | |
428870ff BB |
2123 | vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED, |
2124 | VDEV_AUX_ERR_EXCEEDED); | |
34dc7c2f BB |
2125 | return (error); |
2126 | } | |
2127 | ||
b2255edc | 2128 | /* |
bf169e9f | 2129 | * Track the minimum allocation size. |
b2255edc BB |
2130 | */ |
2131 | if (vd->vdev_top == vd && vd->vdev_ashift != 0 && | |
2132 | vd->vdev_islog == 0 && vd->vdev_aux == NULL) { | |
2133 | uint64_t min_alloc = vdev_get_min_alloc(vd); | |
2134 | if (min_alloc < spa->spa_min_alloc) | |
2135 | spa->spa_min_alloc = min_alloc; | |
2136 | } | |
2137 | ||
34dc7c2f | 2138 | /* |
3c819a2c JP |
2139 | * If this is a leaf vdev, assess whether a resilver is needed. |
2140 | * But don't do this if we are doing a reopen for a scrub, since | |
2141 | * this would just restart the scrub we are already doing. | |
34dc7c2f | 2142 | */ |
3c819a2c JP |
2143 | if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen) |
2144 | dsl_scan_assess_vdev(spa->spa_dsl_pool, vd); | |
34dc7c2f BB |
2145 | |
2146 | return (0); | |
2147 | } | |
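
/*
 * Worked example of the leaf size math above, assuming the standard
 * label layout (two 256 KiB labels plus a 3.5 MiB boot region at the
 * front, two more labels at the back, so VDEV_LABEL_START_SIZE = 4 MiB
 * and VDEV_LABEL_END_SIZE = 512 KiB): a whole-disk leaf first has
 * osize rounded down to a multiple of sizeof (vdev_label_t) by the
 * P2ALIGN above, then asize = osize - 4.5 MiB becomes the allocatable
 * size from which metaslabs are carved.
 */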
2148 | ||
cf0977ad AS |
2149 | static void |
2150 | vdev_validate_child(void *arg) | |
2151 | { | |
2152 | vdev_t *vd = arg; | |
2153 | ||
2154 | vd->vdev_validate_thread = curthread; | |
2155 | vd->vdev_validate_error = vdev_validate(vd); | |
2156 | vd->vdev_validate_thread = NULL; | |
2157 | } | |
2158 | ||
34dc7c2f BB |
2159 | /* |
2160 | * Called once the vdevs are all opened, this routine validates the label | |
6cb8e530 | 2161 | * contents. This needs to be done before vdev_load() so that we don't |
34dc7c2f BB |
2162 | * inadvertently do repair I/Os to the wrong device. |
2163 | * | |
2164 | * This function will only return failure if one of the vdevs indicates that it | |
2165 | * has since been destroyed or exported. This is only possible if | |
2166 | * /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state | |
2167 | * will be updated but the function will return 0. | |
2168 | */ | |
2169 | int | |
6cb8e530 | 2170 | vdev_validate(vdev_t *vd) |
34dc7c2f BB |
2171 | { |
2172 | spa_t *spa = vd->vdev_spa; | |
cf0977ad | 2173 | taskq_t *tq = NULL; |
34dc7c2f | 2174 | nvlist_t *label; |
6cb8e530 | 2175 | uint64_t guid = 0, aux_guid = 0, top_guid; |
34dc7c2f | 2176 | uint64_t state; |
6cb8e530 PZ |
2177 | nvlist_t *nvl; |
2178 | uint64_t txg; | |
cf0977ad | 2179 | int children = vd->vdev_children; |
34dc7c2f | 2180 | |
6cb8e530 PZ |
2181 | if (vdev_validate_skip) |
2182 | return (0); | |
2183 | ||
cf0977ad AS |
2184 | if (children > 0) { |
2185 | tq = taskq_create("vdev_validate", children, minclsyspri, | |
2186 | children, children, TASKQ_PREPOPULATE); | |
2187 | } | |
2188 | ||
2189 | for (uint64_t c = 0; c < children; c++) { | |
2190 | vdev_t *cvd = vd->vdev_child[c]; | |
2191 | ||
2192 | if (tq == NULL || vdev_uses_zvols(cvd)) { | |
2193 | vdev_validate_child(cvd); | |
2194 | } else { | |
2195 | VERIFY(taskq_dispatch(tq, vdev_validate_child, cvd, | |
2196 | TQ_SLEEP) != TASKQID_INVALID); | |
2197 | } | |
2198 | } | |
2199 | if (tq != NULL) { | |
2200 | taskq_wait(tq); | |
2201 | taskq_destroy(tq); | |
2202 | } | |
2203 | for (int c = 0; c < children; c++) { | |
2204 | int error = vd->vdev_child[c]->vdev_validate_error; | |
2205 | ||
2206 | if (error != 0) | |
2e528b49 | 2207 | return (SET_ERROR(EBADF)); |
cf0977ad AS |
2208 | } |
2209 | ||
34dc7c2f BB |
2210 | |
2211 | /* | |
2212 | * If the device has already failed, or was marked offline, don't do | |
2213 | * any further validation. Otherwise, label I/O will fail and we will | |
2214 | * overwrite the previous state. | |
2215 | */ | |
6cb8e530 PZ |
2216 | if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd)) |
2217 | return (0); | |
34dc7c2f | 2218 | |
6cb8e530 PZ |
2219 | /* |
2220 | * If we are performing an extreme rewind, we allow for a label that | |
2221 | * was modified at a point after the current txg. | |
a11c7aae PZ |
2222 | * If the config lock is not held, do not check the txg. spa_sync could | |
2223 | * be updating the vdev's label before updating spa_last_synced_txg. | |
6cb8e530 | 2224 | */ |
a11c7aae PZ |
2225 | if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 || |
2226 | spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG) | |
6cb8e530 PZ |
2227 | txg = UINT64_MAX; |
2228 | else | |
2229 | txg = spa_last_synced_txg(spa); | |
34dc7c2f | 2230 | |
6cb8e530 | 2231 | if ((label = vdev_label_read_config(vd, txg)) == NULL) { |
dce1bf99 | 2232 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, |
6cb8e530 | 2233 | VDEV_AUX_BAD_LABEL); |
38a19edd PZ |
2234 | vdev_dbgmsg(vd, "vdev_validate: failed reading config for " |
2235 | "txg %llu", (u_longlong_t)txg); | |
6cb8e530 PZ |
2236 | return (0); |
2237 | } | |
428870ff | 2238 | |
6cb8e530 PZ |
2239 | /* |
2240 | * Determine if this vdev has been split off into another | |
2241 | * pool. If so, then refuse to open it. | |
2242 | */ | |
2243 | if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID, | |
2244 | &aux_guid) == 0 && aux_guid == spa_guid(spa)) { | |
2245 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, | |
2246 | VDEV_AUX_SPLIT_POOL); | |
2247 | nvlist_free(label); | |
2248 | vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool"); | |
2249 | return (0); | |
2250 | } | |
34dc7c2f | 2251 | |
6cb8e530 PZ |
2252 | if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) { |
2253 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, | |
2254 | VDEV_AUX_CORRUPT_DATA); | |
2255 | nvlist_free(label); | |
2256 | vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label", | |
2257 | ZPOOL_CONFIG_POOL_GUID); | |
2258 | return (0); | |
2259 | } | |
428870ff | 2260 | |
6cb8e530 PZ |
2261 | /* |
2262 | * If config is not trusted then ignore the spa guid check. This is | |
2263 | * necessary because if the machine crashed during a re-guid the new | |
2264 | * guid might have been written to all of the vdev labels, but not the | |
2265 | * cached config. The check will be performed again once we have the | |
2266 | * trusted config from the MOS. | |
2267 | */ | |
2268 | if (spa->spa_trust_config && guid != spa_guid(spa)) { | |
2269 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, | |
2270 | VDEV_AUX_CORRUPT_DATA); | |
2271 | nvlist_free(label); | |
2272 | vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't " | |
2273 | "match config (%llu != %llu)", (u_longlong_t)guid, | |
2274 | (u_longlong_t)spa_guid(spa)); | |
2275 | return (0); | |
2276 | } | |
2277 | ||
2278 | if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl) | |
2279 | != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID, | |
2280 | &aux_guid) != 0) | |
2281 | aux_guid = 0; | |
2282 | ||
2283 | if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) { | |
2284 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, | |
2285 | VDEV_AUX_CORRUPT_DATA); | |
2286 | nvlist_free(label); | |
2287 | vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label", | |
2288 | ZPOOL_CONFIG_GUID); | |
2289 | return (0); | |
2290 | } | |
2291 | ||
2292 | if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid) | |
2293 | != 0) { | |
2294 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, | |
2295 | VDEV_AUX_CORRUPT_DATA); | |
2296 | nvlist_free(label); | |
2297 | vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label", | |
2298 | ZPOOL_CONFIG_TOP_GUID); | |
2299 | return (0); | |
2300 | } | |
2301 | ||
2302 | /* | |
2303 | * If this vdev just became a top-level vdev because its sibling was | |
2304 | * detached, it will have adopted the parent's vdev guid -- but the | |
2305 | * label may or may not be on disk yet. Fortunately, either version | |
2306 | * of the label will have the same top guid, so if we're a top-level | |
2307 | * vdev, we can safely compare to that instead. | |
2308 | * However, if the config comes from a cachefile that failed to update | |
2309 | * after the detach, a top-level vdev will appear as a non top-level | |
2310 | * vdev in the config. Also relax the constraints if we perform an | |
2311 | * extreme rewind. | |
2312 | * | |
2313 | * If we split this vdev off instead, then we also check the | |
2314 | * original pool's guid. We don't want to consider the vdev | |
2315 | * corrupt if it is partway through a split operation. | |
2316 | */ | |
2317 | if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) { | |
2318 | boolean_t mismatch = B_FALSE; | |
2319 | if (spa->spa_trust_config && !spa->spa_extreme_rewind) { | |
2320 | if (vd != vd->vdev_top || vd->vdev_guid != top_guid) | |
2321 | mismatch = B_TRUE; | |
2322 | } else { | |
2323 | if (vd->vdev_guid != top_guid && | |
2324 | vd->vdev_top->vdev_guid != guid) | |
2325 | mismatch = B_TRUE; | |
34dc7c2f BB |
2326 | } |
2327 | ||
6cb8e530 | 2328 | if (mismatch) { |
34dc7c2f BB |
2329 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, |
2330 | VDEV_AUX_CORRUPT_DATA); | |
2331 | nvlist_free(label); | |
6cb8e530 PZ |
2332 | vdev_dbgmsg(vd, "vdev_validate: config guid " |
2333 | "doesn't match label guid"); | |
2334 | vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu", | |
2335 | (u_longlong_t)vd->vdev_guid, | |
2336 | (u_longlong_t)vd->vdev_top->vdev_guid); | |
2337 | vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, " | |
2338 | "aux_guid %llu", (u_longlong_t)guid, | |
2339 | (u_longlong_t)top_guid, (u_longlong_t)aux_guid); | |
34dc7c2f BB |
2340 | return (0); |
2341 | } | |
6cb8e530 | 2342 | } |
34dc7c2f | 2343 | |
6cb8e530 PZ |
2344 | if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, |
2345 | &state) != 0) { | |
2346 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, | |
2347 | VDEV_AUX_CORRUPT_DATA); | |
34dc7c2f | 2348 | nvlist_free(label); |
6cb8e530 PZ |
2349 | vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label", |
2350 | ZPOOL_CONFIG_POOL_STATE); | |
2351 | return (0); | |
2352 | } | |
34dc7c2f | 2353 | |
6cb8e530 PZ |
2354 | nvlist_free(label); |
2355 | ||
2356 | /* | |
2357 | * If this is a verbatim import, no need to check the | |
2358 | * state of the pool. | |
2359 | */ | |
2360 | if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) && | |
2361 | spa_load_state(spa) == SPA_LOAD_OPEN && | |
2362 | state != POOL_STATE_ACTIVE) { | |
2363 | vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) " | |
2364 | "for spa %s", (u_longlong_t)state, spa->spa_name); | |
2365 | return (SET_ERROR(EBADF)); | |
2366 | } | |
2367 | ||
2368 | /* | |
2369 | * If we were able to open and validate a vdev that was | |
2370 | * previously marked permanently unavailable, clear that state | |
2371 | * now. | |
2372 | */ | |
2373 | if (vd->vdev_not_present) | |
2374 | vd->vdev_not_present = 0; | |
2375 | ||
2376 | return (0); | |
2377 | } | |
2378 | ||
2379 | static void | |
2380 | vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd) | |
2381 | { | |
2a8430a2 | 2382 | char *old, *new; |
6cb8e530 PZ |
2383 | if (svd->vdev_path != NULL && dvd->vdev_path != NULL) { |
2384 | if (strcmp(svd->vdev_path, dvd->vdev_path) != 0) { | |
2385 | zfs_dbgmsg("vdev_copy_path: vdev %llu: path changed " | |
2386 | "from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid, | |
2387 | dvd->vdev_path, svd->vdev_path); | |
2388 | spa_strfree(dvd->vdev_path); | |
2389 | dvd->vdev_path = spa_strdup(svd->vdev_path); | |
4a0ee12a | 2390 | } |
6cb8e530 PZ |
2391 | } else if (svd->vdev_path != NULL) { |
2392 | dvd->vdev_path = spa_strdup(svd->vdev_path); | |
2393 | zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'", | |
2394 | (u_longlong_t)dvd->vdev_guid, dvd->vdev_path); | |
2395 | } | |
2a8430a2 TH |
2396 | |
2397 | /* | |
2398 | * Our enclosure sysfs path may have changed between imports | |
2399 | */ | |
2400 | old = dvd->vdev_enc_sysfs_path; | |
2401 | new = svd->vdev_enc_sysfs_path; | |
2402 | if ((old != NULL && new == NULL) || | |
2403 | (old == NULL && new != NULL) || | |
2404 | ((old != NULL && new != NULL) && strcmp(new, old) != 0)) { | |
2405 | zfs_dbgmsg("vdev_copy_path: vdev %llu: vdev_enc_sysfs_path " | |
2406 | "changed from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid, | |
2407 | old, new); | |
2408 | ||
2409 | if (dvd->vdev_enc_sysfs_path) | |
2410 | spa_strfree(dvd->vdev_enc_sysfs_path); | |
2411 | ||
2412 | if (svd->vdev_enc_sysfs_path) { | |
2413 | dvd->vdev_enc_sysfs_path = spa_strdup( | |
2414 | svd->vdev_enc_sysfs_path); | |
2415 | } else { | |
2416 | dvd->vdev_enc_sysfs_path = NULL; | |
2417 | } | |
2418 | } | |
6cb8e530 | 2419 | } |
34dc7c2f | 2420 | |
6cb8e530 PZ |
2421 | /* |
2422 | * Recursively copy vdev paths from one vdev to another. Source and destination | |
2423 | * vdev trees must have same geometry otherwise return error. Intended to copy | |
2424 | * paths from userland config into MOS config. | |
2425 | */ | |
2426 | int | |
2427 | vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd) | |
2428 | { | |
2429 | if ((svd->vdev_ops == &vdev_missing_ops) || | |
2430 | (svd->vdev_ishole && dvd->vdev_ishole) || | |
2431 | (dvd->vdev_ops == &vdev_indirect_ops)) | |
2432 | return (0); | |
2433 | ||
2434 | if (svd->vdev_ops != dvd->vdev_ops) { | |
2435 | vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s", | |
2436 | svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type); | |
2437 | return (SET_ERROR(EINVAL)); | |
2438 | } | |
2439 | ||
2440 | if (svd->vdev_guid != dvd->vdev_guid) { | |
2441 | vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != " | |
2442 | "%llu)", (u_longlong_t)svd->vdev_guid, | |
2443 | (u_longlong_t)dvd->vdev_guid); | |
2444 | return (SET_ERROR(EINVAL)); | |
b128c09f | 2445 | } |
34dc7c2f | 2446 | |
6cb8e530 PZ |
2447 | if (svd->vdev_children != dvd->vdev_children) { |
2448 | vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: " | |
2449 | "%llu != %llu", (u_longlong_t)svd->vdev_children, | |
2450 | (u_longlong_t)dvd->vdev_children); | |
2451 | return (SET_ERROR(EINVAL)); | |
2452 | } | |
2453 | ||
2454 | for (uint64_t i = 0; i < svd->vdev_children; i++) { | |
2455 | int error = vdev_copy_path_strict(svd->vdev_child[i], | |
2456 | dvd->vdev_child[i]); | |
2457 | if (error != 0) | |
2458 | return (error); | |
2459 | } | |
2460 | ||
2461 | if (svd->vdev_ops->vdev_op_leaf) | |
2462 | vdev_copy_path_impl(svd, dvd); | |
2463 | ||
34dc7c2f BB |
2464 | return (0); |
2465 | } | |
2466 | ||
6cb8e530 PZ |
2467 | static void |
2468 | vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd) | |
2469 | { | |
2470 | ASSERT(stvd->vdev_top == stvd); | |
2471 | ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id); | |
2472 | ||
2473 | for (uint64_t i = 0; i < dvd->vdev_children; i++) { | |
2474 | vdev_copy_path_search(stvd, dvd->vdev_child[i]); | |
2475 | } | |
2476 | ||
2477 | if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd)) | |
2478 | return; | |
2479 | ||
2480 | /* | |
2481 | * The idea here is that while a vdev can shift positions within | |
2482 | * a top vdev (when replacing, attaching mirror, etc.) it cannot | |
2483 | * step outside of it. | |
2484 | */ | |
2485 | vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid); | |
2486 | ||
2487 | if (vd == NULL || vd->vdev_ops != dvd->vdev_ops) | |
2488 | return; | |
2489 | ||
2490 | ASSERT(vd->vdev_ops->vdev_op_leaf); | |
2491 | ||
2492 | vdev_copy_path_impl(vd, dvd); | |
2493 | } | |
2494 | ||
2495 | /* | |
2496 | * Recursively copy vdev paths from one root vdev to another. Source and | |
2497 | * destination vdev trees may differ in geometry. For each destination leaf | |
2498 | * vdev, search a vdev with the same guid and top vdev id in the source. | |
2499 | * Intended to copy paths from userland config into MOS config. | |
2500 | */ | |
2501 | void | |
2502 | vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd) | |
2503 | { | |
2504 | uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children); | |
2505 | ASSERT(srvd->vdev_ops == &vdev_root_ops); | |
2506 | ASSERT(drvd->vdev_ops == &vdev_root_ops); | |
2507 | ||
2508 | for (uint64_t i = 0; i < children; i++) { | |
2509 | vdev_copy_path_search(srvd->vdev_child[i], | |
2510 | drvd->vdev_child[i]); | |
2511 | } | |
2512 | } | |
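
/*
 * A plausible caller pattern for the two variants above (sketch only;
 * the real pool-load code path may differ): prefer the strict copy and
 * fall back to the relaxed, guid-based search when the userland and
 * MOS vdev trees disagree on geometry:
 *
 *	if (vdev_copy_path_strict(rvd, mrvd) != 0)
 *		vdev_copy_path_relaxed(rvd, mrvd);
 */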
2513 | ||
34dc7c2f BB |
2514 | /* |
2515 | * Close a virtual device. | |
2516 | */ | |
2517 | void | |
2518 | vdev_close(vdev_t *vd) | |
2519 | { | |
428870ff | 2520 | vdev_t *pvd = vd->vdev_parent; |
2a8ba608 | 2521 | spa_t *spa __maybe_unused = vd->vdev_spa; |
fb5f0bc8 | 2522 | |
b2255edc BB |
2523 | ASSERT(vd != NULL); |
2524 | ASSERT(vd->vdev_open_thread == curthread || | |
2525 | spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); | |
fb5f0bc8 | 2526 | |
428870ff BB |
2527 | /* |
2528 | * If our parent is reopening, then we are as well, unless we are | |
2529 | * going offline. | |
2530 | */ | |
2531 | if (pvd != NULL && pvd->vdev_reopening) | |
2532 | vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline); | |
2533 | ||
34dc7c2f BB |
2534 | vd->vdev_ops->vdev_op_close(vd); |
2535 | ||
2536 | vdev_cache_purge(vd); | |
2537 | ||
2538 | /* | |
9babb374 | 2539 | * We record the previous state before we close it, so that if we are |
34dc7c2f BB |
2540 | * doing a reopen(), we don't generate FMA ereports if we notice that |
2541 | * it's still faulted. | |
2542 | */ | |
2543 | vd->vdev_prevstate = vd->vdev_state; | |
2544 | ||
2545 | if (vd->vdev_offline) | |
2546 | vd->vdev_state = VDEV_STATE_OFFLINE; | |
2547 | else | |
2548 | vd->vdev_state = VDEV_STATE_CLOSED; | |
2549 | vd->vdev_stat.vs_aux = VDEV_AUX_NONE; | |
2550 | } | |
2551 | ||
428870ff BB |
2552 | void |
2553 | vdev_hold(vdev_t *vd) | |
2554 | { | |
2555 | spa_t *spa = vd->vdev_spa; | |
2556 | ||
2557 | ASSERT(spa_is_root(spa)); | |
2558 | if (spa->spa_state == POOL_STATE_UNINITIALIZED) | |
2559 | return; | |
2560 | ||
1c27024e | 2561 | for (int c = 0; c < vd->vdev_children; c++) |
428870ff BB |
2562 | vdev_hold(vd->vdev_child[c]); |
2563 | ||
11f2e9a4 | 2564 | if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_hold != NULL) |
428870ff BB |
2565 | vd->vdev_ops->vdev_op_hold(vd); |
2566 | } | |
2567 | ||
2568 | void | |
2569 | vdev_rele(vdev_t *vd) | |
2570 | { | |
d6320ddb | 2571 | ASSERT(spa_is_root(vd->vdev_spa)); |
1c27024e | 2572 | for (int c = 0; c < vd->vdev_children; c++) |
428870ff BB |
2573 | vdev_rele(vd->vdev_child[c]); |
2574 | ||
11f2e9a4 | 2575 | if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_rele != NULL) |
428870ff BB |
2576 | vd->vdev_ops->vdev_op_rele(vd); |
2577 | } | |
2578 | ||
2579 | /* | |
2580 | * Reopen all interior vdevs and any unopened leaves. We don't actually | |
2581 | * reopen leaf vdevs which had previously been opened as they might deadlock | |
2582 | * on the spa_config_lock. Instead we only obtain the leaf's physical size. | |
2583 | * If the leaf has never been opened then open it, as usual. | |
2584 | */ | |
34dc7c2f BB |
2585 | void |
2586 | vdev_reopen(vdev_t *vd) | |
2587 | { | |
2588 | spa_t *spa = vd->vdev_spa; | |
2589 | ||
b128c09f | 2590 | ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); |
34dc7c2f | 2591 | |
428870ff BB |
2592 | /* set the reopening flag unless we're taking the vdev offline */ |
2593 | vd->vdev_reopening = !vd->vdev_offline; | |
34dc7c2f BB |
2594 | vdev_close(vd); |
2595 | (void) vdev_open(vd); | |
2596 | ||
2597 | /* | |
2598 | * Call vdev_validate() here to make sure we have the same device. | |
2599 | * Otherwise, a device with an invalid label could be successfully | |
2600 | * opened in response to vdev_reopen(). | |
2601 | */ | |
b128c09f BB |
2602 | if (vd->vdev_aux) { |
2603 | (void) vdev_validate_aux(vd); | |
2604 | if (vdev_readable(vd) && vdev_writeable(vd) && | |
77f6826b GA |
2605 | vd->vdev_aux == &spa->spa_l2cache) { |
2606 | /* | |
77f6826b GA |
2607 | * If the vdev is present we should evict all ARC | |
2608 | * buffers and pointers to log blocks and reclaim their | |
2609 | * space before restoring its contents to L2ARC. | |
2610 | */ | |
2611 | if (l2arc_vdev_present(vd)) { | |
2612 | l2arc_rebuild_vdev(vd, B_TRUE); | |
2613 | } else { | |
2614 | l2arc_add_vdev(spa, vd); | |
2615 | } | |
2616 | spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD); | |
b7654bd7 | 2617 | spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM); |
77f6826b | 2618 | } |
b128c09f | 2619 | } else { |
6cb8e530 | 2620 | (void) vdev_validate(vd); |
b128c09f | 2621 | } |
34dc7c2f BB |
2622 | |
2623 | /* | |
2624 | * Reassess parent vdev's health. | |
2625 | */ | |
2626 | vdev_propagate_state(vd); | |
2627 | } | |
2628 | ||
2629 | int | |
2630 | vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing) | |
2631 | { | |
2632 | int error; | |
2633 | ||
2634 | /* | |
2635 | * Normally, partial opens (e.g. of a mirror) are allowed. | |
2636 | * For a create, however, we want to fail the request if | |
2637 | * there are any components we can't open. | |
2638 | */ | |
2639 | error = vdev_open(vd); | |
2640 | ||
2641 | if (error || vd->vdev_state != VDEV_STATE_HEALTHY) { | |
2642 | vdev_close(vd); | |
28caa74b | 2643 | return (error ? error : SET_ERROR(ENXIO)); |
34dc7c2f BB |
2644 | } |
2645 | ||
2646 | /* | |
93cf2076 | 2647 | * Recursively load DTLs and initialize all labels. |
34dc7c2f | 2648 | */ |
93cf2076 GW |
2649 | if ((error = vdev_dtl_load(vd)) != 0 || |
2650 | (error = vdev_label_init(vd, txg, isreplacing ? | |
34dc7c2f BB |
2651 | VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) { |
2652 | vdev_close(vd); | |
2653 | return (error); | |
2654 | } | |
2655 | ||
2656 | return (0); | |
2657 | } | |
2658 | ||
34dc7c2f | 2659 | void |
9babb374 | 2660 | vdev_metaslab_set_size(vdev_t *vd) |
34dc7c2f | 2661 | { |
d2734cce | 2662 | uint64_t asize = vd->vdev_asize; |
c853f382 | 2663 | uint64_t ms_count = asize >> zfs_vdev_default_ms_shift; |
e4e94ca3 | 2664 | uint64_t ms_shift; |
d2734cce | 2665 | |
34dc7c2f | 2666 | /* |
e4e94ca3 DB |
2667 | * There are two dimensions to the metaslab sizing calculation: |
2668 | * the size of the metaslab and the count of metaslabs per vdev. | |
e4e94ca3 | 2669 | * |
c853f382 SD |
2670 | * The default values used below are a good balance between memory |
2671 | * usage (larger metaslab size means more memory needed for loaded | |
2672 | * metaslabs; more metaslabs means more memory needed for the | |
2673 | * metaslab_t structs), metaslab load time (larger metaslabs take | |
2674 | * longer to load), and metaslab sync time (more metaslabs means | |
2675 | * more time spent syncing all of them). | |
2676 | * | |
2677 | * In general, we aim for zfs_vdev_default_ms_count (200) metaslabs. | |
2678 | * The ranges of these dimensions are as follows: | |
2679 | * | |
2680 | * 2^29 <= ms_size <= 2^34 | |
e4e94ca3 DB |
2681 | * 16 <= ms_count <= 131,072 |
2682 | * | |
2683 | * On the lower end of vdev sizes, we aim for metaslab sizes of | |
2684 | * at least 512MB (2^29) to minimize fragmentation effects when | |
2685 | * testing with smaller devices. However, the count constraint | |
2686 | * of at least 16 metaslabs will override this minimum size goal. | |
2687 | * | |
2688 | * On the upper end of vdev sizes, we aim for a maximum metaslab | |
c853f382 SD |
2689 | * size of 16GB. However, we will cap the total count to 2^17 |
2690 | * metaslabs to keep our memory footprint in check and let the | |
2691 | * metaslab size grow from there if that limit is hit. | |
e4e94ca3 DB |
2692 | * |
2693 | * The net effect of applying the above constraints is summarized below. | |
2694 | * | |
c853f382 SD |
2695 | * vdev size metaslab count |
2696 | * --------------|----------------- | |
2697 | * < 8GB ~16 | |
2698 | * 8GB - 100GB one per 512MB | |
2699 | * 100GB - 3TB ~200 | |
2700 | * 3TB - 2PB one per 16GB | |
2701 | * > 2PB ~131,072 | |
2702 | * -------------------------------- | |
2703 | * | |
2704 | * Finally, note that all of the above determines the initial | |
2705 | * number of metaslabs. Expanding a top-level vdev will result | |
2706 | * in additional metaslabs being allocated making it possible | |
2707 | * to exceed the zfs_vdev_ms_count_limit. | |
34dc7c2f | 2708 | */ |
d2734cce | 2709 | |
c853f382 SD |
2710 | if (ms_count < zfs_vdev_min_ms_count) |
2711 | ms_shift = highbit64(asize / zfs_vdev_min_ms_count); | |
2712 | else if (ms_count > zfs_vdev_default_ms_count) | |
2713 | ms_shift = highbit64(asize / zfs_vdev_default_ms_count); | |
e4e94ca3 | 2714 | else |
c853f382 | 2715 | ms_shift = zfs_vdev_default_ms_shift; |
e4e94ca3 DB |
2716 | |
2717 | if (ms_shift < SPA_MAXBLOCKSHIFT) { | |
2718 | ms_shift = SPA_MAXBLOCKSHIFT; | |
c853f382 SD |
2719 | } else if (ms_shift > zfs_vdev_max_ms_shift) { |
2720 | ms_shift = zfs_vdev_max_ms_shift; | |
e4e94ca3 | 2721 | /* cap the total count to constrain memory footprint */ |
c853f382 SD |
2722 | if ((asize >> ms_shift) > zfs_vdev_ms_count_limit) |
2723 | ms_shift = highbit64(asize / zfs_vdev_ms_count_limit); | |
d2734cce SD |
2724 | } |
2725 | ||
2726 | vd->vdev_ms_shift = ms_shift; | |
2727 | ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT); | |
34dc7c2f BB |
2728 | } |
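
/*
 * Worked example of the sizing rules above, using the tunable defaults
 * described in the comment (default count 200, default shift 29, max
 * shift 34): a 1 TiB (2^40 byte) vdev starts with ms_count =
 * 2^40 >> 29 = 2048, which exceeds 200, so
 *
 *	ms_shift = highbit64(2^40 / 200) = 33	(8 GiB metaslabs)
 *
 * giving 2^40 >> 33 = 128 metaslabs -- squarely in the "100GB - 3TB"
 * row of the table above.
 */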
2729 | ||
2730 | void | |
2731 | vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg) | |
2732 | { | |
2733 | ASSERT(vd == vd->vdev_top); | |
a1d477c2 MA |
2734 | /* indirect vdevs don't have metaslabs or dtls */ |
2735 | ASSERT(vdev_is_concrete(vd) || flags == 0); | |
34dc7c2f | 2736 | ASSERT(ISP2(flags)); |
572e2857 | 2737 | ASSERT(spa_writeable(vd->vdev_spa)); |
34dc7c2f BB |
2738 | |
2739 | if (flags & VDD_METASLAB) | |
2740 | (void) txg_list_add(&vd->vdev_ms_list, arg, txg); | |
2741 | ||
2742 | if (flags & VDD_DTL) | |
2743 | (void) txg_list_add(&vd->vdev_dtl_list, arg, txg); | |
2744 | ||
2745 | (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg); | |
2746 | } | |
2747 | ||
93cf2076 GW |
2748 | void |
2749 | vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg) | |
2750 | { | |
1c27024e | 2751 | for (int c = 0; c < vd->vdev_children; c++) |
93cf2076 GW |
2752 | vdev_dirty_leaves(vd->vdev_child[c], flags, txg); |
2753 | ||
2754 | if (vd->vdev_ops->vdev_op_leaf) | |
2755 | vdev_dirty(vd->vdev_top, flags, vd, txg); | |
2756 | } | |
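
/*
 * Typical callers of the dirty interfaces above (sketch; the exact
 * call sites live elsewhere in the tree): a metaslab with pending
 * changes is queued with vdev_dirty(vd->vdev_top, VDD_METASLAB, msp,
 * txg), while DTL changes queue the leaf itself with
 * vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg). Both also place the
 * top-level vdev on the per-txg spa_vdev_txg_list so it is visited
 * during spa_sync().
 */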
2757 | ||
fb5f0bc8 BB |
2758 | /* |
2759 | * DTLs. | |
2760 | * | |
2761 | * A vdev's DTL (dirty time log) is the set of transaction groups for which | |
428870ff | 2762 | * the vdev has less than perfect replication. There are four kinds of DTL: |
fb5f0bc8 BB |
2763 | * |
2764 | * DTL_MISSING: txgs for which the vdev has no valid copies of the data | |
2765 | * | |
2766 | * DTL_PARTIAL: txgs for which data is available, but not fully replicated | |
2767 | * | |
2768 | * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon | |
2769 | * scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of | |
2770 | * txgs that was scrubbed. | |
2771 | * | |
2772 | * DTL_OUTAGE: txgs which cannot currently be read, whether due to | |
2773 | * persistent errors or just some device being offline. | |
2774 | * Unlike the other three, the DTL_OUTAGE map is not generally | |
2775 | * maintained; it's only computed when needed, typically to | |
2776 | * determine whether a device can be detached. | |
2777 | * | |
2778 | * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device | |
2779 | * either has the data or it doesn't. | |
2780 | * | |
2781 | * For interior vdevs such as mirror and RAID-Z the picture is more complex. | |
2782 | * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because | |
2783 | * if any child is less than fully replicated, then so is its parent. | |
2784 | * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs, | |
2785 | * comprising only those txgs which appear in 'maxfaults' or more children; | |
2786 | * those are the txgs we don't have enough replication to read. For example, | |
2787 | * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2); | |
2788 | * thus, its DTL_MISSING consists of the set of txgs that appear in more than | |
2789 | * two child DTL_MISSING maps. | |
2790 | * | |
2791 | * It should be clear from the above that to compute the DTLs and outage maps | |
2792 | * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps. | |
2793 | * Therefore, that is all we keep on disk. When loading the pool, or after | |
2794 | * a configuration change, we generate all other DTLs from first principles. | |
2795 | */ | |
34dc7c2f | 2796 | void |
fb5f0bc8 | 2797 | vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) |
34dc7c2f | 2798 | { |
93cf2076 | 2799 | range_tree_t *rt = vd->vdev_dtl[t]; |
fb5f0bc8 BB |
2800 | |
2801 | ASSERT(t < DTL_TYPES); | |
2802 | ASSERT(vd != vd->vdev_spa->spa_root_vdev); | |
572e2857 | 2803 | ASSERT(spa_writeable(vd->vdev_spa)); |
fb5f0bc8 | 2804 | |
a1d477c2 | 2805 | mutex_enter(&vd->vdev_dtl_lock); |
93cf2076 GW |
2806 | if (!range_tree_contains(rt, txg, size)) |
2807 | range_tree_add(rt, txg, size); | |
a1d477c2 | 2808 | mutex_exit(&vd->vdev_dtl_lock); |
34dc7c2f BB |
2809 | } |
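
/*
 * Concrete illustration of the interior DTL_MISSING rule described
 * above (hypothetical numbers): consider a raidz2 (maxfaults == 2)
 * whose children's DTL_MISSING maps are empty except for three
 * children reporting txg ranges [10,20], [15,25], and [18,22]. Only
 * txgs missing from more than two children are unreadable, so the
 * parent's DTL_MISSING is the triple overlap [18,20]; a txg like 16,
 * missing from only two children, can still be reconstructed from
 * parity.
 */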
2810 | ||
fb5f0bc8 BB |
2811 | boolean_t |
2812 | vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) | |
34dc7c2f | 2813 | { |
93cf2076 | 2814 | range_tree_t *rt = vd->vdev_dtl[t]; |
fb5f0bc8 | 2815 | boolean_t dirty = B_FALSE; |
34dc7c2f | 2816 | |
fb5f0bc8 BB |
2817 | ASSERT(t < DTL_TYPES); |
2818 | ASSERT(vd != vd->vdev_spa->spa_root_vdev); | |
34dc7c2f | 2819 | |
a1d477c2 MA |
2820 | /* |
2821 | * While we are loading the pool, the DTLs have not been loaded yet. | |
4d0ba941 BB |
2822 | * This isn't a problem but it can result in devices being tried |
2823 | * which are known to not have the data. In that case, the import | |
2824 | * relies on the checksum to ensure that we get the right data. | |
2825 | * Note that while importing we are only reading the MOS, which is | |
2826 | * always checksummed. | |
a1d477c2 | 2827 | */ |
a1d477c2 | 2828 | mutex_enter(&vd->vdev_dtl_lock); |
d2734cce | 2829 | if (!range_tree_is_empty(rt)) |
93cf2076 | 2830 | dirty = range_tree_contains(rt, txg, size); |
a1d477c2 | 2831 | mutex_exit(&vd->vdev_dtl_lock); |
34dc7c2f BB |
2832 | |
2833 | return (dirty); | |
2834 | } | |
2835 | ||
boolean_t
vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
{
        range_tree_t *rt = vd->vdev_dtl[t];
        boolean_t empty;

        mutex_enter(&vd->vdev_dtl_lock);
        empty = range_tree_is_empty(rt);
        mutex_exit(&vd->vdev_dtl_lock);

        return (empty);
}

/*
 * Check if the txg falls within the range which must be
 * resilvered.  DVAs outside this range can always be skipped.
 */
boolean_t
vdev_default_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
    uint64_t phys_birth)
{
        (void) dva, (void) psize;

        /* Set by sequential resilver. */
        if (phys_birth == TXG_UNKNOWN)
                return (B_TRUE);

        return (vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1));
}

/*
 * Returns B_TRUE if the vdev determines the DVA needs to be resilvered.
 */
boolean_t
vdev_dtl_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
    uint64_t phys_birth)
{
        ASSERT(vd != vd->vdev_spa->spa_root_vdev);

        if (vd->vdev_ops->vdev_op_need_resilver == NULL ||
            vd->vdev_ops->vdev_op_leaf)
                return (B_TRUE);

        return (vd->vdev_ops->vdev_op_need_resilver(vd, dva, psize,
            phys_birth));
}

/*
 * Returns the lowest txg in the DTL range.
 */
static uint64_t
vdev_dtl_min(vdev_t *vd)
{
        ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
        ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
        ASSERT0(vd->vdev_children);

        return (range_tree_min(vd->vdev_dtl[DTL_MISSING]) - 1);
}

/*
 * Returns the highest txg in the DTL.
 */
static uint64_t
vdev_dtl_max(vdev_t *vd)
{
        ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
        ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
        ASSERT0(vd->vdev_children);

        return (range_tree_max(vd->vdev_dtl[DTL_MISSING]));
}

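/*
 * Editor's note (illustration): the -1 in vdev_dtl_min() makes the returned
 * lower bound exclusive; the scan code treats the resilver window as
 * (scn_min_txg, scn_max_txg] -- see the range check in
 * vdev_dtl_should_excise() below.  For a DTL_MISSING of [100, 200) the pair
 * works out to min 99 and max 200.
 */
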
/*
 * Determine if a resilvering vdev should remove any DTL entries from
 * its range.  If the vdev was resilvering for the entire duration of the
 * scan then it should excise that range from its DTLs.  Otherwise, this
 * vdev is considered partially resilvered and should leave its DTL
 * entries intact.  The comment in vdev_dtl_reassess() describes how we
 * excise the DTLs.
 */
static boolean_t
vdev_dtl_should_excise(vdev_t *vd, boolean_t rebuild_done)
{
        ASSERT0(vd->vdev_children);

        if (vd->vdev_state < VDEV_STATE_DEGRADED)
                return (B_FALSE);

        if (vd->vdev_resilver_deferred)
                return (B_FALSE);

        if (range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
                return (B_TRUE);

        if (rebuild_done) {
                vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
                vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

                /* Rebuild not initiated by attach */
                if (vd->vdev_rebuild_txg == 0)
                        return (B_TRUE);

                /*
                 * When a rebuild completes without error then all missing
                 * data up to the rebuild max txg has been reconstructed and
                 * the DTL is eligible for excision.
                 */
                if (vrp->vrp_rebuild_state == VDEV_REBUILD_COMPLETE &&
                    vdev_dtl_max(vd) <= vrp->vrp_max_txg) {
                        ASSERT3U(vrp->vrp_min_txg, <=, vdev_dtl_min(vd));
                        ASSERT3U(vrp->vrp_min_txg, <, vd->vdev_rebuild_txg);
                        ASSERT3U(vd->vdev_rebuild_txg, <=, vrp->vrp_max_txg);
                        return (B_TRUE);
                }
        } else {
                dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
                dsl_scan_phys_t *scnp __maybe_unused = &scn->scn_phys;

                /* Resilver not initiated by attach */
                if (vd->vdev_resilver_txg == 0)
                        return (B_TRUE);

                /*
                 * When a resilver is initiated the scan will assign the
                 * scn_max_txg value to the highest txg value that exists
                 * in all DTLs.  If this device's max DTL is not part of this
                 * scan (i.e. it is not in the range
                 * (scn_min_txg, scn_max_txg]) then it is not eligible for
                 * excision.
                 */
                if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
                        ASSERT3U(scnp->scn_min_txg, <=, vdev_dtl_min(vd));
                        ASSERT3U(scnp->scn_min_txg, <, vd->vdev_resilver_txg);
                        ASSERT3U(vd->vdev_resilver_txg, <=, scnp->scn_max_txg);
                        return (B_TRUE);
                }
        }

        return (B_FALSE);
}

/*
 * Reassess DTLs after a config change or scrub completion.  If txg == 0 no
 * write operations will be issued to the pool.
 */
void
vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
    boolean_t scrub_done, boolean_t rebuild_done)
{
        spa_t *spa = vd->vdev_spa;
        avl_tree_t reftree;
        int minref;

        ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

        for (int c = 0; c < vd->vdev_children; c++)
                vdev_dtl_reassess(vd->vdev_child[c], txg,
                    scrub_txg, scrub_done, rebuild_done);

        if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux)
                return;

        if (vd->vdev_ops->vdev_op_leaf) {
                dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
                vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
                boolean_t check_excise = B_FALSE;
                boolean_t wasempty = B_TRUE;

                mutex_enter(&vd->vdev_dtl_lock);

                /*
                 * If requested, pretend the scan or rebuild completed
                 * cleanly.
                 */
                if (zfs_scan_ignore_errors) {
                        if (scn != NULL)
                                scn->scn_phys.scn_errors = 0;
                        if (vr != NULL)
                                vr->vr_rebuild_phys.vrp_errors = 0;
                }

                if (scrub_txg != 0 &&
                    !range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
                        wasempty = B_FALSE;
                        zfs_dbgmsg("guid:%llu txg:%llu scrub:%llu started:%d "
                            "dtl:%llu/%llu errors:%llu",
                            (u_longlong_t)vd->vdev_guid, (u_longlong_t)txg,
                            (u_longlong_t)scrub_txg, spa->spa_scrub_started,
                            (u_longlong_t)vdev_dtl_min(vd),
                            (u_longlong_t)vdev_dtl_max(vd),
                            (u_longlong_t)(scn ? scn->scn_phys.scn_errors : 0));
                }

                /*
                 * If we've completed a scrub/resilver or a rebuild cleanly
                 * then determine if this vdev should remove any DTLs.  We
                 * only want to excise regions on vdevs that were available
                 * during the entire duration of this scan.
                 */
                if (rebuild_done &&
                    vr != NULL && vr->vr_rebuild_phys.vrp_errors == 0) {
                        check_excise = B_TRUE;
                } else {
                        if (spa->spa_scrub_started ||
                            (scn != NULL && scn->scn_phys.scn_errors == 0)) {
                                check_excise = B_TRUE;
                        }
                }

                if (scrub_txg && check_excise &&
                    vdev_dtl_should_excise(vd, rebuild_done)) {
                        /*
                         * We completed a scrub, resilver or rebuild up to
                         * scrub_txg.  If we did it without rebooting, then
                         * the scrub dtl will be valid, so excise the old
                         * region and fold in the scrub dtl.  Otherwise,
                         * leave the dtl as-is if there was an error.
                         *
                         * There's a little trick here: to excise the
                         * beginning of the DTL_MISSING map, we put it into
                         * a reference tree and then add a segment with
                         * refcnt -1 that covers the range [0, scrub_txg).
                         * This means that each txg in that range has refcnt
                         * -1 or 0.  We then add DTL_SCRUB with a refcnt of
                         * 2, so that entries in the range [0, scrub_txg)
                         * will have a positive refcnt -- either 1 or 2.  We
                         * then convert the reference tree into the new
                         * DTL_MISSING map.
                         */
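                        /*
                         * Worked example (editor's illustration): suppose
                         * DTL_MISSING is [100, 200), scrub_txg is 150, and
                         * DTL_SCRUB holds [120, 130).  The resulting
                         * refcounts are
                         *
                         *      [0, 100)    -1  (excise segment only)
                         *      [100, 120)   0  (missing + excise)
                         *      [120, 130)   2  (0 + scrub refcnt 2)
                         *      [130, 150)   0
                         *      [150, 200)   1  (missing, past scrub_txg)
                         *
                         * Generating with minref 1 yields a new DTL_MISSING
                         * of [120, 130) and [150, 200): the scrubbed prefix
                         * is excised except where the scrub could not
                         * repair.
                         */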
                        space_reftree_create(&reftree);
                        space_reftree_add_map(&reftree,
                            vd->vdev_dtl[DTL_MISSING], 1);
                        space_reftree_add_seg(&reftree, 0, scrub_txg, -1);
                        space_reftree_add_map(&reftree,
                            vd->vdev_dtl[DTL_SCRUB], 2);
                        space_reftree_generate_map(&reftree,
                            vd->vdev_dtl[DTL_MISSING], 1);
                        space_reftree_destroy(&reftree);

                        if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
                                zfs_dbgmsg("update DTL_MISSING:%llu/%llu",
                                    (u_longlong_t)vdev_dtl_min(vd),
                                    (u_longlong_t)vdev_dtl_max(vd));
                        } else if (!wasempty) {
                                zfs_dbgmsg("DTL_MISSING is now empty");
                        }
                }
                range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
                range_tree_walk(vd->vdev_dtl[DTL_MISSING],
                    range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
                if (scrub_done)
                        range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
                range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
                if (!vdev_readable(vd))
                        range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
                else
                        range_tree_walk(vd->vdev_dtl[DTL_MISSING],
                            range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);

                /*
                 * If the vdev was resilvering or rebuilding and no longer
                 * has any DTLs then reset the appropriate flag and dirty
                 * the top level so that we persist the change.
                 */
                if (txg != 0 &&
                    range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
                    range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) {
                        if (vd->vdev_rebuild_txg != 0) {
                                vd->vdev_rebuild_txg = 0;
                                vdev_config_dirty(vd->vdev_top);
                        } else if (vd->vdev_resilver_txg != 0) {
                                vd->vdev_resilver_txg = 0;
                                vdev_config_dirty(vd->vdev_top);
                        }
                }

                mutex_exit(&vd->vdev_dtl_lock);

                if (txg != 0)
                        vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
                return;
        }

        mutex_enter(&vd->vdev_dtl_lock);
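        /*
         * Editor's note (illustration): each child's DTL is folded into a
         * reference tree, and the parent's DTL keeps only the ranges meeting
         * 'minref': 1 for DTL_PARTIAL (a plain union), all children for a
         * mirror, and nparity + 1 for RAID-Z/dRAID.  Note that 's' below
         * substitutes a child's DTL_OUTAGE when computing the parent's
         * DTL_MISSING, since an unreadable child is effectively missing.
         */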
        for (int t = 0; t < DTL_TYPES; t++) {
                /* account for child's outage in parent's missing map */
                int s = (t == DTL_MISSING) ? DTL_OUTAGE: t;
                if (t == DTL_SCRUB)
                        continue;                       /* leaf vdevs only */
                if (t == DTL_PARTIAL)
                        minref = 1;                     /* i.e. non-zero */
                else if (vdev_get_nparity(vd) != 0)
                        minref = vdev_get_nparity(vd) + 1; /* RAID-Z, dRAID */
                else
                        minref = vd->vdev_children;     /* any kind of mirror */
                space_reftree_create(&reftree);
                for (int c = 0; c < vd->vdev_children; c++) {
                        vdev_t *cvd = vd->vdev_child[c];
                        mutex_enter(&cvd->vdev_dtl_lock);
                        space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1);
                        mutex_exit(&cvd->vdev_dtl_lock);
                }
                space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref);
                space_reftree_destroy(&reftree);
        }
        mutex_exit(&vd->vdev_dtl_lock);
}

int
vdev_dtl_load(vdev_t *vd)
{
        spa_t *spa = vd->vdev_spa;
        objset_t *mos = spa->spa_meta_objset;
        range_tree_t *rt;
        int error = 0;

        if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
                ASSERT(vdev_is_concrete(vd));

                /*
                 * If the dtl cannot be sync'd there is no need to open it.
                 */
                if (spa->spa_mode == SPA_MODE_READ && !spa->spa_read_spacemaps)
                        return (0);

                error = space_map_open(&vd->vdev_dtl_sm, mos,
                    vd->vdev_dtl_object, 0, -1ULL, 0);
                if (error)
                        return (error);
                ASSERT(vd->vdev_dtl_sm != NULL);

                rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
                error = space_map_load(vd->vdev_dtl_sm, rt, SM_ALLOC);
                if (error == 0) {
                        mutex_enter(&vd->vdev_dtl_lock);
                        range_tree_walk(rt, range_tree_add,
                            vd->vdev_dtl[DTL_MISSING]);
                        mutex_exit(&vd->vdev_dtl_lock);
                }

                range_tree_vacate(rt, NULL, NULL);
                range_tree_destroy(rt);

                return (error);
        }

        for (int c = 0; c < vd->vdev_children; c++) {
                error = vdev_dtl_load(vd->vdev_child[c]);
                if (error != 0)
                        break;
        }

        return (error);
}

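/*
 * Editor's note (illustration): only the leaf DTL_MISSING maps are ever
 * persisted, as a space map in the MOS whose SM_ALLOC segments are the
 * missing txg ranges; everything else is regenerated from first principles
 * by vdev_dtl_reassess(), per the DTL block comment above vdev_dtl_dirty().
 */
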
static void
vdev_zap_allocation_data(vdev_t *vd, dmu_tx_t *tx)
{
        spa_t *spa = vd->vdev_spa;
        objset_t *mos = spa->spa_meta_objset;
        vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
        const char *string;

        ASSERT(alloc_bias != VDEV_BIAS_NONE);

        string =
            (alloc_bias == VDEV_BIAS_LOG) ? VDEV_ALLOC_BIAS_LOG :
            (alloc_bias == VDEV_BIAS_SPECIAL) ? VDEV_ALLOC_BIAS_SPECIAL :
            (alloc_bias == VDEV_BIAS_DEDUP) ? VDEV_ALLOC_BIAS_DEDUP : NULL;

        ASSERT(string != NULL);
        VERIFY0(zap_add(mos, vd->vdev_top_zap, VDEV_TOP_ZAP_ALLOCATION_BIAS,
            1, strlen(string) + 1, string, tx));

        if (alloc_bias == VDEV_BIAS_SPECIAL || alloc_bias == VDEV_BIAS_DEDUP) {
                spa_activate_allocation_classes(spa, tx);
        }
}

void
vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx)
{
        spa_t *spa = vd->vdev_spa;

        VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx));
        VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
            zapobj, tx));
}

uint64_t
vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx)
{
        spa_t *spa = vd->vdev_spa;
        uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA,
            DMU_OT_NONE, 0, tx);

        ASSERT(zap != 0);
        VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
            zap, tx));

        return (zap);
}

void
vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
{
        if (vd->vdev_ops != &vdev_hole_ops &&
            vd->vdev_ops != &vdev_missing_ops &&
            vd->vdev_ops != &vdev_root_ops &&
            !vd->vdev_top->vdev_removing) {
                if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) {
                        vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx);
                }
                if (vd == vd->vdev_top && vd->vdev_top_zap == 0) {
                        vd->vdev_top_zap = vdev_create_link_zap(vd, tx);
                        if (vd->vdev_alloc_bias != VDEV_BIAS_NONE)
                                vdev_zap_allocation_data(vd, tx);
                }
        }

        for (uint64_t i = 0; i < vd->vdev_children; i++) {
                vdev_construct_zaps(vd->vdev_child[i], tx);
        }
}

static void
vdev_dtl_sync(vdev_t *vd, uint64_t txg)
{
        spa_t *spa = vd->vdev_spa;
        range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
        objset_t *mos = spa->spa_meta_objset;
        range_tree_t *rtsync;
        dmu_tx_t *tx;
        uint64_t object = space_map_object(vd->vdev_dtl_sm);

        ASSERT(vdev_is_concrete(vd));
        ASSERT(vd->vdev_ops->vdev_op_leaf);

        tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

        if (vd->vdev_detached || vd->vdev_top->vdev_removing) {
                mutex_enter(&vd->vdev_dtl_lock);
                space_map_free(vd->vdev_dtl_sm, tx);
                space_map_close(vd->vdev_dtl_sm);
                vd->vdev_dtl_sm = NULL;
                mutex_exit(&vd->vdev_dtl_lock);

                /*
                 * We only destroy the leaf ZAP for detached leaves or for
                 * removed log devices.  Removed data devices handle leaf ZAP
                 * cleanup later, once cancellation is no longer possible.
                 */
                if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached ||
                    vd->vdev_top->vdev_islog)) {
                        vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx);
                        vd->vdev_leaf_zap = 0;
                }

                dmu_tx_commit(tx);
                return;
        }

        if (vd->vdev_dtl_sm == NULL) {
                uint64_t new_object;

                new_object = space_map_alloc(mos, zfs_vdev_dtl_sm_blksz, tx);
                VERIFY3U(new_object, !=, 0);

                VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
                    0, -1ULL, 0));
                ASSERT(vd->vdev_dtl_sm != NULL);
        }

        rtsync = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);

        mutex_enter(&vd->vdev_dtl_lock);
        range_tree_walk(rt, range_tree_add, rtsync);
        mutex_exit(&vd->vdev_dtl_lock);

        space_map_truncate(vd->vdev_dtl_sm, zfs_vdev_dtl_sm_blksz, tx);
        space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx);
        range_tree_vacate(rtsync, NULL, NULL);

        range_tree_destroy(rtsync);

        /*
         * If the object for the space map has changed then dirty
         * the top level so that we update the config.
         */
        if (object != space_map_object(vd->vdev_dtl_sm)) {
                vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, "
                    "new object %llu", (u_longlong_t)txg, spa_name(spa),
                    (u_longlong_t)object,
                    (u_longlong_t)space_map_object(vd->vdev_dtl_sm));
                vdev_config_dirty(vd->vdev_top);
        }

        dmu_tx_commit(tx);
}

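/*
 * Editor's note: vdev_dtl_sync() is driven from vdev_sync() below, which
 * drains the per-txg vdev_dtl_list of dirty leaves during pool sync.
 */
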
/*
 * Determine whether the specified vdev can be offlined/detached/removed
 * without losing data.
 */
boolean_t
vdev_dtl_required(vdev_t *vd)
{
        spa_t *spa = vd->vdev_spa;
        vdev_t *tvd = vd->vdev_top;
        uint8_t cant_read = vd->vdev_cant_read;
        boolean_t required;

        ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

        if (vd == spa->spa_root_vdev || vd == tvd)
                return (B_TRUE);

        /*
         * Temporarily mark the device as unreadable, and then determine
         * whether this results in any DTL outages in the top-level vdev.
         * If not, we can safely offline/detach/remove the device.
         */
        vd->vdev_cant_read = B_TRUE;
        vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);
        required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
        vd->vdev_cant_read = cant_read;
        vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);

        if (!required && zio_injection_enabled) {
                required = !!zio_handle_device_injection(vd, NULL,
                    SET_ERROR(ECHILD));
        }

        return (required);
}

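/*
 * Editor's note (illustration): vdev_offline_locked() below is one such
 * caller -- it refuses to offline a leaf for which vdev_dtl_required()
 * returns B_TRUE, failing the request with EBUSY.
 */
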
/*
 * Determine if resilver is needed, and if so the txg range.
 */
boolean_t
vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
{
        boolean_t needed = B_FALSE;
        uint64_t thismin = UINT64_MAX;
        uint64_t thismax = 0;

        if (vd->vdev_children == 0) {
                mutex_enter(&vd->vdev_dtl_lock);
                if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
                    vdev_writeable(vd)) {

                        thismin = vdev_dtl_min(vd);
                        thismax = vdev_dtl_max(vd);
                        needed = B_TRUE;
                }
                mutex_exit(&vd->vdev_dtl_lock);
        } else {
                for (int c = 0; c < vd->vdev_children; c++) {
                        vdev_t *cvd = vd->vdev_child[c];
                        uint64_t cmin, cmax;

                        if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
                                thismin = MIN(thismin, cmin);
                                thismax = MAX(thismax, cmax);
                                needed = B_TRUE;
                        }
                }
        }

        if (needed && minp) {
                *minp = thismin;
                *maxp = thismax;
        }
        return (needed);
}

/*
 * Gets the checkpoint space map object from the vdev's ZAP.  On success
 * sm_obj will contain either the checkpoint spacemap object or zero if
 * none exists.  All other errors are returned to the caller.
 */
int
vdev_checkpoint_sm_object(vdev_t *vd, uint64_t *sm_obj)
{
        ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));

        if (vd->vdev_top_zap == 0) {
                *sm_obj = 0;
                return (0);
        }

        int error = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap,
            VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, sm_obj);
        if (error == ENOENT) {
                *sm_obj = 0;
                error = 0;
        }

        return (error);
}

int
vdev_load(vdev_t *vd)
{
        int children = vd->vdev_children;
        int error = 0;
        taskq_t *tq = NULL;

        /*
         * It's only worthwhile to use the taskq for the root vdev, because
         * the slow part is metaslab_init, and that only happens for top-level
         * vdevs.
         */
        if (vd->vdev_ops == &vdev_root_ops && vd->vdev_children > 0) {
                tq = taskq_create("vdev_load", children, minclsyspri,
                    children, children, TASKQ_PREPOPULATE);
        }

        /*
         * Recursively load all children.
         */
        for (int c = 0; c < vd->vdev_children; c++) {
                vdev_t *cvd = vd->vdev_child[c];

                if (tq == NULL || vdev_uses_zvols(cvd)) {
                        cvd->vdev_load_error = vdev_load(cvd);
                } else {
                        VERIFY(taskq_dispatch(tq, vdev_load_child,
                            cvd, TQ_SLEEP) != TASKQID_INVALID);
                }
        }

        if (tq != NULL) {
                taskq_wait(tq);
                taskq_destroy(tq);
        }

        for (int c = 0; c < vd->vdev_children; c++) {
                int error = vd->vdev_child[c]->vdev_load_error;

                if (error != 0)
                        return (error);
        }

        vdev_set_deflate_ratio(vd);

        /*
         * On the spa_load path, grab the allocation bias from our zap.
         */
        if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
                spa_t *spa = vd->vdev_spa;
                char bias_str[64];

                error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
                    VDEV_TOP_ZAP_ALLOCATION_BIAS, 1, sizeof (bias_str),
                    bias_str);
                if (error == 0) {
                        ASSERT(vd->vdev_alloc_bias == VDEV_BIAS_NONE);
                        vd->vdev_alloc_bias = vdev_derive_alloc_bias(bias_str);
                } else if (error != ENOENT) {
                        vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_CORRUPT_DATA);
                        vdev_dbgmsg(vd, "vdev_load: zap_lookup(top_zap=%llu) "
                            "failed [error=%d]",
                            (u_longlong_t)vd->vdev_top_zap, error);
                        return (error);
                }
        }

        /*
         * Load any rebuild state from the top-level vdev zap.
         */
        if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
                error = vdev_rebuild_load(vd);
                if (error && error != ENOTSUP) {
                        vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_CORRUPT_DATA);
                        vdev_dbgmsg(vd, "vdev_load: vdev_rebuild_load "
                            "failed [error=%d]", error);
                        return (error);
                }
        }

        /*
         * If this is a top-level vdev, initialize its metaslabs.
         */
        if (vd == vd->vdev_top && vdev_is_concrete(vd)) {
                vdev_metaslab_group_create(vd);

                if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
                        vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_CORRUPT_DATA);
                        vdev_dbgmsg(vd, "vdev_load: invalid size. "
                            "ashift=%llu, asize=%llu",
                            (u_longlong_t)vd->vdev_ashift,
                            (u_longlong_t)vd->vdev_asize);
                        return (SET_ERROR(ENXIO));
                }

                error = vdev_metaslab_init(vd, 0);
                if (error != 0) {
                        vdev_dbgmsg(vd, "vdev_load: metaslab_init failed "
                            "[error=%d]", error);
                        vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_CORRUPT_DATA);
                        return (error);
                }

                uint64_t checkpoint_sm_obj;
                error = vdev_checkpoint_sm_object(vd, &checkpoint_sm_obj);
                if (error == 0 && checkpoint_sm_obj != 0) {
                        objset_t *mos = spa_meta_objset(vd->vdev_spa);
                        ASSERT(vd->vdev_asize != 0);
                        ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);

                        error = space_map_open(&vd->vdev_checkpoint_sm,
                            mos, checkpoint_sm_obj, 0, vd->vdev_asize,
                            vd->vdev_ashift);
                        if (error != 0) {
                                vdev_dbgmsg(vd, "vdev_load: space_map_open "
                                    "failed for checkpoint spacemap "
                                    "(obj %llu) [error=%d]",
                                    (u_longlong_t)checkpoint_sm_obj, error);
                                return (error);
                        }
                        ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);

                        /*
                         * Since the checkpoint_sm contains free entries
                         * exclusively we can use space_map_allocated() to
                         * indicate the cumulative checkpointed space that
                         * has been freed.
                         */
                        vd->vdev_stat.vs_checkpoint_space =
                            -space_map_allocated(vd->vdev_checkpoint_sm);
                        vd->vdev_spa->spa_checkpoint_info.sci_dspace +=
                            vd->vdev_stat.vs_checkpoint_space;
                } else if (error != 0) {
                        vdev_dbgmsg(vd, "vdev_load: failed to retrieve "
                            "checkpoint space map object from vdev ZAP "
                            "[error=%d]", error);
                        return (error);
                }
        }

        /*
         * If this is a leaf vdev, load its DTL.
         */
        if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
                vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed "
                    "[error=%d]", error);
                return (error);
        }

        uint64_t obsolete_sm_object;
        error = vdev_obsolete_sm_object(vd, &obsolete_sm_object);
        if (error == 0 && obsolete_sm_object != 0) {
                objset_t *mos = vd->vdev_spa->spa_meta_objset;
                ASSERT(vd->vdev_asize != 0);
                ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);

                if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
                    obsolete_sm_object, 0, vd->vdev_asize, 0))) {
                        vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
                            VDEV_AUX_CORRUPT_DATA);
                        vdev_dbgmsg(vd, "vdev_load: space_map_open failed "
                            "for obsolete spacemap (obj %llu) [error=%d]",
                            (u_longlong_t)obsolete_sm_object, error);
                        return (error);
                }
        } else if (error != 0) {
                vdev_dbgmsg(vd, "vdev_load: failed to retrieve obsolete "
                    "space map object from vdev ZAP [error=%d]", error);
                return (error);
        }

        return (0);
}

/*
 * The special vdev case is used for hot spares and l2cache devices.  Its
 * sole purpose is to set the vdev state for the associated vdev.  To do this,
 * we make sure that we can open the underlying device, then try to read the
 * label, and make sure that the label is sane and that it hasn't been
 * repurposed to another pool.
 */
int
vdev_validate_aux(vdev_t *vd)
{
        nvlist_t *label;
        uint64_t guid, version;
        uint64_t state;

        if (!vdev_readable(vd))
                return (0);

        if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
                vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                return (-1);
        }

        if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
            !SPA_VERSION_IS_SUPPORTED(version) ||
            nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
            guid != vd->vdev_guid ||
            nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
                vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_CORRUPT_DATA);
                nvlist_free(label);
                return (-1);
        }

        /*
         * We don't actually check the pool state here.  If it's in fact in
         * use by another pool, we update this fact on the fly when requested.
         */
        nvlist_free(label);
        return (0);
}

static void
vdev_destroy_ms_flush_data(vdev_t *vd, dmu_tx_t *tx)
{
        objset_t *mos = spa_meta_objset(vd->vdev_spa);

        if (vd->vdev_top_zap == 0)
                return;

        uint64_t object = 0;
        int err = zap_lookup(mos, vd->vdev_top_zap,
            VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
            &object);
        if (err == ENOENT)
                return;
        VERIFY0(err);

        VERIFY0(dmu_object_free(mos, object, tx));
        VERIFY0(zap_remove(mos, vd->vdev_top_zap,
            VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, tx));
}

/*
 * Free the objects used to store this vdev's spacemaps, and the array
 * that points to them.
 */
void
vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx)
{
        if (vd->vdev_ms_array == 0)
                return;

        objset_t *mos = vd->vdev_spa->spa_meta_objset;
        uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift;
        size_t array_bytes = array_count * sizeof (uint64_t);
        uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP);
        VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0,
            array_bytes, smobj_array, 0));

        for (uint64_t i = 0; i < array_count; i++) {
                uint64_t smobj = smobj_array[i];
                if (smobj == 0)
                        continue;

                space_map_free_obj(mos, smobj, tx);
        }

        kmem_free(smobj_array, array_bytes);
        VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx));
        vdev_destroy_ms_flush_data(vd, tx);
        vd->vdev_ms_array = 0;
}

static void
vdev_remove_empty_log(vdev_t *vd, uint64_t txg)
{
        spa_t *spa = vd->vdev_spa;

        ASSERT(vd->vdev_islog);
        ASSERT(vd == vd->vdev_top);
        ASSERT3U(txg, ==, spa_syncing_txg(spa));

        dmu_tx_t *tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

        vdev_destroy_spacemaps(vd, tx);
        if (vd->vdev_top_zap != 0) {
                vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx);
                vd->vdev_top_zap = 0;
        }

        dmu_tx_commit(tx);
}

void
vdev_sync_done(vdev_t *vd, uint64_t txg)
{
        metaslab_t *msp;
        boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list,
            TXG_CLEAN(txg));

        ASSERT(vdev_is_concrete(vd));

        while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
            != NULL)
                metaslab_sync_done(msp, txg);

        if (reassess) {
                metaslab_sync_reassess(vd->vdev_mg);
                if (vd->vdev_log_mg != NULL)
                        metaslab_sync_reassess(vd->vdev_log_mg);
        }
}

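/*
 * Editor's note: per the ASSERT below, vdev_sync() runs in syncing context
 * for the currently syncing txg; it pushes out dirty metaslabs via
 * metaslab_sync() and dirty leaf DTLs via vdev_dtl_sync().
 */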
void
vdev_sync(vdev_t *vd, uint64_t txg)
{
        spa_t *spa = vd->vdev_spa;
        vdev_t *lvd;
        metaslab_t *msp;

        ASSERT3U(txg, ==, spa->spa_syncing_txg);
        dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
        if (range_tree_space(vd->vdev_obsolete_segments) > 0) {
                ASSERT(vd->vdev_removing ||
                    vd->vdev_ops == &vdev_indirect_ops);

                vdev_indirect_sync_obsolete(vd, tx);

                /*
                 * If the vdev is indirect, it can't have dirty
                 * metaslabs or DTLs.
                 */
                if (vd->vdev_ops == &vdev_indirect_ops) {
                        ASSERT(txg_list_empty(&vd->vdev_ms_list, txg));
                        ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg));
                        dmu_tx_commit(tx);
                        return;
                }
        }

        ASSERT(vdev_is_concrete(vd));

        if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 &&
            !vd->vdev_removing) {
                ASSERT(vd == vd->vdev_top);
                ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
                vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
                    DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
                ASSERT(vd->vdev_ms_array != 0);
                vdev_config_dirty(vd);
        }

        while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
                metaslab_sync(msp, txg);
                (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
        }

        while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
                vdev_dtl_sync(lvd, txg);

        /*
         * If this is an empty log device being removed, destroy the
         * metadata associated with it.
         */
        if (vd->vdev_islog && vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing)
                vdev_remove_empty_log(vd, txg);

        (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
        dmu_tx_commit(tx);
}

uint64_t
vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
{
        return (vd->vdev_ops->vdev_op_asize(vd, psize));
}

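/*
 * Editor's note (illustration): asize ("allocated size") covers psize
 * ("physical size") plus any replication or parity overhead, so the result
 * is vdev-type specific -- e.g. roughly psize rounded up to the ashift for
 * a plain disk or mirror, and inflated by parity sectors for RAID-Z.
 */
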
/*
 * Mark the given vdev faulted.  A faulted vdev behaves as if the device could
 * not be opened, and no I/O is attempted.
 */
int
vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
        vdev_t *vd, *tvd;

        spa_vdev_state_enter(spa, SCL_NONE);

        if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
                return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));

        if (!vd->vdev_ops->vdev_op_leaf)
                return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));

        tvd = vd->vdev_top;

        /*
         * If the user did a 'zpool offline -f' then make the fault persist
         * across reboots.
         */
        if (aux == VDEV_AUX_EXTERNAL_PERSIST) {
                /*
                 * There are two kinds of forced faults: temporary and
                 * persistent.  Temporary faults go away at pool import, while
                 * persistent faults stay set.  Both types of faults can be
                 * cleared with a zpool clear.
                 *
                 * We tell if a vdev is persistently faulted by looking at the
                 * ZPOOL_CONFIG_AUX_STATE nvpair.  If it's set to "external"
                 * at import then it's a persistent fault.  Otherwise, it's
                 * temporary.  We get ZPOOL_CONFIG_AUX_STATE set to "external"
                 * by setting vd.vdev_stat.vs_aux to VDEV_AUX_EXTERNAL.  This
                 * tells vdev_config_generate() (which gets run later) to set
                 * ZPOOL_CONFIG_AUX_STATE to "external" in the nvlist.
                 */
                vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
                vd->vdev_tmpoffline = B_FALSE;
                aux = VDEV_AUX_EXTERNAL;
        } else {
                vd->vdev_tmpoffline = B_TRUE;
        }

        /*
         * We don't directly use the aux state here, but if we do a
         * vdev_reopen(), we need this value to be present to remember why we
         * were faulted.
         */
        vd->vdev_label_aux = aux;

        /*
         * Faulted state takes precedence over degraded.
         */
        vd->vdev_delayed_close = B_FALSE;
        vd->vdev_faulted = 1ULL;
        vd->vdev_degraded = 0ULL;
        vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);

        /*
         * If this device has the only valid copy of the data, then
         * back off and simply mark the vdev as degraded instead.
         */
        if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
            vdev_dtl_required(vd)) {
                vd->vdev_degraded = 1ULL;
                vd->vdev_faulted = 0ULL;

                /*
                 * If we reopen the device and it's not dead, only then do we
                 * mark it degraded.
                 */
                vdev_reopen(tvd);

                if (vdev_readable(vd))
                        vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
        }

        return (spa_vdev_state_exit(spa, vd, 0));
}

/*
 * Mark the given vdev degraded.  A degraded vdev is purely an indication to
 * the user that something is wrong.  The vdev continues to operate as normal
 * as far as I/O is concerned.
 */
int
vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
        vdev_t *vd;

        spa_vdev_state_enter(spa, SCL_NONE);

        if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
                return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));

        if (!vd->vdev_ops->vdev_op_leaf)
                return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));

        /*
         * If the vdev is already faulted, then don't do anything.
         */
        if (vd->vdev_faulted || vd->vdev_degraded)
                return (spa_vdev_state_exit(spa, NULL, 0));

        vd->vdev_degraded = 1ULL;
        if (!vdev_is_dead(vd))
                vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);

        return (spa_vdev_state_exit(spa, vd, 0));
}

/*
 * Online the given vdev.
 *
 * If 'ZFS_ONLINE_UNSPARE' is set, it implies two things.  First, any attached
 * spare device should be detached when the device finishes resilvering.
 * Second, the online should be treated like a 'test' online case, so no FMA
 * events are generated if the device fails to open.
 */
int
vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
{
        vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
        boolean_t wasoffline;
        vdev_state_t oldstate;

        spa_vdev_state_enter(spa, SCL_NONE);

        if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
                return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));

        if (!vd->vdev_ops->vdev_op_leaf)
                return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));

        wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline);
        oldstate = vd->vdev_state;

        tvd = vd->vdev_top;
        vd->vdev_offline = B_FALSE;
        vd->vdev_tmpoffline = B_FALSE;
        vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
        vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);

        /* XXX - L2ARC 1.0 does not support expansion */
        if (!vd->vdev_aux) {
                for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
                        pvd->vdev_expanding = !!((flags & ZFS_ONLINE_EXPAND) ||
                            spa->spa_autoexpand);
                vd->vdev_expansion_time = gethrestime_sec();
        }

        vdev_reopen(tvd);
        vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;

        if (!vd->vdev_aux) {
                for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
                        pvd->vdev_expanding = B_FALSE;
        }

        if (newstate)
                *newstate = vd->vdev_state;
        if ((flags & ZFS_ONLINE_UNSPARE) &&
            !vdev_is_dead(vd) && vd->vdev_parent &&
            vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
            vd->vdev_parent->vdev_child[0] == vd)
                vd->vdev_unspare = B_TRUE;

        if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {

                /* XXX - L2ARC 1.0 does not support expansion */
                if (vd->vdev_aux)
                        return (spa_vdev_state_exit(spa, vd, ENOTSUP));
                spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
        }

        /* Restart initializing if necessary */
        mutex_enter(&vd->vdev_initialize_lock);
        if (vdev_writeable(vd) &&
            vd->vdev_initialize_thread == NULL &&
            vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) {
                (void) vdev_initialize(vd);
        }
        mutex_exit(&vd->vdev_initialize_lock);

        /*
         * Restart trimming if necessary.  We do not restart trimming for
         * cache devices here.  This is triggered by l2arc_rebuild_vdev()
         * asynchronously for the whole device or in l2arc_evict() as it
         * evicts space for upcoming writes.
         */
        mutex_enter(&vd->vdev_trim_lock);
        if (vdev_writeable(vd) && !vd->vdev_isl2cache &&
            vd->vdev_trim_thread == NULL &&
            vd->vdev_trim_state == VDEV_TRIM_ACTIVE) {
                (void) vdev_trim(vd, vd->vdev_trim_rate, vd->vdev_trim_partial,
                    vd->vdev_trim_secure);
        }
        mutex_exit(&vd->vdev_trim_lock);

        if (wasoffline ||
            (oldstate < VDEV_STATE_DEGRADED &&
            vd->vdev_state >= VDEV_STATE_DEGRADED))
                spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE);

        return (spa_vdev_state_exit(spa, vd, 0));
}

static int
vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
{
        vdev_t *vd, *tvd;
        int error = 0;
        uint64_t generation;
        metaslab_group_t *mg;

top:
        spa_vdev_state_enter(spa, SCL_ALLOC);

        if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
                return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));

        if (!vd->vdev_ops->vdev_op_leaf)
                return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));

        if (vd->vdev_ops == &vdev_draid_spare_ops)
                return (spa_vdev_state_exit(spa, NULL, ENOTSUP));

        tvd = vd->vdev_top;
        mg = tvd->vdev_mg;
        generation = spa->spa_config_generation + 1;

        /*
         * If the device isn't already offline, try to offline it.
         */
        if (!vd->vdev_offline) {
                /*
                 * If this device has the only valid copy of some data,
                 * don't allow it to be offlined.  Log devices are always
                 * expendable.
                 */
                if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
                    vdev_dtl_required(vd))
                        return (spa_vdev_state_exit(spa, NULL,
                            SET_ERROR(EBUSY)));

                /*
                 * If the top-level is a slog and it has had allocations
                 * then proceed.  We check that the vdev's metaslab group
                 * is not NULL since it's possible that we may have just
                 * added this vdev but not yet initialized its metaslabs.
                 */
                if (tvd->vdev_islog && mg != NULL) {
                        /*
                         * Prevent any future allocations.
                         */
                        ASSERT3P(tvd->vdev_log_mg, ==, NULL);
                        metaslab_group_passivate(mg);
                        (void) spa_vdev_state_exit(spa, vd, 0);

                        error = spa_reset_logs(spa);

                        /*
                         * If the log device was successfully reset but has
                         * checkpointed data, do not offline it.
                         */
                        if (error == 0 &&
                            tvd->vdev_checkpoint_sm != NULL) {
                                ASSERT3U(space_map_allocated(
                                    tvd->vdev_checkpoint_sm), !=, 0);
                                error = ZFS_ERR_CHECKPOINT_EXISTS;
                        }

                        spa_vdev_state_enter(spa, SCL_ALLOC);

                        /*
                         * Check to see if the config has changed.
                         */
                        if (error || generation !=
                            spa->spa_config_generation) {
                                metaslab_group_activate(mg);
                                if (error)
                                        return (spa_vdev_state_exit(spa,
                                            vd, error));
                                (void) spa_vdev_state_exit(spa, vd, 0);
                                goto top;
                        }
                        ASSERT0(tvd->vdev_stat.vs_alloc);
                }

                /*
                 * Offline this device and reopen its top-level vdev.
                 * If the top-level vdev is a log device then just offline
                 * it.  Otherwise, if this action results in the top-level
                 * vdev becoming unusable, undo it and fail the request.
                 */
                vd->vdev_offline = B_TRUE;
                vdev_reopen(tvd);

                if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
                    vdev_is_dead(tvd)) {
                        vd->vdev_offline = B_FALSE;
                        vdev_reopen(tvd);
                        return (spa_vdev_state_exit(spa, NULL,
                            SET_ERROR(EBUSY)));
                }

                /*
                 * Add the device back into the metaslab rotor so that
                 * once we online the device it's open for business.
                 */
                if (tvd->vdev_islog && mg != NULL)
                        metaslab_group_activate(mg);
        }

        vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);

        return (spa_vdev_state_exit(spa, vd, 0));
}

int
vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
{
        int error;

        mutex_enter(&spa->spa_vdev_top_lock);
        error = vdev_offline_locked(spa, guid, flags);
        mutex_exit(&spa->spa_vdev_top_lock);

        return (error);
}

/*
 * Clear the error counts associated with this vdev.  Unlike vdev_online() and
 * vdev_offline(), we assume the spa config is locked.  We also clear all
 * children.  If 'vd' is NULL, then the user wants to clear all vdevs.
 */
void
vdev_clear(spa_t *spa, vdev_t *vd)
{
        vdev_t *rvd = spa->spa_root_vdev;

        ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);

        if (vd == NULL)
                vd = rvd;

        vd->vdev_stat.vs_read_errors = 0;
        vd->vdev_stat.vs_write_errors = 0;
        vd->vdev_stat.vs_checksum_errors = 0;
        vd->vdev_stat.vs_slow_ios = 0;

        for (int c = 0; c < vd->vdev_children; c++)
                vdev_clear(spa, vd->vdev_child[c]);

        /*
         * It makes no sense to "clear" an indirect vdev.
         */
        if (!vdev_is_concrete(vd))
                return;

        /*
         * If we're in the FAULTED state or have experienced failed I/O, then
         * clear the persistent state and attempt to reopen the device.  We
         * also mark the vdev config dirty, so that the new faulted state is
         * written out to disk.
         */
        if (vd->vdev_faulted || vd->vdev_degraded ||
            !vdev_readable(vd) || !vdev_writeable(vd)) {
                /*
                 * When reopening in response to a clear event, it may be due
                 * to a fmadm repair request.  In this case, if the device is
                 * still broken, we still want to post the ereport again.
                 */
                vd->vdev_forcefault = B_TRUE;

                vd->vdev_faulted = vd->vdev_degraded = 0ULL;
                vd->vdev_cant_read = B_FALSE;
                vd->vdev_cant_write = B_FALSE;
                vd->vdev_stat.vs_aux = 0;

                vdev_reopen(vd == rvd ? rvd : vd->vdev_top);

                vd->vdev_forcefault = B_FALSE;

                if (vd != rvd && vdev_writeable(vd->vdev_top))
                        vdev_state_dirty(vd->vdev_top);

                /* If a resilver isn't required, check if vdevs can be culled */
                if (vd->vdev_aux == NULL && !vdev_is_dead(vd) &&
                    !dsl_scan_resilvering(spa->spa_dsl_pool) &&
                    !dsl_scan_resilver_scheduled(spa->spa_dsl_pool))
                        spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);

                spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
        }

        /*
         * When clearing a FMA-diagnosed fault, we always want to
         * unspare the device, as we assume that the original spare was
         * done in response to the FMA fault.
         */
        if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
            vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
            vd->vdev_parent->vdev_child[0] == vd)
                vd->vdev_unspare = B_TRUE;

        /* Clear recent error events cache (i.e. duplicate events tracking) */
        zfs_ereport_clear(spa, vd);
}

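/*
 * Editor's note: this is the kernel-side work behind 'zpool clear', the
 * command the forced-fault comment in vdev_fault() above refers to.
 */
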
boolean_t
vdev_is_dead(vdev_t *vd)
{
        /*
         * Holes and missing devices are always considered "dead".
         * This simplifies the code since we don't have to check for
         * these types of devices in the various code paths.
         * Instead we rely on the fact that we skip over dead devices
         * before issuing I/O to them.
         */
        return (vd->vdev_state < VDEV_STATE_DEGRADED ||
            vd->vdev_ops == &vdev_hole_ops ||
            vd->vdev_ops == &vdev_missing_ops);
}

boolean_t
vdev_readable(vdev_t *vd)
{
        return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
}

boolean_t
vdev_writeable(vdev_t *vd)
{
        return (!vdev_is_dead(vd) && !vd->vdev_cant_write &&
            vdev_is_concrete(vd));
}

boolean_t
vdev_allocatable(vdev_t *vd)
{
        uint64_t state = vd->vdev_state;

        /*
         * We currently allow allocations from vdevs which may be in the
         * process of reopening (i.e. VDEV_STATE_CLOSED).  If the device
         * fails to reopen then we'll catch it later when we're holding
         * the proper locks.  Note that we have to get the vdev state
         * in a local variable because although it changes atomically,
         * we're asking two separate questions about it.
         */
        return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
            !vd->vdev_cant_write && vdev_is_concrete(vd) &&
            vd->vdev_mg->mg_initialized);
}

boolean_t
vdev_accessible(vdev_t *vd, zio_t *zio)
{
        ASSERT(zio->io_vd == vd);

        if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
                return (B_FALSE);

        if (zio->io_type == ZIO_TYPE_READ)
                return (!vd->vdev_cant_read);

        if (zio->io_type == ZIO_TYPE_WRITE)
                return (!vd->vdev_cant_write);

        return (B_TRUE);
}

193a37cb TH |
4284 | static void |
4285 | vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs) | |
34dc7c2f | 4286 | { |
b2255edc BB |
4287 | /* |
4288 | * Exclude the dRAID spare when aggregating to avoid double counting | |
4289 | * the ops and bytes. These IOs are counted by the physical leaves. | |
4290 | */ | |
4291 | if (cvd->vdev_ops == &vdev_draid_spare_ops) | |
4292 | return; | |
4293 | ||
1b939560 | 4294 | for (int t = 0; t < VS_ZIO_TYPES; t++) { |
193a37cb TH |
4295 | vs->vs_ops[t] += cvs->vs_ops[t]; |
4296 | vs->vs_bytes[t] += cvs->vs_bytes[t]; | |
4297 | } | |
34dc7c2f | 4298 | |
193a37cb TH |
4299 | cvs->vs_scan_removing = cvd->vdev_removing; |
4300 | } | |
f3a7f661 | 4301 | |
193a37cb TH |
4302 | /* |
4303 | * Get extended stats | |
4304 | */ | |
4305 | static void | |
4306 | vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx) | |
4307 | { | |
14e4e3cb AZ |
4308 | (void) cvd; |
4309 | ||
193a37cb TH |
4310 | int t, b; |
4311 | for (t = 0; t < ZIO_TYPES; t++) { | |
7e945072 | 4312 | for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++) |
193a37cb | 4313 | vsx->vsx_disk_histo[t][b] += cvsx->vsx_disk_histo[t][b]; |
7e945072 TH |
4314 | |
4315 | for (b = 0; b < ARRAY_SIZE(vsx->vsx_total_histo[0]); b++) { | |
193a37cb TH |
4316 | vsx->vsx_total_histo[t][b] += |
4317 | cvsx->vsx_total_histo[t][b]; | |
4318 | } | |
f38dfec3 | 4319 | } |
34dc7c2f | 4320 | |
193a37cb | 4321 | for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) { |
7e945072 | 4322 | for (b = 0; b < ARRAY_SIZE(vsx->vsx_queue_histo[0]); b++) { |
193a37cb TH |
4323 | vsx->vsx_queue_histo[t][b] += |
4324 | cvsx->vsx_queue_histo[t][b]; | |
4325 | } | |
4326 | vsx->vsx_active_queue[t] += cvsx->vsx_active_queue[t]; | |
4327 | vsx->vsx_pend_queue[t] += cvsx->vsx_pend_queue[t]; | |
7e945072 TH |
4328 | |
4329 | for (b = 0; b < ARRAY_SIZE(vsx->vsx_ind_histo[0]); b++) | |
4330 | vsx->vsx_ind_histo[t][b] += cvsx->vsx_ind_histo[t][b]; | |
4331 | ||
4332 | for (b = 0; b < ARRAY_SIZE(vsx->vsx_agg_histo[0]); b++) | |
4333 | vsx->vsx_agg_histo[t][b] += cvsx->vsx_agg_histo[t][b]; | |
193a37cb | 4334 | } |
7e945072 | 4335 | |
193a37cb TH |
4336 | } |
4337 | ||
d2734cce SD |
4338 | boolean_t |
4339 | vdev_is_spacemap_addressable(vdev_t *vd) | |
4340 | { | |
419ba591 SD |
4341 | if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_SPACEMAP_V2)) |
4342 | return (B_TRUE); | |
4343 | ||
d2734cce | 4344 | /* |
419ba591 SD |
4345 | * If double-word space map entries are not enabled we assume |
4346 | * 47 bits of the space map entry are dedicated to the entry's | |
4347 | * offset (see SM_OFFSET_BITS in space_map.h). We then use that | |
4348 | * to calculate the maximum address that can be described by a | |
4349 | * space map entry for the given device. | |
d2734cce | 4350 | */ |
419ba591 | 4351 | uint64_t shift = vd->vdev_ashift + SM_OFFSET_BITS; |
d2734cce SD |
4352 | |
4353 | if (shift >= 63) /* detect potential overflow */ | |
4354 | return (B_TRUE); | |
4355 | ||
4356 | return (vd->vdev_asize < (1ULL << shift)); | |
4357 | } | |
4358 | ||
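The addressability check above reduces to simple shift arithmetic. A minimal userspace sketch, assuming SM_OFFSET_BITS is 47 as the comment states: with ashift=9 the single-word limit works out to 1ULL << 56 bytes (64 PiB).

	#include <stdio.h>
	#include <stdint.h>

	#define	SM_OFFSET_BITS	47	/* per the comment above; see space_map.h */

	/* Userspace sketch of the single-word space map addressability check. */
	static int
	spacemap_v1_addressable(uint64_t ashift, uint64_t asize)
	{
		uint64_t shift = ashift + SM_OFFSET_BITS;

		if (shift >= 63)	/* shifting further would overflow */
			return (1);
		return (asize < (1ULL << shift));
	}

	int
	main(void)
	{
		/* ashift=9: limit is 1ULL << 56 (64 PiB), so 2^57 is too big. */
		printf("%d\n", spacemap_v1_addressable(9, 1ULL << 57));	/* 0 */
		/* ashift=12: limit rises to 1ULL << 59 (512 PiB). */
		printf("%d\n", spacemap_v1_addressable(12, 1ULL << 57));	/* 1 */
		return (0);
	}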
193a37cb TH |
4359 | /* |
4360 | * Get statistics for the given vdev. | |
4361 | */ | |
4362 | static void | |
4363 | vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx) | |
4364 | { | |
1c27024e | 4365 | int t; |
34dc7c2f BB |
4366 | /* |
4367 | * If we're getting stats on the root vdev, aggregate the I/O counts | |
4368 | * over all top-level vdevs (i.e. the direct children of the root). | |
4369 | */ | |
193a37cb TH |
4370 | if (!vd->vdev_ops->vdev_op_leaf) { |
4371 | if (vs) { | |
4372 | memset(vs->vs_ops, 0, sizeof (vs->vs_ops)); | |
4373 | memset(vs->vs_bytes, 0, sizeof (vs->vs_bytes)); | |
4374 | } | |
4375 | if (vsx) | |
4376 | memset(vsx, 0, sizeof (*vsx)); | |
4377 | ||
1c27024e | 4378 | for (int c = 0; c < vd->vdev_children; c++) { |
193a37cb | 4379 | vdev_t *cvd = vd->vdev_child[c]; |
34dc7c2f | 4380 | vdev_stat_t *cvs = &cvd->vdev_stat; |
193a37cb TH |
4381 | vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex; |
4382 | ||
4383 | vdev_get_stats_ex_impl(cvd, cvs, cvsx); | |
4384 | if (vs) | |
4385 | vdev_get_child_stat(cvd, vs, cvs); | |
4386 | if (vsx) | |
4387 | vdev_get_child_stat_ex(cvd, vsx, cvsx); | |
193a37cb TH |
4388 | } |
4389 | } else { | |
4390 | /* | |
4391 | * We're a leaf. Just copy our ZIO active queue stats in. The | |
4392 | * other leaf stats are updated in vdev_stat_update(). | |
4393 | */ | |
4394 | if (!vsx) | |
4395 | return; | |
4396 | ||
4397 | memcpy(vsx, &vd->vdev_stat_ex, sizeof (vd->vdev_stat_ex)); | |
4398 | ||
4399 | for (t = 0; t < ARRAY_SIZE(vd->vdev_queue.vq_class); t++) { | |
4400 | vsx->vsx_active_queue[t] = | |
4401 | vd->vdev_queue.vq_class[t].vqc_active; | |
4402 | vsx->vsx_pend_queue[t] = avl_numnodes( | |
4403 | &vd->vdev_queue.vq_class[t].vqc_queued_tree); | |
34dc7c2f BB |
4404 | } |
4405 | } | |
193a37cb TH |
4406 | } |
4407 | ||
4408 | void | |
4409 | vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx) | |
4410 | { | |
0f676dc2 | 4411 | vdev_t *tvd = vd->vdev_top; |
193a37cb TH |
4412 | mutex_enter(&vd->vdev_stat_lock); |
4413 | if (vs) { | |
861166b0 | 4414 | memcpy(vs, &vd->vdev_stat, sizeof (*vs)); |
193a37cb TH |
4415 | vs->vs_timestamp = gethrtime() - vs->vs_timestamp; |
4416 | vs->vs_state = vd->vdev_state; | |
4417 | vs->vs_rsize = vdev_get_min_asize(vd); | |
9a49d3f3 | 4418 | |
619f0976 | 4419 | if (vd->vdev_ops->vdev_op_leaf) { |
1282274f | 4420 | vs->vs_pspace = vd->vdev_psize; |
193a37cb TH |
4421 | vs->vs_rsize += VDEV_LABEL_START_SIZE + |
4422 | VDEV_LABEL_END_SIZE; | |
619f0976 | 4423 | /* |
1b939560 | 4424 | * Report initializing progress. Since we don't |
619f0976 GW |
4425 | * have the initializing locks held, this is only |
4426 | * an estimate (although a fairly accurate one). | |
4427 | */ | |
4428 | vs->vs_initialize_bytes_done = | |
4429 | vd->vdev_initialize_bytes_done; | |
4430 | vs->vs_initialize_bytes_est = | |
4431 | vd->vdev_initialize_bytes_est; | |
4432 | vs->vs_initialize_state = vd->vdev_initialize_state; | |
4433 | vs->vs_initialize_action_time = | |
4434 | vd->vdev_initialize_action_time; | |
1b939560 BB |
4435 | |
4436 | /* | |
4437 | * Report manual TRIM progress. Since we don't have | |
4438 | * the manual TRIM locks held, this is only an | |
4439 | * estimate (although a fairly accurate one).
4440 | */ | |
4441 | vs->vs_trim_notsup = !vd->vdev_has_trim; | |
4442 | vs->vs_trim_bytes_done = vd->vdev_trim_bytes_done; | |
4443 | vs->vs_trim_bytes_est = vd->vdev_trim_bytes_est; | |
4444 | vs->vs_trim_state = vd->vdev_trim_state; | |
4445 | vs->vs_trim_action_time = vd->vdev_trim_action_time; | |
9a49d3f3 BB |
4446 | |
4447 | /* Set when there is a deferred resilver. */ | |
4448 | vs->vs_resilver_deferred = vd->vdev_resilver_deferred; | |
619f0976 | 4449 | } |
9a49d3f3 | 4450 | |
0f676dc2 | 4451 | /* |
1b939560 | 4452 | * Report expandable space on top-level, non-auxiliary devices |
0f676dc2 GM |
4453 | * only. The expandable space is reported in terms of metaslab |
4454 | * sized units since that determines how much space the pool | |
4455 | * can expand. | |
4456 | */ | |
4457 | if (vd->vdev_aux == NULL && tvd != NULL) { | |
4458 | vs->vs_esize = P2ALIGN( | |
4459 | vd->vdev_max_asize - vd->vdev_asize, | |
4460 | 1ULL << tvd->vdev_ms_shift); | |
4461 | } | |
9a49d3f3 | 4462 | |
6fe3498c RM |
4463 | vs->vs_configured_ashift = vd->vdev_top != NULL |
4464 | ? vd->vdev_top->vdev_ashift : vd->vdev_ashift; | |
4465 | vs->vs_logical_ashift = vd->vdev_logical_ashift; | |
4466 | vs->vs_physical_ashift = vd->vdev_physical_ashift; | |
4467 | ||
9a49d3f3 BB |
4468 | /* |
4469 | * Report fragmentation and rebuild progress for top-level, | |
4470 | * non-auxiliary, concrete devices. | |
4471 | */ | |
193a37cb | 4472 | if (vd->vdev_aux == NULL && vd == vd->vdev_top && |
a1d477c2 | 4473 | vdev_is_concrete(vd)) { |
aa755b35 MA |
4474 | /* |
4475 | * The vdev fragmentation rating doesn't take into | |
4476 | * account the embedded slog metaslab (vdev_log_mg). | |
4477 | * Since it's only one metaslab, it would have a tiny | |
4478 | * impact on the overall fragmentation. | |
4479 | */ | |
cc99f275 DB |
4480 | vs->vs_fragmentation = (vd->vdev_mg != NULL) ? |
4481 | vd->vdev_mg->mg_fragmentation : 0; | |
193a37cb | 4482 | } |
2a673e76 AJ |
4483 | vs->vs_noalloc = MAX(vd->vdev_noalloc, |
4484 | tvd ? tvd->vdev_noalloc : 0); | |
193a37cb TH |
4485 | } |
4486 | ||
193a37cb | 4487 | vdev_get_stats_ex_impl(vd, vs, vsx); |
f3a7f661 | 4488 | mutex_exit(&vd->vdev_stat_lock); |
34dc7c2f BB |
4489 | } |
4490 | ||
193a37cb TH |
4491 | void |
4492 | vdev_get_stats(vdev_t *vd, vdev_stat_t *vs) | |
4493 | { | |
4494 | return (vdev_get_stats_ex(vd, vs, NULL)); | |
4495 | } | |
4496 | ||
34dc7c2f BB |
4497 | void |
4498 | vdev_clear_stats(vdev_t *vd) | |
4499 | { | |
4500 | mutex_enter(&vd->vdev_stat_lock); | |
4501 | vd->vdev_stat.vs_space = 0; | |
4502 | vd->vdev_stat.vs_dspace = 0; | |
4503 | vd->vdev_stat.vs_alloc = 0; | |
4504 | mutex_exit(&vd->vdev_stat_lock); | |
4505 | } | |
4506 | ||
428870ff BB |
4507 | void |
4508 | vdev_scan_stat_init(vdev_t *vd) | |
4509 | { | |
4510 | vdev_stat_t *vs = &vd->vdev_stat; | |
4511 | ||
1c27024e | 4512 | for (int c = 0; c < vd->vdev_children; c++) |
428870ff BB |
4513 | vdev_scan_stat_init(vd->vdev_child[c]); |
4514 | ||
4515 | mutex_enter(&vd->vdev_stat_lock); | |
4516 | vs->vs_scan_processed = 0; | |
4517 | mutex_exit(&vd->vdev_stat_lock); | |
4518 | } | |
4519 | ||
34dc7c2f | 4520 | void |
b128c09f | 4521 | vdev_stat_update(zio_t *zio, uint64_t psize) |
34dc7c2f | 4522 | { |
fb5f0bc8 BB |
4523 | spa_t *spa = zio->io_spa; |
4524 | vdev_t *rvd = spa->spa_root_vdev; | |
b128c09f | 4525 | vdev_t *vd = zio->io_vd ? zio->io_vd : rvd; |
34dc7c2f BB |
4526 | vdev_t *pvd; |
4527 | uint64_t txg = zio->io_txg; | |
63652e15 DS |
4528 | vdev_stat_t *vs = vd ? &vd->vdev_stat : NULL; |
4529 | vdev_stat_ex_t *vsx = vd ? &vd->vdev_stat_ex : NULL; | |
34dc7c2f BB |
4530 | zio_type_t type = zio->io_type; |
4531 | int flags = zio->io_flags; | |
4532 | ||
b128c09f BB |
4533 | /* |
4534 | * If this i/o is a gang leader, it didn't do any actual work. | |
4535 | */ | |
4536 | if (zio->io_gang_tree) | |
4537 | return; | |
4538 | ||
34dc7c2f | 4539 | if (zio->io_error == 0) { |
b128c09f BB |
4540 | /* |
4541 | * If this is a root i/o, don't count it -- we've already | |
4542 | * counted the top-level vdevs, and vdev_get_stats() will | |
4543 | * aggregate them when asked. This reduces contention on | |
4544 | * the root vdev_stat_lock and implicitly handles blocks | |
4545 | * that compress away to holes, for which there is no i/o. | |
4546 | * (Holes never create vdev children, so all the counters | |
4547 | * remain zero, which is what we want.) | |
4548 | * | |
4549 | * Note: this only applies to successful i/o (io_error == 0) | |
4550 | * because unlike i/o counts, errors are not additive. | |
4551 | * When reading a ditto block, for example, failure of | |
4552 | * one top-level vdev does not imply a root-level error. | |
4553 | */ | |
4554 | if (vd == rvd) | |
4555 | return; | |
4556 | ||
4557 | ASSERT(vd == zio->io_vd); | |
fb5f0bc8 BB |
4558 | |
4559 | if (flags & ZIO_FLAG_IO_BYPASS) | |
4560 | return; | |
4561 | ||
4562 | mutex_enter(&vd->vdev_stat_lock); | |
4563 | ||
b128c09f | 4564 | if (flags & ZIO_FLAG_IO_REPAIR) { |
9a49d3f3 BB |
4565 | /* |
4566 | * Repair is the result of a resilver issued by the | |
4567 | * scan thread (spa_sync). | |
4568 | */ | |
572e2857 | 4569 | if (flags & ZIO_FLAG_SCAN_THREAD) { |
9a49d3f3 BB |
4570 | dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan; |
4571 | dsl_scan_phys_t *scn_phys = &scn->scn_phys; | |
428870ff BB |
4572 | uint64_t *processed = &scn_phys->scn_processed; |
4573 | ||
428870ff BB |
4574 | if (vd->vdev_ops->vdev_op_leaf) |
4575 | atomic_add_64(processed, psize); | |
4576 | vs->vs_scan_processed += psize; | |
4577 | } | |
4578 | ||
9a49d3f3 BB |
4579 | /* |
4580 | * Repair is the result of a rebuild issued by the | |
b2255edc BB |
4581 | * rebuild thread (vdev_rebuild_thread). To avoid |
4582 | * double counting repaired bytes, the virtual dRAID
4583 | * spare vdev is excluded from the processed bytes. | |
9a49d3f3 BB |
4584 | */ |
4585 | if (zio->io_priority == ZIO_PRIORITY_REBUILD) { | |
4586 | vdev_t *tvd = vd->vdev_top; | |
4587 | vdev_rebuild_t *vr = &tvd->vdev_rebuild_config; | |
4588 | vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys; | |
4589 | uint64_t *rebuilt = &vrp->vrp_bytes_rebuilt; | |
4590 | ||
b2255edc BB |
4591 | if (vd->vdev_ops->vdev_op_leaf && |
4592 | vd->vdev_ops != &vdev_draid_spare_ops) { | |
9a49d3f3 | 4593 | atomic_add_64(rebuilt, psize); |
b2255edc | 4594 | } |
9a49d3f3 BB |
4595 | vs->vs_rebuild_processed += psize; |
4596 | } | |
4597 | ||
fb5f0bc8 | 4598 | if (flags & ZIO_FLAG_SELF_HEAL) |
b128c09f | 4599 | vs->vs_self_healed += psize; |
34dc7c2f | 4600 | } |
fb5f0bc8 | 4601 | |
193a37cb TH |
4602 | /* |
4603 | * The bytes/ops/histograms are recorded at the leaf level and | |
4604 | * aggregated into the higher level vdevs in vdev_get_stats(). | |
4605 | */ | |
4eb0db42 TH |
4606 | if (vd->vdev_ops->vdev_op_leaf && |
4607 | (zio->io_priority < ZIO_PRIORITY_NUM_QUEUEABLE)) { | |
1b939560 | 4608 | zio_type_t vs_type = type; |
9a49d3f3 | 4609 | zio_priority_t priority = zio->io_priority; |
1b939560 BB |
4610 | |
4611 | /* | |
4612 | * TRIM ops and bytes are reported to user space as | |
4613 | * ZIO_TYPE_IOCTL. This is done to preserve the | |
4614 | * vdev_stat_t structure layout for user space. | |
4615 | */ | |
4616 | if (type == ZIO_TYPE_TRIM) | |
4617 | vs_type = ZIO_TYPE_IOCTL; | |
193a37cb | 4618 | |
9a49d3f3 BB |
4619 | /* |
4620 | * Solely for the purposes of 'zpool iostat -lqrw' | |
bf169e9f | 4621 | * reporting, use the priority to categorize the I/O.
9a49d3f3 BB |
4622 | * Only the following are reported to user space: |
4623 | * | |
4624 | * ZIO_PRIORITY_SYNC_READ, | |
4625 | * ZIO_PRIORITY_SYNC_WRITE, | |
4626 | * ZIO_PRIORITY_ASYNC_READ, | |
4627 | * ZIO_PRIORITY_ASYNC_WRITE, | |
4628 | * ZIO_PRIORITY_SCRUB, | |
00888c08 TB |
4629 | * ZIO_PRIORITY_TRIM, |
4630 | * ZIO_PRIORITY_REBUILD. | |
9a49d3f3 | 4631 | */ |
00888c08 | 4632 | if (priority == ZIO_PRIORITY_INITIALIZING) { |
9a49d3f3 BB |
4633 | ASSERT3U(type, ==, ZIO_TYPE_WRITE); |
4634 | priority = ZIO_PRIORITY_ASYNC_WRITE; | |
4635 | } else if (priority == ZIO_PRIORITY_REMOVAL) { | |
4636 | priority = ((type == ZIO_TYPE_WRITE) ? | |
4637 | ZIO_PRIORITY_ASYNC_WRITE : | |
4638 | ZIO_PRIORITY_ASYNC_READ); | |
4639 | } | |
4640 | ||
1b939560 BB |
4641 | vs->vs_ops[vs_type]++; |
4642 | vs->vs_bytes[vs_type] += psize; | |
193a37cb | 4643 | |
7e945072 | 4644 | if (flags & ZIO_FLAG_DELEGATED) { |
9a49d3f3 | 4645 | vsx->vsx_agg_histo[priority] |
7e945072 TH |
4646 | [RQ_HISTO(zio->io_size)]++; |
4647 | } else { | |
9a49d3f3 | 4648 | vsx->vsx_ind_histo[priority] |
7e945072 TH |
4649 | [RQ_HISTO(zio->io_size)]++; |
4650 | } | |
4651 | ||
193a37cb | 4652 | if (zio->io_delta && zio->io_delay) { |
9a49d3f3 | 4653 | vsx->vsx_queue_histo[priority] |
7e945072 | 4654 | [L_HISTO(zio->io_delta - zio->io_delay)]++; |
193a37cb | 4655 | vsx->vsx_disk_histo[type] |
7e945072 | 4656 | [L_HISTO(zio->io_delay)]++; |
193a37cb | 4657 | vsx->vsx_total_histo[type] |
7e945072 | 4658 | [L_HISTO(zio->io_delta)]++; |
193a37cb TH |
4659 | } |
4660 | } | |
fb5f0bc8 BB |
4661 | |
4662 | mutex_exit(&vd->vdev_stat_lock); | |
34dc7c2f BB |
4663 | return; |
4664 | } | |
4665 | ||
4666 | if (flags & ZIO_FLAG_SPECULATIVE) | |
4667 | return; | |
4668 | ||
9babb374 BB |
4669 | /* |
4670 | * If this is an I/O error that is going to be retried, then ignore the | |
4671 | * error. Otherwise, the user may interpret B_FAILFAST I/O errors as | |
4672 | * hard errors, when in reality they can happen for any number of | |
4673 | * innocuous reasons (bus resets, MPxIO link failure, etc). | |
4674 | */ | |
4675 | if (zio->io_error == EIO && | |
4676 | !(zio->io_flags & ZIO_FLAG_IO_RETRY)) | |
4677 | return; | |
4678 | ||
428870ff BB |
4679 | /* |
4680 | * Intent log writes won't propagate their error to the root
4681 | * I/O so don't mark these types of failures as pool-level | |
4682 | * errors. | |
4683 | */ | |
4684 | if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE)) | |
4685 | return; | |
4686 | ||
4d0ba941 | 4687 | if (type == ZIO_TYPE_WRITE && txg != 0 && |
fb5f0bc8 | 4688 | (!(flags & ZIO_FLAG_IO_REPAIR) || |
572e2857 | 4689 | (flags & ZIO_FLAG_SCAN_THREAD) || |
428870ff | 4690 | spa->spa_claiming)) { |
fb5f0bc8 | 4691 | /* |
428870ff BB |
4692 | * This is either a normal write (not a repair), or it's |
4693 | * a repair induced by the scrub thread, or it's a repair | |
4694 | * made by zil_claim() during spa_load() in the first txg. | |
4695 | * In the normal case, we commit the DTL change in the same | |
4696 | * txg as the block was born. In the scrub-induced repair | |
4697 | * case, we know that scrubs run in first-pass syncing context, | |
4698 | * so we commit the DTL change in spa_syncing_txg(spa). | |
4699 | * In the zil_claim() case, we commit in spa_first_txg(spa). | |
fb5f0bc8 BB |
4700 | * |
4701 | * We currently do not make DTL entries for failed spontaneous | |
4702 | * self-healing writes triggered by normal (non-scrubbing) | |
4703 | * reads, because we have no transactional context in which to | |
4704 | * do so -- and it's not clear that it'd be desirable anyway. | |
4705 | */ | |
4706 | if (vd->vdev_ops->vdev_op_leaf) { | |
4707 | uint64_t commit_txg = txg; | |
572e2857 | 4708 | if (flags & ZIO_FLAG_SCAN_THREAD) { |
fb5f0bc8 BB |
4709 | ASSERT(flags & ZIO_FLAG_IO_REPAIR); |
4710 | ASSERT(spa_sync_pass(spa) == 1); | |
4711 | vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1); | |
428870ff BB |
4712 | commit_txg = spa_syncing_txg(spa); |
4713 | } else if (spa->spa_claiming) { | |
4714 | ASSERT(flags & ZIO_FLAG_IO_REPAIR); | |
4715 | commit_txg = spa_first_txg(spa); | |
fb5f0bc8 | 4716 | } |
428870ff | 4717 | ASSERT(commit_txg >= spa_syncing_txg(spa)); |
fb5f0bc8 | 4718 | if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1)) |
34dc7c2f | 4719 | return; |
fb5f0bc8 BB |
4720 | for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) |
4721 | vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1); | |
4722 | vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg); | |
34dc7c2f | 4723 | } |
fb5f0bc8 BB |
4724 | if (vd != rvd) |
4725 | vdev_dtl_dirty(vd, DTL_MISSING, txg, 1); | |
34dc7c2f BB |
4726 | } |
4727 | } | |
4728 | ||
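The priority folding performed above for 'zpool iostat -lqrw' can be looked at in isolation. A standalone sketch with hypothetical stand-in enums (the real values live in zio_priority_t); this is not the kernel code itself:

	#include <stdio.h>

	/* Hypothetical stand-ins for the zio priorities involved. */
	enum prio { ASYNC_READ, ASYNC_WRITE, SCRUB, INITIALIZING, REMOVAL };
	enum dir { IO_READ, IO_WRITE };

	static enum prio
	iostat_priority(enum prio p, enum dir d)
	{
		if (p == INITIALIZING)		/* initializing is always a write */
			return (ASYNC_WRITE);
		if (p == REMOVAL)		/* removal folds by I/O direction */
			return (d == IO_WRITE ? ASYNC_WRITE : ASYNC_READ);
		return (p);			/* already user-visible */
	}

	int
	main(void)
	{
		printf("%d %d\n", iostat_priority(INITIALIZING, IO_WRITE),
		    iostat_priority(REMOVAL, IO_READ));	/* prints "1 0" */
		return (0);
	}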
cc99f275 DB |
4729 | int64_t |
4730 | vdev_deflated_space(vdev_t *vd, int64_t space) | |
4731 | { | |
4732 | ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0); | |
4733 | ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache); | |
4734 | ||
4735 | return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio); | |
4736 | } | |
4737 | ||
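The deflation above is plain fixed-point arithmetic on 512-byte (SPA_MINBLOCKSIZE) units. A standalone sketch using a hypothetical deflate ratio:

	#include <stdint.h>
	#include <assert.h>

	#define	SPA_MINBLOCKSHIFT	9	/* 512-byte units, as in the ASSERT above */

	/* Sketch: deflate a raw space delta using a per-512B-block ratio. */
	static int64_t
	deflated_space(int64_t space, int64_t deflate_ratio)
	{
		assert((space & ((1 << SPA_MINBLOCKSHIFT) - 1)) == 0);
		return ((space >> SPA_MINBLOCKSHIFT) * deflate_ratio);
	}

	int
	main(void)
	{
		/*
		 * With a hypothetical ratio of 410, a 1 MiB delta (2048
		 * 512-byte units) is accounted as 2048 * 410 deflated units.
		 */
		assert(deflated_space(1 << 20, 410) == 2048 * 410);
		return (0);
	}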
34dc7c2f | 4738 | /* |
1b939560 BB |
4739 | * Update the in-core space usage stats for this vdev, its metaslab class, |
4740 | * and the root vdev. | |
34dc7c2f BB |
4741 | */ |
4742 | void | |
428870ff BB |
4743 | vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta, |
4744 | int64_t space_delta) | |
34dc7c2f | 4745 | { |
14e4e3cb | 4746 | (void) defer_delta; |
cc99f275 | 4747 | int64_t dspace_delta; |
34dc7c2f BB |
4748 | spa_t *spa = vd->vdev_spa; |
4749 | vdev_t *rvd = spa->spa_root_vdev; | |
4750 | ||
4751 | ASSERT(vd == vd->vdev_top); | |
4752 | ||
4753 | /* | |
4754 | * Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion | |
4755 | * factor. We must calculate this here and not at the root vdev | |
4756 | * because the root vdev's psize-to-asize is simply the max of its | |
e1cfd73f | 4757 | * children's, thus not accurate enough for us. |
34dc7c2f | 4758 | */ |
cc99f275 | 4759 | dspace_delta = vdev_deflated_space(vd, space_delta); |
34dc7c2f BB |
4760 | |
4761 | mutex_enter(&vd->vdev_stat_lock); | |
7558997d SD |
4762 | /* ensure we won't underflow */ |
4763 | if (alloc_delta < 0) { | |
4764 | ASSERT3U(vd->vdev_stat.vs_alloc, >=, -alloc_delta); | |
4765 | } | |
4766 | ||
34dc7c2f | 4767 | vd->vdev_stat.vs_alloc += alloc_delta; |
428870ff | 4768 | vd->vdev_stat.vs_space += space_delta; |
34dc7c2f BB |
4769 | vd->vdev_stat.vs_dspace += dspace_delta; |
4770 | mutex_exit(&vd->vdev_stat_lock); | |
4771 | ||
cc99f275 DB |
4772 | /* every class but log contributes to root space stats */ |
4773 | if (vd->vdev_mg != NULL && !vd->vdev_islog) { | |
7558997d | 4774 | ASSERT(!vd->vdev_isl2cache); |
34dc7c2f | 4775 | mutex_enter(&rvd->vdev_stat_lock); |
34dc7c2f | 4776 | rvd->vdev_stat.vs_alloc += alloc_delta; |
428870ff | 4777 | rvd->vdev_stat.vs_space += space_delta; |
34dc7c2f BB |
4778 | rvd->vdev_stat.vs_dspace += dspace_delta; |
4779 | mutex_exit(&rvd->vdev_stat_lock); | |
4780 | } | |
cc99f275 | 4781 | /* Note: metaslab_class_space_update moved to metaslab_space_update */ |
34dc7c2f BB |
4782 | } |
4783 | ||
4784 | /* | |
4785 | * Mark a top-level vdev's config as dirty, placing it on the dirty list | |
4786 | * so that it will be written out next time the vdev configuration is synced. | |
4787 | * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs. | |
4788 | */ | |
4789 | void | |
4790 | vdev_config_dirty(vdev_t *vd) | |
4791 | { | |
4792 | spa_t *spa = vd->vdev_spa; | |
4793 | vdev_t *rvd = spa->spa_root_vdev; | |
4794 | int c; | |
4795 | ||
572e2857 BB |
4796 | ASSERT(spa_writeable(spa)); |
4797 | ||
34dc7c2f | 4798 | /* |
9babb374 BB |
4799 | * If this is an aux vdev (as with l2cache and spare devices), then we |
4800 | * update the vdev config manually and set the sync flag. | |
b128c09f BB |
4801 | */ |
4802 | if (vd->vdev_aux != NULL) { | |
4803 | spa_aux_vdev_t *sav = vd->vdev_aux; | |
4804 | nvlist_t **aux; | |
4805 | uint_t naux; | |
4806 | ||
4807 | for (c = 0; c < sav->sav_count; c++) { | |
4808 | if (sav->sav_vdevs[c] == vd) | |
4809 | break; | |
4810 | } | |
4811 | ||
4812 | if (c == sav->sav_count) { | |
4813 | /* | |
4814 | * We're being removed. There's nothing more to do. | |
4815 | */ | |
4816 | ASSERT(sav->sav_sync == B_TRUE); | |
4817 | return; | |
4818 | } | |
4819 | ||
4820 | sav->sav_sync = B_TRUE; | |
4821 | ||
9babb374 BB |
4822 | if (nvlist_lookup_nvlist_array(sav->sav_config, |
4823 | ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) { | |
4824 | VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, | |
4825 | ZPOOL_CONFIG_SPARES, &aux, &naux) == 0); | |
4826 | } | |
b128c09f BB |
4827 | |
4828 | ASSERT(c < naux); | |
4829 | ||
4830 | /* | |
4831 | * Setting the nvlist in the middle of the array is a little
4832 | * sketchy, but it will work. | |
4833 | */ | |
4834 | nvlist_free(aux[c]); | |
428870ff | 4835 | aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0); |
b128c09f BB |
4836 | |
4837 | return; | |
4838 | } | |
4839 | ||
4840 | /* | |
4841 | * The dirty list is protected by the SCL_CONFIG lock. The caller | |
4842 | * must either hold SCL_CONFIG as writer, or must be the sync thread | |
4843 | * (which holds SCL_CONFIG as reader). There's only one sync thread, | |
34dc7c2f BB |
4844 | * so this is sufficient to ensure mutual exclusion. |
4845 | */ | |
b128c09f BB |
4846 | ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || |
4847 | (dsl_pool_sync_context(spa_get_dsl(spa)) && | |
4848 | spa_config_held(spa, SCL_CONFIG, RW_READER))); | |
34dc7c2f BB |
4849 | |
4850 | if (vd == rvd) { | |
4851 | for (c = 0; c < rvd->vdev_children; c++) | |
4852 | vdev_config_dirty(rvd->vdev_child[c]); | |
4853 | } else { | |
4854 | ASSERT(vd == vd->vdev_top); | |
4855 | ||
428870ff | 4856 | if (!list_link_active(&vd->vdev_config_dirty_node) && |
a1d477c2 | 4857 | vdev_is_concrete(vd)) { |
b128c09f | 4858 | list_insert_head(&spa->spa_config_dirty_list, vd); |
a1d477c2 | 4859 | } |
34dc7c2f BB |
4860 | } |
4861 | } | |
4862 | ||
4863 | void | |
4864 | vdev_config_clean(vdev_t *vd) | |
4865 | { | |
4866 | spa_t *spa = vd->vdev_spa; | |
4867 | ||
b128c09f BB |
4868 | ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || |
4869 | (dsl_pool_sync_context(spa_get_dsl(spa)) && | |
4870 | spa_config_held(spa, SCL_CONFIG, RW_READER))); | |
34dc7c2f | 4871 | |
b128c09f BB |
4872 | ASSERT(list_link_active(&vd->vdev_config_dirty_node)); |
4873 | list_remove(&spa->spa_config_dirty_list, vd); | |
34dc7c2f BB |
4874 | } |
4875 | ||
b128c09f BB |
4876 | /* |
4877 | * Mark a top-level vdev's state as dirty, so that the next pass of | |
4878 | * spa_sync() can convert this into vdev_config_dirty(). We distinguish | |
4879 | * the state changes from larger config changes because they require | |
4880 | * much less locking, and are often needed for administrative actions. | |
4881 | */ | |
4882 | void | |
4883 | vdev_state_dirty(vdev_t *vd) | |
4884 | { | |
4885 | spa_t *spa = vd->vdev_spa; | |
4886 | ||
572e2857 | 4887 | ASSERT(spa_writeable(spa)); |
b128c09f BB |
4888 | ASSERT(vd == vd->vdev_top); |
4889 | ||
4890 | /* | |
4891 | * The state list is protected by the SCL_STATE lock. The caller | |
4892 | * must either hold SCL_STATE as writer, or must be the sync thread | |
4893 | * (which holds SCL_STATE as reader). There's only one sync thread, | |
4894 | * so this is sufficient to ensure mutual exclusion. | |
4895 | */ | |
4896 | ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || | |
4897 | (dsl_pool_sync_context(spa_get_dsl(spa)) && | |
4898 | spa_config_held(spa, SCL_STATE, RW_READER))); | |
4899 | ||
a1d477c2 MA |
4900 | if (!list_link_active(&vd->vdev_state_dirty_node) && |
4901 | vdev_is_concrete(vd)) | |
b128c09f BB |
4902 | list_insert_head(&spa->spa_state_dirty_list, vd); |
4903 | } | |
4904 | ||
4905 | void | |
4906 | vdev_state_clean(vdev_t *vd) | |
4907 | { | |
4908 | spa_t *spa = vd->vdev_spa; | |
4909 | ||
4910 | ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || | |
4911 | (dsl_pool_sync_context(spa_get_dsl(spa)) && | |
4912 | spa_config_held(spa, SCL_STATE, RW_READER))); | |
4913 | ||
4914 | ASSERT(list_link_active(&vd->vdev_state_dirty_node)); | |
4915 | list_remove(&spa->spa_state_dirty_list, vd); | |
4916 | } | |
4917 | ||
4918 | /* | |
4919 | * Propagate vdev state up from children to parent. | |
4920 | */ | |
34dc7c2f BB |
4921 | void |
4922 | vdev_propagate_state(vdev_t *vd) | |
4923 | { | |
fb5f0bc8 BB |
4924 | spa_t *spa = vd->vdev_spa; |
4925 | vdev_t *rvd = spa->spa_root_vdev; | |
34dc7c2f BB |
4926 | int degraded = 0, faulted = 0; |
4927 | int corrupted = 0; | |
34dc7c2f BB |
4928 | vdev_t *child; |
4929 | ||
4930 | if (vd->vdev_children > 0) { | |
1c27024e | 4931 | for (int c = 0; c < vd->vdev_children; c++) { |
34dc7c2f | 4932 | child = vd->vdev_child[c]; |
b128c09f | 4933 | |
428870ff | 4934 | /* |
a1d477c2 MA |
4935 | * Don't factor holes or indirect vdevs into the |
4936 | * decision. | |
428870ff | 4937 | */ |
a1d477c2 | 4938 | if (!vdev_is_concrete(child)) |
428870ff BB |
4939 | continue; |
4940 | ||
b128c09f | 4941 | if (!vdev_readable(child) || |
fb5f0bc8 | 4942 | (!vdev_writeable(child) && spa_writeable(spa))) { |
b128c09f BB |
4943 | /* |
4944 | * Root special: if there is a top-level log | |
4945 | * device, treat the root vdev as if it were | |
4946 | * degraded. | |
4947 | */ | |
4948 | if (child->vdev_islog && vd == rvd) | |
4949 | degraded++; | |
4950 | else | |
4951 | faulted++; | |
4952 | } else if (child->vdev_state <= VDEV_STATE_DEGRADED) { | |
34dc7c2f | 4953 | degraded++; |
b128c09f | 4954 | } |
34dc7c2f BB |
4955 | |
4956 | if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA) | |
4957 | corrupted++; | |
4958 | } | |
4959 | ||
4960 | vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded); | |
4961 | ||
4962 | /* | |
b128c09f | 4963 | * Root special: if there is a top-level vdev that cannot be |
34dc7c2f BB |
4964 | * opened due to corrupted metadata, then propagate the root |
4965 | * vdev's aux state as 'corrupt' rather than 'insufficient | |
4966 | * replicas'. | |
4967 | */ | |
4968 | if (corrupted && vd == rvd && | |
4969 | rvd->vdev_state == VDEV_STATE_CANT_OPEN) | |
4970 | vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN, | |
4971 | VDEV_AUX_CORRUPT_DATA); | |
4972 | } | |
4973 | ||
b128c09f | 4974 | if (vd->vdev_parent) |
34dc7c2f BB |
4975 | vdev_propagate_state(vd->vdev_parent); |
4976 | } | |
4977 | ||
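A counting sketch of the child walk above for a two-way mirror on a writable pool with one unwritable child; the mirror's vdev_op_state_change() callback would then be expected to report DEGRADED rather than FAULTED, since the vdev remains readable:

	#include <stdio.h>

	/* Hypothetical child states for a 2-way mirror. */
	struct child { int readable; int writeable; };

	int
	main(void)
	{
		struct child kids[2] = { { 1, 1 }, { 1, 0 } };	/* one unwritable */
		int faulted = 0, degraded = 0;
		int pool_writeable = 1;

		for (int c = 0; c < 2; c++) {
			if (!kids[c].readable ||
			    (!kids[c].writeable && pool_writeable))
				faulted++;
		}
		/* One faulted child of a mirror: reduced redundancy, not loss. */
		printf("faulted=%d degraded=%d\n", faulted, degraded);
		return (0);
	}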
4978 | /* | |
4979 | * Set a vdev's state. If this is during an open, we don't update the parent | |
4980 | * state, because we're in the process of opening children depth-first. | |
4981 | * Otherwise, we propagate the change to the parent. | |
4982 | * | |
4983 | * If this routine places a device in a faulted state, an appropriate ereport is | |
4984 | * generated. | |
4985 | */ | |
4986 | void | |
4987 | vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux) | |
4988 | { | |
4989 | uint64_t save_state; | |
b128c09f | 4990 | spa_t *spa = vd->vdev_spa; |
34dc7c2f BB |
4991 | |
4992 | if (state == vd->vdev_state) { | |
976246fa DB |
4993 | /* |
4994 | * Since vdev_offline() code path is already in an offline | |
4995 | * state we can miss a statechange event to OFFLINE. Check | |
4996 | * the previous state to catch this condition. | |
4997 | */ | |
4998 | if (vd->vdev_ops->vdev_op_leaf && | |
4999 | (state == VDEV_STATE_OFFLINE) && | |
5000 | (vd->vdev_prevstate >= VDEV_STATE_FAULTED)) { | |
5001 | /* post an offline state change */ | |
5002 | zfs_post_state_change(spa, vd, vd->vdev_prevstate); | |
5003 | } | |
34dc7c2f BB |
5004 | vd->vdev_stat.vs_aux = aux; |
5005 | return; | |
5006 | } | |
5007 | ||
5008 | save_state = vd->vdev_state; | |
5009 | ||
5010 | vd->vdev_state = state; | |
5011 | vd->vdev_stat.vs_aux = aux; | |
5012 | ||
5013 | /* | |
5014 | * If we are setting the vdev state to anything but an open state, then | |
428870ff BB |
5015 | * always close the underlying device unless the device has requested |
5016 | * a delayed close (i.e. we're about to remove or fault the device). | |
5017 | * Otherwise, we keep accessible but invalid devices open forever. | |
5018 | * We don't call vdev_close() itself, because that implies some extra | |
5019 | * checks (offline, etc) that we don't want here. This is limited to | |
5020 | * leaf devices, because otherwise closing the device will affect other | |
5021 | * children. | |
34dc7c2f | 5022 | */ |
428870ff BB |
5023 | if (!vd->vdev_delayed_close && vdev_is_dead(vd) && |
5024 | vd->vdev_ops->vdev_op_leaf) | |
34dc7c2f BB |
5025 | vd->vdev_ops->vdev_op_close(vd); |
5026 | ||
5027 | if (vd->vdev_removed && | |
5028 | state == VDEV_STATE_CANT_OPEN && | |
5029 | (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) { | |
5030 | /* | |
5031 | * If the previous state is set to VDEV_STATE_REMOVED, then this | |
5032 | * device was previously marked removed and someone attempted to | |
5033 | * reopen it. If this failed due to a nonexistent device, then | |
5034 | * keep the device in the REMOVED state. We also leave it as-is
5035 | * if this is one of our special test online cases, which only
5036 | * attempt to online the device and shouldn't generate an FMA
5037 | * fault.
5038 | */ | |
5039 | vd->vdev_state = VDEV_STATE_REMOVED; | |
5040 | vd->vdev_stat.vs_aux = VDEV_AUX_NONE; | |
5041 | } else if (state == VDEV_STATE_REMOVED) { | |
34dc7c2f BB |
5042 | vd->vdev_removed = B_TRUE; |
5043 | } else if (state == VDEV_STATE_CANT_OPEN) { | |
5044 | /* | |
572e2857 BB |
5045 | * If we fail to open a vdev during an import or recovery, we |
5046 | * mark it as "not available", which signifies that it was | |
5047 | * never there to begin with. Failure to open such a device | |
5048 | * is not considered an error. | |
34dc7c2f | 5049 | */ |
572e2857 BB |
5050 | if ((spa_load_state(spa) == SPA_LOAD_IMPORT || |
5051 | spa_load_state(spa) == SPA_LOAD_RECOVER) && | |
34dc7c2f BB |
5052 | vd->vdev_ops->vdev_op_leaf) |
5053 | vd->vdev_not_present = 1; | |
5054 | ||
5055 | /* | |
5056 | * Post the appropriate ereport. If the 'prevstate' field is | |
5057 | * set to something other than VDEV_STATE_UNKNOWN, it indicates | |
5058 | * that this is part of a vdev_reopen(). In this case, we don't | |
5059 | * want to post the ereport if the device was already in the | |
5060 | * CANT_OPEN state beforehand. | |
5061 | * | |
5062 | * If the 'checkremove' flag is set, then this is an attempt to | |
5063 | * online the device in response to an insertion event. If we | |
5064 | * hit this case, then we have detected an insertion event for a | |
5065 | * faulted or offline device that wasn't in the removed state. | |
5066 | * In this scenario, we don't post an ereport because we are | |
5067 | * about to replace the device, or attempt an online with | |
5068 | * vdev_forcefault, which will generate the fault for us. | |
5069 | */ | |
5070 | if ((vd->vdev_prevstate != state || vd->vdev_forcefault) && | |
5071 | !vd->vdev_not_present && !vd->vdev_checkremove && | |
b128c09f | 5072 | vd != spa->spa_root_vdev) { |
34dc7c2f BB |
5073 | const char *class; |
5074 | ||
5075 | switch (aux) { | |
5076 | case VDEV_AUX_OPEN_FAILED: | |
5077 | class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED; | |
5078 | break; | |
5079 | case VDEV_AUX_CORRUPT_DATA: | |
5080 | class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA; | |
5081 | break; | |
5082 | case VDEV_AUX_NO_REPLICAS: | |
5083 | class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS; | |
5084 | break; | |
5085 | case VDEV_AUX_BAD_GUID_SUM: | |
5086 | class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM; | |
5087 | break; | |
5088 | case VDEV_AUX_TOO_SMALL: | |
5089 | class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL; | |
5090 | break; | |
5091 | case VDEV_AUX_BAD_LABEL: | |
5092 | class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL; | |
5093 | break; | |
ff61d1a4 | 5094 | case VDEV_AUX_BAD_ASHIFT: |
5095 | class = FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT; | |
5096 | break; | |
34dc7c2f BB |
5097 | default: |
5098 | class = FM_EREPORT_ZFS_DEVICE_UNKNOWN; | |
5099 | } | |
5100 | ||
1144586b | 5101 | (void) zfs_ereport_post(class, spa, vd, NULL, NULL, |
4f072827 | 5102 | save_state); |
34dc7c2f BB |
5103 | } |
5104 | ||
5105 | /* Erase any notion of persistent removed state */ | |
5106 | vd->vdev_removed = B_FALSE; | |
5107 | } else { | |
5108 | vd->vdev_removed = B_FALSE; | |
5109 | } | |
5110 | ||
d02ca379 DB |
5111 | /* |
5112 | * Notify ZED of any significant state-change on a leaf vdev. | |
d02ca379 | 5114 | */ |
6078881a TH |
5115 | if (vd->vdev_ops->vdev_op_leaf) { |
5116 | /* preserve original state from a vdev_reopen() */ | |
5117 | if ((vd->vdev_prevstate != VDEV_STATE_UNKNOWN) && | |
5118 | (vd->vdev_prevstate != vd->vdev_state) && | |
5119 | (save_state <= VDEV_STATE_CLOSED)) | |
5120 | save_state = vd->vdev_prevstate; | |
5121 | ||
5122 | /* filter out state change due to initial vdev_open */ | |
5123 | if (save_state > VDEV_STATE_CLOSED) | |
5124 | zfs_post_state_change(spa, vd, save_state); | |
d02ca379 DB |
5125 | } |
5126 | ||
9babb374 BB |
5127 | if (!isopen && vd->vdev_parent) |
5128 | vdev_propagate_state(vd->vdev_parent); | |
34dc7c2f | 5129 | } |
b128c09f | 5130 | |
6cb8e530 PZ |
5131 | boolean_t |
5132 | vdev_children_are_offline(vdev_t *vd) | |
5133 | { | |
5134 | ASSERT(!vd->vdev_ops->vdev_op_leaf); | |
5135 | ||
5136 | for (uint64_t i = 0; i < vd->vdev_children; i++) { | |
5137 | if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE) | |
5138 | return (B_FALSE); | |
5139 | } | |
5140 | ||
5141 | return (B_TRUE); | |
5142 | } | |
5143 | ||
b128c09f BB |
5144 | /* |
5145 | * Check the vdev configuration to ensure that it's capable of supporting | |
e550644f | 5146 | * a root pool. We do not support partial configuration. |
b128c09f BB |
5147 | */ |
5148 | boolean_t | |
5149 | vdev_is_bootable(vdev_t *vd) | |
5150 | { | |
b128c09f | 5151 | if (!vd->vdev_ops->vdev_op_leaf) { |
e550644f | 5152 | const char *vdev_type = vd->vdev_ops->vdev_op_type; |
b128c09f | 5153 | |
cd5b8128 | 5154 | if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) |
b128c09f | 5155 | return (B_FALSE); |
b128c09f BB |
5156 | } |
5157 | ||
e550644f | 5158 | for (int c = 0; c < vd->vdev_children; c++) { |
b128c09f BB |
5159 | if (!vdev_is_bootable(vd->vdev_child[c])) |
5160 | return (B_FALSE); | |
5161 | } | |
5162 | return (B_TRUE); | |
5163 | } | |
9babb374 | 5164 | |
a1d477c2 MA |
5165 | boolean_t |
5166 | vdev_is_concrete(vdev_t *vd) | |
5167 | { | |
5168 | vdev_ops_t *ops = vd->vdev_ops; | |
5169 | if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops || | |
5170 | ops == &vdev_missing_ops || ops == &vdev_root_ops) { | |
5171 | return (B_FALSE); | |
5172 | } else { | |
5173 | return (B_TRUE); | |
5174 | } | |
5175 | } | |
5176 | ||
572e2857 BB |
5177 | /* |
5178 | * Determine if a log device has valid content. If the vdev was | |
5179 | * removed or faulted in the MOS config then we know that | |
5180 | * the content on the log device has already been written to the pool. | |
5181 | */ | |
5182 | boolean_t | |
5183 | vdev_log_state_valid(vdev_t *vd) | |
5184 | { | |
5185 | if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted && | |
5186 | !vd->vdev_removed) | |
5187 | return (B_TRUE); | |
5188 | ||
1c27024e | 5189 | for (int c = 0; c < vd->vdev_children; c++) |
572e2857 BB |
5190 | if (vdev_log_state_valid(vd->vdev_child[c])) |
5191 | return (B_TRUE); | |
5192 | ||
5193 | return (B_FALSE); | |
5194 | } | |
5195 | ||
9babb374 BB |
5196 | /* |
5197 | * Expand a vdev if possible. | |
5198 | */ | |
5199 | void | |
5200 | vdev_expand(vdev_t *vd, uint64_t txg) | |
5201 | { | |
5202 | ASSERT(vd->vdev_top == vd); | |
5203 | ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); | |
7637ef8d | 5204 | ASSERT(vdev_is_concrete(vd)); |
9babb374 | 5205 | |
a1d477c2 MA |
5206 | vdev_set_deflate_ratio(vd); |
5207 | ||
cc99f275 DB |
5208 | if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count && |
5209 | vdev_is_concrete(vd)) { | |
5210 | vdev_metaslab_group_create(vd); | |
9babb374 BB |
5211 | VERIFY(vdev_metaslab_init(vd, txg) == 0); |
5212 | vdev_config_dirty(vd); | |
5213 | } | |
5214 | } | |
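The expansion trigger above is a straight comparison of metaslab counts. A worked sketch with hypothetical numbers:

	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		uint64_t ms_shift = 29;			/* 512 MiB metaslabs */
		uint64_t ms_count = 200;		/* ~100 GiB vdev today */
		uint64_t new_asize = 200ULL << 30;	/* grown to 200 GiB */

		/* Same condition as vdev_expand(): more metaslabs now fit. */
		if ((new_asize >> ms_shift) > ms_count)
			printf("expand: %llu -> %llu metaslabs\n",
			    (unsigned long long)ms_count,
			    (unsigned long long)(new_asize >> ms_shift));
		return (0);
	}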
428870ff BB |
5215 | |
5216 | /* | |
5217 | * Split a vdev. | |
5218 | */ | |
5219 | void | |
5220 | vdev_split(vdev_t *vd) | |
5221 | { | |
5222 | vdev_t *cvd, *pvd = vd->vdev_parent; | |
5223 | ||
5224 | vdev_remove_child(pvd, vd); | |
5225 | vdev_compact_children(pvd); | |
5226 | ||
5227 | cvd = pvd->vdev_child[0]; | |
5228 | if (pvd->vdev_children == 1) { | |
5229 | vdev_remove_parent(cvd); | |
5230 | cvd->vdev_splitting = B_TRUE; | |
5231 | } | |
5232 | vdev_propagate_state(cvd); | |
5233 | } | |
c28b2279 | 5234 | |
cc92e9d0 | 5235 | void |
8fb1ede1 | 5236 | vdev_deadman(vdev_t *vd, char *tag) |
cc92e9d0 | 5237 | { |
1c27024e | 5238 | for (int c = 0; c < vd->vdev_children; c++) { |
cc92e9d0 GW |
5239 | vdev_t *cvd = vd->vdev_child[c]; |
5240 | ||
8fb1ede1 | 5241 | vdev_deadman(cvd, tag); |
cc92e9d0 GW |
5242 | } |
5243 | ||
5244 | if (vd->vdev_ops->vdev_op_leaf) { | |
5245 | vdev_queue_t *vq = &vd->vdev_queue; | |
5246 | ||
5247 | mutex_enter(&vq->vq_lock); | |
e8b96c60 | 5248 | if (avl_numnodes(&vq->vq_active_tree) > 0) { |
cc92e9d0 GW |
5249 | spa_t *spa = vd->vdev_spa; |
5250 | zio_t *fio; | |
5251 | uint64_t delta; | |
5252 | ||
8e739b2c | 5253 | zfs_dbgmsg("slow vdev: %s has %lu active IOs", |
8fb1ede1 BB |
5254 | vd->vdev_path, avl_numnodes(&vq->vq_active_tree)); |
5255 | ||
cc92e9d0 GW |
5256 | /* |
5257 | * Look at the head of all the pending queues, | |
5258 | * if any I/O has been outstanding for longer than | |
8fb1ede1 | 5259 | * the spa_deadman_synctime invoke the deadman logic. |
cc92e9d0 | 5260 | */ |
e8b96c60 | 5261 | fio = avl_first(&vq->vq_active_tree); |
cb682a17 | 5262 | delta = gethrtime() - fio->io_timestamp; |
8fb1ede1 BB |
5263 | if (delta > spa_deadman_synctime(spa)) |
5264 | zio_deadman(fio, tag); | |
cc92e9d0 GW |
5265 | } |
5266 | mutex_exit(&vq->vq_lock); | |
5267 | } | |
5268 | } | |
5269 | ||
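The deadman comparison above reduces to one subtraction against the configured limit. A standalone sketch with hypothetical timestamps (the default zfs_deadman_synctime_ms is on the order of 600 seconds):

	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		/* Hypothetical values, in nanoseconds. */
		uint64_t now = 1000ULL * 1000000000ULL;		/* gethrtime() */
		uint64_t io_timestamp = 300ULL * 1000000000ULL;	/* oldest I/O */
		uint64_t synctime = 600ULL * 1000000000ULL;	/* deadman limit */

		uint64_t delta = now - io_timestamp;		/* 700 s */
		if (delta > synctime)
			printf("deadman fires: I/O outstanding %llu s\n",
			    (unsigned long long)(delta / 1000000000ULL));
		return (0);
	}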
80a91e74 | 5270 | void |
3c819a2c | 5271 | vdev_defer_resilver(vdev_t *vd) |
80a91e74 | 5272 | { |
3c819a2c | 5273 | ASSERT(vd->vdev_ops->vdev_op_leaf); |
4021ba4c | 5274 | |
3c819a2c JP |
5275 | vd->vdev_resilver_deferred = B_TRUE; |
5276 | vd->vdev_spa->spa_resilver_deferred = B_TRUE; | |
5277 | } | |
5278 | ||
5279 | /* | |
5280 | * Clears the resilver deferred flag on all leaf devs under vd. Returns | |
5281 | * B_TRUE if we have devices that need to be resilvered and are available to | |
5282 | * accept resilver I/Os. | |
5283 | */ | |
5284 | boolean_t | |
5285 | vdev_clear_resilver_deferred(vdev_t *vd, dmu_tx_t *tx) | |
5286 | { | |
5287 | boolean_t resilver_needed = B_FALSE; | |
5288 | spa_t *spa = vd->vdev_spa; | |
5289 | ||
5290 | for (int c = 0; c < vd->vdev_children; c++) { | |
5291 | vdev_t *cvd = vd->vdev_child[c]; | |
5292 | resilver_needed |= vdev_clear_resilver_deferred(cvd, tx); | |
4021ba4c TC |
5293 | } |
5294 | ||
3c819a2c JP |
5295 | if (vd == spa->spa_root_vdev && |
5296 | spa_feature_is_active(spa, SPA_FEATURE_RESILVER_DEFER)) { | |
5297 | spa_feature_decr(spa, SPA_FEATURE_RESILVER_DEFER, tx); | |
5298 | vdev_config_dirty(vd); | |
5299 | spa->spa_resilver_deferred = B_FALSE; | |
5300 | return (resilver_needed); | |
5301 | } | |
5302 | ||
5303 | if (!vdev_is_concrete(vd) || vd->vdev_aux || | |
5304 | !vd->vdev_ops->vdev_op_leaf) | |
5305 | return (resilver_needed); | |
5306 | ||
5307 | vd->vdev_resilver_deferred = B_FALSE; | |
5308 | ||
5309 | return (!vdev_is_dead(vd) && !vd->vdev_offline && | |
5310 | vdev_resilver_needed(vd, NULL, NULL)); | |
80a91e74 TC |
5311 | } |
5312 | ||
b2255edc BB |
5313 | boolean_t |
5314 | vdev_xlate_is_empty(range_seg64_t *rs) | |
5315 | { | |
5316 | return (rs->rs_start == rs->rs_end); | |
5317 | } | |
5318 | ||
1b939560 | 5319 | /* |
b2255edc BB |
5320 | * Translate a logical range to the first contiguous physical range for the |
5321 | * specified vdev_t. This function is initially called with a leaf vdev and | |
5322 | * will walk each parent vdev until it reaches a top-level vdev. Once the | |
5323 | * top-level is reached the physical range is initialized and the recursive | |
5324 | * function begins to unwind. As it unwinds it calls the parent's vdev | |
5325 | * specific translation function to do the real conversion. | |
1b939560 BB |
5326 | */ |
5327 | void | |
ca577779 | 5328 | vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs, |
b2255edc | 5329 | range_seg64_t *physical_rs, range_seg64_t *remain_rs) |
1b939560 BB |
5330 | { |
5331 | /* | |
5332 | * Walk up the vdev tree | |
5333 | */ | |
5334 | if (vd != vd->vdev_top) { | |
b2255edc BB |
5335 | vdev_xlate(vd->vdev_parent, logical_rs, physical_rs, |
5336 | remain_rs); | |
1b939560 BB |
5337 | } else { |
5338 | /* | |
b2255edc BB |
5339 | * We've reached the top-level vdev, initialize the physical |
5340 | * range to the logical range and set an empty remaining | |
5341 | * range then start to unwind. | |
1b939560 BB |
5342 | */ |
5343 | physical_rs->rs_start = logical_rs->rs_start; | |
5344 | physical_rs->rs_end = logical_rs->rs_end; | |
b2255edc BB |
5345 | |
5346 | remain_rs->rs_start = logical_rs->rs_start; | |
5347 | remain_rs->rs_end = logical_rs->rs_start; | |
5348 | ||
1b939560 BB |
5349 | return; |
5350 | } | |
5351 | ||
5352 | vdev_t *pvd = vd->vdev_parent; | |
5353 | ASSERT3P(pvd, !=, NULL); | |
5354 | ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL); | |
5355 | ||
5356 | /* | |
5357 | * As this recursive function unwinds, translate the logical | |
b2255edc BB |
5358 | * range into its physical and any remaining components by calling |
5359 | * the vdev specific translate function. | |
1b939560 | 5360 | */ |
ca577779 | 5361 | range_seg64_t intermediate = { 0 }; |
b2255edc | 5362 | pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate, remain_rs); |
1b939560 BB |
5363 | |
5364 | physical_rs->rs_start = intermediate.rs_start; | |
5365 | physical_rs->rs_end = intermediate.rs_end; | |
5366 | } | |
5367 | ||
b2255edc BB |
5368 | void |
5369 | vdev_xlate_walk(vdev_t *vd, const range_seg64_t *logical_rs, | |
5370 | vdev_xlate_func_t *func, void *arg) | |
5371 | { | |
5372 | range_seg64_t iter_rs = *logical_rs; | |
5373 | range_seg64_t physical_rs; | |
5374 | range_seg64_t remain_rs; | |
5375 | ||
5376 | while (!vdev_xlate_is_empty(&iter_rs)) { | |
5377 | ||
5378 | vdev_xlate(vd, &iter_rs, &physical_rs, &remain_rs); | |
5379 | ||
5380 | /* | |
5381 | * With raidz and dRAID, it's possible that the logical range | |
5382 | * does not live on this leaf vdev. The provided function is
5383 | * called only when the translated physical size is non-zero.
5384 | */ | |
5385 | if (!vdev_xlate_is_empty(&physical_rs)) | |
5386 | func(arg, &physical_rs); | |
5387 | ||
5388 | iter_rs = remain_rs; | |
5389 | } | |
5390 | } | |
5391 | ||
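A hypothetical caller of vdev_xlate_walk(), assuming the in-tree range_seg64_t type: the callback below sums the physical size of each translated chunk, and may run zero times if no part of the logical range lands on this leaf.

	/* Hypothetical callback: accumulate the physical size of each chunk. */
	static void
	sum_physical_cb(void *arg, range_seg64_t *physical_rs)
	{
		uint64_t *total = arg;

		*total += physical_rs->rs_end - physical_rs->rs_start;
	}

	/*
	 * Hypothetical use against a leaf vdev:
	 *
	 *	uint64_t total = 0;
	 *	range_seg64_t logical = { .rs_start = off, .rs_end = off + size };
	 *	vdev_xlate_walk(leaf_vd, &logical, sum_physical_cb, &total);
	 */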
2a673e76 AJ |
5392 | static char * |
5393 | vdev_name(vdev_t *vd, char *buf, int buflen) | |
5394 | { | |
5395 | if (vd->vdev_path == NULL) { | |
5396 | if (strcmp(vd->vdev_ops->vdev_op_type, "root") == 0) { | |
5397 | strlcpy(buf, vd->vdev_spa->spa_name, buflen); | |
5398 | } else if (!vd->vdev_ops->vdev_op_leaf) { | |
5399 | snprintf(buf, buflen, "%s-%llu", | |
5400 | vd->vdev_ops->vdev_op_type, | |
5401 | (u_longlong_t)vd->vdev_id); | |
5402 | } | |
5403 | } else { | |
5404 | strlcpy(buf, vd->vdev_path, buflen); | |
5405 | } | |
5406 | return (buf); | |
5407 | } | |
5408 | ||
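Examples of the names produced above, for a hypothetical pool named "tank":

	/*
	 *   root vdev                -> "tank"       (the pool name)
	 *   interior vdev (id 0)     -> "mirror-0"   ("<type>-<id>")
	 *   leaf with a device path  -> "/dev/sda1"  (vdev_path verbatim)
	 *
	 * Note that a pathless leaf leaves buf untouched, so callers are
	 * expected to pass a zeroed buffer, as vdev_prop_get() does with
	 * namebuf below.
	 */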
e60e158e JG |
5409 | /* |
5410 | * Look at the vdev tree and determine whether any devices are currently being | |
5411 | * replaced. | |
5412 | */ | |
5413 | boolean_t | |
5414 | vdev_replace_in_progress(vdev_t *vdev) | |
5415 | { | |
5416 | ASSERT(spa_config_held(vdev->vdev_spa, SCL_ALL, RW_READER) != 0); | |
5417 | ||
5418 | if (vdev->vdev_ops == &vdev_replacing_ops) | |
5419 | return (B_TRUE); | |
5420 | ||
5421 | /* | |
5422 | * A 'spare' vdev indicates that we have a replace in progress, unless | |
5423 | * it has exactly two children, and the second, the hot spare, has | |
5424 | * finished being resilvered. | |
5425 | */ | |
5426 | if (vdev->vdev_ops == &vdev_spare_ops && (vdev->vdev_children > 2 || | |
5427 | !vdev_dtl_empty(vdev->vdev_child[1], DTL_MISSING))) | |
5428 | return (B_TRUE); | |
5429 | ||
5430 | for (int i = 0; i < vdev->vdev_children; i++) { | |
5431 | if (vdev_replace_in_progress(vdev->vdev_child[i])) | |
5432 | return (B_TRUE); | |
5433 | } | |
5434 | ||
5435 | return (B_FALSE); | |
5436 | } | |
5437 | ||
2a673e76 AJ |
5438 | /* |
5439 | * Add a (source=src, propname=propval) list to an nvlist. | |
5440 | */ | |
5441 | static void | |
5442 | vdev_prop_add_list(nvlist_t *nvl, const char *propname, char *strval, | |
5443 | uint64_t intval, zprop_source_t src) | |
5444 | { | |
5445 | nvlist_t *propval; | |
5446 | ||
5447 | propval = fnvlist_alloc(); | |
5448 | fnvlist_add_uint64(propval, ZPROP_SOURCE, src); | |
5449 | ||
5450 | if (strval != NULL) | |
5451 | fnvlist_add_string(propval, ZPROP_VALUE, strval); | |
5452 | else | |
5453 | fnvlist_add_uint64(propval, ZPROP_VALUE, intval); | |
5454 | ||
5455 | fnvlist_add_nvlist(nvl, propname, propval); | |
5456 | nvlist_free(propval); | |
5457 | } | |
5458 | ||
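The nvlist added above nests one (source, value) pair under each property name. For example (hypothetical values), a numeric property ends up shaped as:

	/*
	 *	"<propname>" -> {
	 *		ZPROP_SOURCE = <src>		(e.g. ZPROP_SRC_NONE)
	 *		ZPROP_VALUE  = <intval>		(uint64)
	 *	}
	 *
	 * String-valued properties store ZPROP_VALUE as a string instead.
	 */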
5459 | static void | |
5460 | vdev_props_set_sync(void *arg, dmu_tx_t *tx) | |
5461 | { | |
5462 | vdev_t *vd; | |
5463 | nvlist_t *nvp = arg; | |
5464 | spa_t *spa = dmu_tx_pool(tx)->dp_spa; | |
5465 | objset_t *mos = spa->spa_meta_objset; | |
5466 | nvpair_t *elem = NULL; | |
5467 | uint64_t vdev_guid; | |
5468 | nvlist_t *nvprops; | |
5469 | ||
5470 | vdev_guid = fnvlist_lookup_uint64(nvp, ZPOOL_VDEV_PROPS_SET_VDEV); | |
5471 | nvprops = fnvlist_lookup_nvlist(nvp, ZPOOL_VDEV_PROPS_SET_PROPS); | |
5472 | vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE); | |
da9c6c03 MM |
5473 | |
5474 | /* this vdev could get removed while waiting for this sync task */ | |
5475 | if (vd == NULL) | |
5476 | return; | |
2a673e76 AJ |
5477 | |
5478 | mutex_enter(&spa->spa_props_lock); | |
5479 | ||
5480 | while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) { | |
5481 | uint64_t intval, objid = 0; | |
5482 | char *strval; | |
5483 | vdev_prop_t prop; | |
5484 | const char *propname = nvpair_name(elem); | |
5485 | zprop_type_t proptype; | |
5486 | ||
5487 | /* | |
5488 | * Set vdev property values in the vdev props mos object. | |
5489 | */ | |
5490 | if (vd->vdev_top_zap != 0) { | |
5491 | objid = vd->vdev_top_zap; | |
5492 | } else if (vd->vdev_leaf_zap != 0) { | |
5493 | objid = vd->vdev_leaf_zap; | |
5494 | } else { | |
5495 | panic("vdev not top or leaf"); | |
5496 | } | |
5497 | ||
5498 | switch (prop = vdev_name_to_prop(propname)) { | |
5499 | case VDEV_PROP_USER: | |
5500 | if (vdev_prop_user(propname)) { | |
5501 | strval = fnvpair_value_string(elem); | |
5502 | if (strlen(strval) == 0) { | |
5503 | /* remove the property if value == "" */ | |
5504 | (void) zap_remove(mos, objid, propname, | |
5505 | tx); | |
5506 | } else { | |
5507 | VERIFY0(zap_update(mos, objid, propname, | |
5508 | 1, strlen(strval) + 1, strval, tx)); | |
5509 | } | |
5510 | spa_history_log_internal(spa, "vdev set", tx, | |
5511 | "vdev_guid=%llu: %s=%s", | |
5512 | (u_longlong_t)vdev_guid, nvpair_name(elem), | |
5513 | strval); | |
5514 | } | |
5515 | break; | |
5516 | default: | |
5517 | /* normalize the property name */ | |
5518 | propname = vdev_prop_to_name(prop); | |
5519 | proptype = vdev_prop_get_type(prop); | |
5520 | ||
5521 | if (nvpair_type(elem) == DATA_TYPE_STRING) { | |
5522 | ASSERT(proptype == PROP_TYPE_STRING); | |
5523 | strval = fnvpair_value_string(elem); | |
5524 | VERIFY0(zap_update(mos, objid, propname, | |
5525 | 1, strlen(strval) + 1, strval, tx)); | |
5526 | spa_history_log_internal(spa, "vdev set", tx, | |
5527 | "vdev_guid=%llu: %s=%s", | |
5528 | (u_longlong_t)vdev_guid, nvpair_name(elem), | |
5529 | strval); | |
5530 | } else if (nvpair_type(elem) == DATA_TYPE_UINT64) { | |
5531 | intval = fnvpair_value_uint64(elem); | |
5532 | ||
5533 | if (proptype == PROP_TYPE_INDEX) { | |
5534 | const char *unused; | |
5535 | VERIFY0(vdev_prop_index_to_string( | |
5536 | prop, intval, &unused)); | |
5537 | } | |
5538 | VERIFY0(zap_update(mos, objid, propname, | |
5539 | sizeof (uint64_t), 1, &intval, tx)); | |
5540 | spa_history_log_internal(spa, "vdev set", tx, | |
5541 | "vdev_guid=%llu: %s=%lld", | |
5542 | (u_longlong_t)vdev_guid, | |
5543 | nvpair_name(elem), (longlong_t)intval); | |
5544 | } else { | |
5545 | panic("invalid vdev property type %u", | |
5546 | nvpair_type(elem)); | |
5547 | } | |
5548 | } | |
5549 | ||
5550 | } | |
5551 | ||
5552 | mutex_exit(&spa->spa_props_lock); | |
5553 | } | |
5554 | ||
5555 | int | |
5556 | vdev_prop_set(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl) | |
5557 | { | |
5558 | spa_t *spa = vd->vdev_spa; | |
5559 | nvpair_t *elem = NULL; | |
5560 | uint64_t vdev_guid; | |
5561 | nvlist_t *nvprops; | |
5562 | int error; | |
5563 | ||
5564 | ASSERT(vd != NULL); | |
5565 | ||
5566 | if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_SET_VDEV, | |
5567 | &vdev_guid) != 0) | |
5568 | return (SET_ERROR(EINVAL)); | |
5569 | ||
5570 | if (nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_SET_PROPS, | |
5571 | &nvprops) != 0) | |
5572 | return (SET_ERROR(EINVAL)); | |
5573 | ||
5574 | if ((vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE)) == NULL) | |
5575 | return (SET_ERROR(EINVAL)); | |
5576 | ||
5577 | while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) { | |
5578 | char *propname = nvpair_name(elem); | |
5579 | vdev_prop_t prop = vdev_name_to_prop(propname); | |
5580 | uint64_t intval = 0; | |
5581 | char *strval = NULL; | |
5582 | ||
5583 | if (prop == VDEV_PROP_USER && !vdev_prop_user(propname)) { | |
5584 | error = EINVAL; | |
5585 | goto end; | |
5586 | } | |
5587 | ||
5588 | if (vdev_prop_readonly(prop)) { | |
5589 | error = EROFS; | |
5590 | goto end; | |
5591 | } | |
5592 | ||
5593 | /* Special Processing */ | |
5594 | switch (prop) { | |
5595 | case VDEV_PROP_PATH: | |
5596 | if (vd->vdev_path == NULL) { | |
5597 | error = EROFS; | |
5598 | break; | |
5599 | } | |
5600 | if (nvpair_value_string(elem, &strval) != 0) { | |
5601 | error = EINVAL; | |
5602 | break; | |
5603 | } | |
5604 | /* New path must start with /dev/ */ | |
5605 | if (strncmp(strval, "/dev/", 5)) { | |
5606 | error = EINVAL; | |
5607 | break; | |
5608 | } | |
5609 | error = spa_vdev_setpath(spa, vdev_guid, strval); | |
5610 | break; | |
5611 | case VDEV_PROP_ALLOCATING: | |
5612 | if (nvpair_value_uint64(elem, &intval) != 0) { | |
5613 | error = EINVAL; | |
5614 | break; | |
5615 | } | |
5616 | if (intval != vd->vdev_noalloc) | |
5617 | break; | |
5618 | if (intval == 0) | |
5619 | error = spa_vdev_noalloc(spa, vdev_guid); | |
5620 | else | |
5621 | error = spa_vdev_alloc(spa, vdev_guid); | |
5622 | break; | |
5623 | default: | |
5624 | /* Most processing is done in vdev_props_set_sync */ | |
5625 | break; | |
5626 | } | |
5627 | end: | |
5628 | if (error != 0) { | |
5629 | intval = error; | |
5630 | vdev_prop_add_list(outnvl, propname, strval, intval, 0); | |
5631 | return (error); | |
5632 | } | |
5633 | } | |
5634 | ||
5635 | return (dsl_sync_task(spa->spa_name, NULL, vdev_props_set_sync, | |
5636 | innvl, 6, ZFS_SPACE_CHECK_EXTRA_RESERVED)); | |
5637 | } | |
5638 | ||
5639 | int | |
5640 | vdev_prop_get(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl) | |
5641 | { | |
5642 | spa_t *spa = vd->vdev_spa; | |
5643 | objset_t *mos = spa->spa_meta_objset; | |
5644 | int err = 0; | |
5645 | uint64_t objid; | |
5646 | uint64_t vdev_guid; | |
5647 | nvpair_t *elem = NULL; | |
5648 | nvlist_t *nvprops = NULL; | |
5649 | uint64_t intval = 0; | |
5650 | char *strval = NULL; | |
5651 | const char *propname = NULL; | |
5652 | vdev_prop_t prop; | |
5653 | ||
5654 | ASSERT(vd != NULL); | |
5655 | ASSERT(mos != NULL); | |
5656 | ||
5657 | if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_GET_VDEV, | |
5658 | &vdev_guid) != 0) | |
5659 | return (SET_ERROR(EINVAL)); | |
5660 | ||
5661 | nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_GET_PROPS, &nvprops); | |
5662 | ||
5663 | if (vd->vdev_top_zap != 0) { | |
5664 | objid = vd->vdev_top_zap; | |
5665 | } else if (vd->vdev_leaf_zap != 0) { | |
5666 | objid = vd->vdev_leaf_zap; | |
5667 | } else { | |
5668 | return (SET_ERROR(EINVAL)); | |
5669 | } | |
5670 | ASSERT(objid != 0); | |
5671 | ||
5672 | mutex_enter(&spa->spa_props_lock); | |
5673 | ||
5674 | if (nvprops != NULL) { | |
5675 | char namebuf[64] = { 0 }; | |
5676 | ||
		while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) {
			intval = 0;
			strval = NULL;
			propname = nvpair_name(elem);
			prop = vdev_name_to_prop(propname);
			zprop_source_t src = ZPROP_SRC_DEFAULT;
			uint64_t integer_size, num_integers;

			switch (prop) {
			/* Special Read-only Properties */
			case VDEV_PROP_NAME:
				strval = vdev_name(vd, namebuf,
				    sizeof (namebuf));
				if (strval == NULL)
					continue;
				vdev_prop_add_list(outnvl, propname, strval, 0,
				    ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_CAPACITY:
				/* percent used */
				intval = (vd->vdev_stat.vs_dspace == 0) ? 0 :
				    (vd->vdev_stat.vs_alloc * 100 /
				    vd->vdev_stat.vs_dspace);
				vdev_prop_add_list(outnvl, propname, NULL,
				    intval, ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_STATE:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_state, ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_GUID:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_guid, ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_ASIZE:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_asize, ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_PSIZE:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_psize, ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_ASHIFT:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_ashift, ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_SIZE:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_stat.vs_dspace, ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_FREE:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_stat.vs_dspace -
				    vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_ALLOCATED:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_EXPANDSZ:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_stat.vs_esize, ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_FRAGMENTATION:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_stat.vs_fragmentation,
				    ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_PARITY:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vdev_get_nparity(vd), ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_PATH:
				if (vd->vdev_path == NULL)
					continue;
				vdev_prop_add_list(outnvl, propname,
				    vd->vdev_path, 0, ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_DEVID:
				if (vd->vdev_devid == NULL)
					continue;
				vdev_prop_add_list(outnvl, propname,
				    vd->vdev_devid, 0, ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_PHYS_PATH:
				if (vd->vdev_physpath == NULL)
					continue;
				vdev_prop_add_list(outnvl, propname,
				    vd->vdev_physpath, 0, ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_ENC_PATH:
				if (vd->vdev_enc_sysfs_path == NULL)
					continue;
				vdev_prop_add_list(outnvl, propname,
				    vd->vdev_enc_sysfs_path, 0, ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_FRU:
				if (vd->vdev_fru == NULL)
					continue;
				vdev_prop_add_list(outnvl, propname,
				    vd->vdev_fru, 0, ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_PARENT:
				if (vd->vdev_parent != NULL) {
					strval = vdev_name(vd->vdev_parent,
					    namebuf, sizeof (namebuf));
					vdev_prop_add_list(outnvl, propname,
					    strval, 0, ZPROP_SRC_NONE);
				}
				continue;
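			/*
			 * Return the child vdev names as a single
			 * comma-separated string.
			 */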
			case VDEV_PROP_CHILDREN:
				if (vd->vdev_children > 0)
					strval = kmem_zalloc(ZAP_MAXVALUELEN,
					    KM_SLEEP);
				for (uint64_t i = 0; i < vd->vdev_children;
				    i++) {
					char *vname;

					vname = vdev_name(vd->vdev_child[i],
					    namebuf, sizeof (namebuf));
					if (vname == NULL)
						vname = "(unknown)";
					if (strlen(strval) > 0)
						strlcat(strval, ",",
						    ZAP_MAXVALUELEN);
					strlcat(strval, vname, ZAP_MAXVALUELEN);
				}
				if (strval != NULL) {
					vdev_prop_add_list(outnvl, propname,
					    strval, 0, ZPROP_SRC_NONE);
					kmem_free(strval, ZAP_MAXVALUELEN);
				}
				continue;
			case VDEV_PROP_NUMCHILDREN:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_children, ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_READ_ERRORS:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_stat.vs_read_errors,
				    ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_WRITE_ERRORS:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_stat.vs_write_errors,
				    ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_CHECKSUM_ERRORS:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_stat.vs_checksum_errors,
				    ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_INITIALIZE_ERRORS:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_stat.vs_initialize_errors,
				    ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_OPS_NULL:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_stat.vs_ops[ZIO_TYPE_NULL],
				    ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_OPS_READ:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_stat.vs_ops[ZIO_TYPE_READ],
				    ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_OPS_WRITE:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_stat.vs_ops[ZIO_TYPE_WRITE],
				    ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_OPS_FREE:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_stat.vs_ops[ZIO_TYPE_FREE],
				    ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_OPS_CLAIM:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_stat.vs_ops[ZIO_TYPE_CLAIM],
				    ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_OPS_TRIM:
				/*
				 * TRIM ops and bytes are reported to user
				 * space as ZIO_TYPE_IOCTL.  This is done to
				 * preserve the vdev_stat_t structure layout
				 * for user space.
				 */
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_stat.vs_ops[ZIO_TYPE_IOCTL],
				    ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_BYTES_NULL:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_stat.vs_bytes[ZIO_TYPE_NULL],
				    ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_BYTES_READ:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_stat.vs_bytes[ZIO_TYPE_READ],
				    ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_BYTES_WRITE:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_stat.vs_bytes[ZIO_TYPE_WRITE],
				    ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_BYTES_FREE:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_stat.vs_bytes[ZIO_TYPE_FREE],
				    ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_BYTES_CLAIM:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_stat.vs_bytes[ZIO_TYPE_CLAIM],
				    ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_BYTES_TRIM:
				/*
				 * TRIM ops and bytes are reported to user
				 * space as ZIO_TYPE_IOCTL.  This is done to
				 * preserve the vdev_stat_t structure layout
				 * for user space.
				 */
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_stat.vs_bytes[ZIO_TYPE_IOCTL],
				    ZPROP_SRC_NONE);
				continue;
			case VDEV_PROP_REMOVING:
				vdev_prop_add_list(outnvl, propname, NULL,
				    vd->vdev_removing, ZPROP_SRC_NONE);
				continue;
			/* Numeric Properties */
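			/*
			 * Stored numeric properties are read from the vdev
			 * ZAP; a missing entry means the default value.
			 */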
			case VDEV_PROP_ALLOCATING:
				src = ZPROP_SRC_LOCAL;
				strval = NULL;

				err = zap_lookup(mos, objid, nvpair_name(elem),
				    sizeof (uint64_t), 1, &intval);
				if (err == ENOENT) {
					intval =
					    vdev_prop_default_numeric(prop);
					err = 0;
				} else if (err)
					break;
				if (intval == vdev_prop_default_numeric(prop))
					src = ZPROP_SRC_DEFAULT;

				/* Leaf vdevs cannot have this property */
				if (vd->vdev_mg == NULL &&
				    vd->vdev_top != NULL) {
					src = ZPROP_SRC_NONE;
					intval = ZPROP_BOOLEAN_NA;
				}

				vdev_prop_add_list(outnvl, propname, strval,
				    intval, src);
				break;
			/* Text Properties */
			case VDEV_PROP_COMMENT:
				/* Exists in the ZAP below */
				/* FALLTHRU */
			case VDEV_PROP_USER:
				/* User Properties */
				src = ZPROP_SRC_LOCAL;

				err = zap_length(mos, objid, nvpair_name(elem),
				    &integer_size, &num_integers);
				if (err)
					break;

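				/*
				 * A ZAP value width of 1 is a string; a
				 * width of 8 would be an integer, which
				 * user properties may not be.
				 */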
				switch (integer_size) {
				case 8:
					/* User properties cannot be integers */
					err = EINVAL;
					break;
				case 1:
					/* string property */
					strval = kmem_alloc(num_integers,
					    KM_SLEEP);
					err = zap_lookup(mos, objid,
					    nvpair_name(elem), 1,
					    num_integers, strval);
					if (err) {
						kmem_free(strval,
						    num_integers);
						break;
					}
					vdev_prop_add_list(outnvl, propname,
					    strval, 0, src);
					kmem_free(strval, num_integers);
					break;
				}
				break;
			default:
				err = ENOENT;
				break;
			}
			if (err)
				break;
		}
	} else {
		/*
		 * Get all properties from the MOS vdev property object.
		 */
		zap_cursor_t zc;
		zap_attribute_t za;
		for (zap_cursor_init(&zc, mos, objid);
		    (err = zap_cursor_retrieve(&zc, &za)) == 0;
		    zap_cursor_advance(&zc)) {
			intval = 0;
			strval = NULL;
			zprop_source_t src = ZPROP_SRC_DEFAULT;
			propname = za.za_name;
			prop = vdev_name_to_prop(propname);

			switch (za.za_integer_length) {
			case 8:
				/* We do not allow integer user properties */
				/* This is likely an internal value */
				break;
			case 1:
				/* string property */
				strval = kmem_alloc(za.za_num_integers,
				    KM_SLEEP);
				err = zap_lookup(mos, objid, za.za_name, 1,
				    za.za_num_integers, strval);
				if (err) {
					kmem_free(strval, za.za_num_integers);
					break;
				}
				vdev_prop_add_list(outnvl, propname, strval, 0,
				    src);
				kmem_free(strval, za.za_num_integers);
				break;

			default:
				break;
			}
		}
		zap_cursor_fini(&zc);
	}

	mutex_exit(&spa->spa_props_lock);
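	/*
	 * ENOENT just means the cursor reached the end or a property was
	 * never stored; it is not an error for the caller.
	 */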
	if (err && err != ENOENT) {
		return (err);
	}

	return (0);
}

EXPORT_SYMBOL(vdev_fault);
EXPORT_SYMBOL(vdev_degrade);
EXPORT_SYMBOL(vdev_online);
EXPORT_SYMBOL(vdev_offline);
EXPORT_SYMBOL(vdev_clear);

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_count, INT, ZMOD_RW,
	"Target number of metaslabs per top-level vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_shift, INT, ZMOD_RW,
	"Default limit for metaslab size");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, min_ms_count, INT, ZMOD_RW,
	"Minimum number of metaslabs per top-level vdev");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, ms_count_limit, INT, ZMOD_RW,
	"Practical upper limit of total metaslabs per top-level vdev");

ZFS_MODULE_PARAM(zfs, zfs_, slow_io_events_per_second, UINT, ZMOD_RW,
	"Rate limit slow IO (delay) events to this many per second");

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, checksum_events_per_second, UINT, ZMOD_RW,
	"Rate limit checksum events to this many checksum errors per second "
	"(do not set below ZED threshold).");
/* END CSTYLED */

ZFS_MODULE_PARAM(zfs, zfs_, scan_ignore_errors, INT, ZMOD_RW,
	"Ignore errors during resilver/scrub");

ZFS_MODULE_PARAM(zfs_vdev, vdev_, validate_skip, INT, ZMOD_RW,
	"Bypass vdev_validate()");

ZFS_MODULE_PARAM(zfs, zfs_, nocacheflush, INT, ZMOD_RW,
	"Disable cache flushes");

ZFS_MODULE_PARAM(zfs, zfs_, embedded_slog_min_ms, INT, ZMOD_RW,
	"Minimum number of metaslabs required to dedicate one for log blocks");

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, min_auto_ashift,
	param_set_min_auto_ashift, param_get_ulong, ZMOD_RW,
	"Minimum ashift used when creating new top-level vdevs");

ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, max_auto_ashift,
	param_set_max_auto_ashift, param_get_ulong, ZMOD_RW,
	"Maximum ashift used when optimizing for logical -> physical sector "
	"size on new top-level vdevs");
/* END CSTYLED */