]>
Commit | Line | Data |
---|---|---|
34dc7c2f BB |
1 | /* |
2 | * CDDL HEADER START | |
3 | * | |
4 | * The contents of this file are subject to the terms of the | |
5 | * Common Development and Distribution License (the "License"). | |
6 | * You may not use this file except in compliance with the License. | |
7 | * | |
8 | * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE | |
1d3ba0bf | 9 | * or https://opensource.org/licenses/CDDL-1.0. |
34dc7c2f BB |
10 | * See the License for the specific language governing permissions |
11 | * and limitations under the License. | |
12 | * | |
13 | * When distributing Covered Code, include this CDDL HEADER in each | |
14 | * file and include the License file at usr/src/OPENSOLARIS.LICENSE. | |
15 | * If applicable, add the following below this CDDL HEADER, with the | |
16 | * fields enclosed by brackets "[]" replaced with your own identifying | |
17 | * information: Portions Copyright [yyyy] [name of copyright owner] | |
18 | * | |
19 | * CDDL HEADER END | |
20 | */ | |
21 | ||
22 | /* | |
428870ff | 23 | * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. |
03e02e5b | 24 | * Copyright (c) 2011, 2021 by Delphix. All rights reserved. |
153b2285 | 25 | * Copyright 2017 Nexenta Systems, Inc. |
e550644f BB |
26 | * Copyright (c) 2014 Integros [integros.com] |
27 | * Copyright 2016 Toomas Soome <tsoome@me.com> | |
12fa0466 | 28 | * Copyright 2017 Joyent, Inc. |
cc99f275 | 29 | * Copyright (c) 2017, Intel Corporation. |
3c819a2c | 30 | * Copyright (c) 2019, Datto Inc. All rights reserved. |
2a673e76 | 31 | * Copyright (c) 2021, Klara Inc. |
9d618615 | 32 | * Copyright (c) 2021, 2023 Hewlett Packard Enterprise Development LP. |
34dc7c2f BB |
33 | */ |
34 | ||
34dc7c2f BB |
35 | #include <sys/zfs_context.h> |
36 | #include <sys/fm/fs/zfs.h> | |
37 | #include <sys/spa.h> | |
38 | #include <sys/spa_impl.h> | |
a1d477c2 | 39 | #include <sys/bpobj.h> |
34dc7c2f BB |
40 | #include <sys/dmu.h> |
41 | #include <sys/dmu_tx.h> | |
a1d477c2 | 42 | #include <sys/dsl_dir.h> |
34dc7c2f | 43 | #include <sys/vdev_impl.h> |
9a49d3f3 | 44 | #include <sys/vdev_rebuild.h> |
b2255edc | 45 | #include <sys/vdev_draid.h> |
34dc7c2f BB |
46 | #include <sys/uberblock_impl.h> |
47 | #include <sys/metaslab.h> | |
48 | #include <sys/metaslab_impl.h> | |
49 | #include <sys/space_map.h> | |
93cf2076 | 50 | #include <sys/space_reftree.h> |
34dc7c2f BB |
51 | #include <sys/zio.h> |
52 | #include <sys/zap.h> | |
53 | #include <sys/fs/zfs.h> | |
b128c09f | 54 | #include <sys/arc.h> |
9babb374 | 55 | #include <sys/zil.h> |
428870ff | 56 | #include <sys/dsl_scan.h> |
b2255edc | 57 | #include <sys/vdev_raidz.h> |
a6255b7f | 58 | #include <sys/abd.h> |
619f0976 | 59 | #include <sys/vdev_initialize.h> |
1b939560 | 60 | #include <sys/vdev_trim.h> |
6c285672 | 61 | #include <sys/zvol.h> |
6078881a | 62 | #include <sys/zfs_ratelimit.h> |
2a673e76 | 63 | #include "zfs_prop.h" |
34dc7c2f | 64 | |
aa755b35 MA |
65 | /* |
66 | * One metaslab from each (normal-class) vdev is used by the ZIL. These are | |
67 | * called "embedded slog metaslabs", are referenced by vdev_log_mg, and are | |
68 | * part of the spa_embedded_log_class. The metaslab with the most free space | |
69 | * in each vdev is selected for this purpose when the pool is opened (or a | |
70 | * vdev is added). See vdev_metaslab_init(). | |
71 | * | |
72 | * Log blocks can be allocated from the following locations. Each one is tried | |
73 | * in order until the allocation succeeds: | |
74 | * 1. dedicated log vdevs, aka "slog" (spa_log_class) | |
75 | * 2. embedded slog metaslabs (spa_embedded_log_class) | |
76 | * 3. other metaslabs in normal vdevs (spa_normal_class) | |
77 | * | |
78 | * zfs_embedded_slog_min_ms disables the embedded slog if there are fewer | |
79 | * than this number of metaslabs in the vdev. This ensures that we don't set | |
80 | * aside an unreasonable amount of space for the ZIL. If set to less than | |
81 | * 1 << (spa_slop_shift + 1), on small pools the usable space may be reduced | |
82 | * (by more than 1<<spa_slop_shift) due to the embedded slog metaslab. | |
83 | */ | |
fdc2d303 | 84 | static uint_t zfs_embedded_slog_min_ms = 64; |
aa755b35 | 85 | |
c853f382 | 86 | /* default target for number of metaslabs per top-level vdev */ |
fdc2d303 | 87 | static uint_t zfs_vdev_default_ms_count = 200; |
d2734cce | 88 | |
e4e94ca3 | 89 | /* minimum number of metaslabs per top-level vdev */ |
fdc2d303 | 90 | static uint_t zfs_vdev_min_ms_count = 16; |
d2734cce | 91 | |
e4e94ca3 | 92 | /* practical upper limit of total metaslabs per top-level vdev */ |
fdc2d303 | 93 | static uint_t zfs_vdev_ms_count_limit = 1ULL << 17; |
e4e94ca3 DB |
94 | |
95 | /* lower limit for metaslab size (512M) */ | |
fdc2d303 | 96 | static uint_t zfs_vdev_default_ms_shift = 29; |
d2734cce | 97 | |
c853f382 | 98 | /* upper limit for metaslab size (16G) */ |
ff73574c | 99 | static uint_t zfs_vdev_max_ms_shift = 34; |
e4e94ca3 | 100 | |
d2734cce SD |
101 | int vdev_validate_skip = B_FALSE; |
102 | ||
b8bcca18 | 103 | /* |
d2734cce SD |
104 | * Since the DTL space map of a vdev is not expected to have a lot of |
105 | * entries, we default its block size to 4K. | |
b8bcca18 | 106 | */ |
93e28d66 | 107 | int zfs_vdev_dtl_sm_blksz = (1 << 12); |
b8bcca18 | 108 | |
80d52c39 | 109 | /* |
ad796b8a | 110 | * Rate limit slow IO (delay) events to this many per second. |
80d52c39 | 111 | */ |
18168da7 | 112 | static unsigned int zfs_slow_io_events_per_second = 20; |
80d52c39 TH |
113 | |
114 | /* | |
115 | * Rate limit checksum events after this many checksum errors per second. | |
116 | */ | |
18168da7 | 117 | static unsigned int zfs_checksum_events_per_second = 20; |
80d52c39 | 118 | |
02638a30 TC |
119 | /* |
120 | * Ignore errors during scrub/resilver. Allows to work around resilver | |
121 | * upon import when there are pool errors. | |
122 | */ | |
18168da7 | 123 | static int zfs_scan_ignore_errors = 0; |
02638a30 | 124 | |
d2734cce SD |
125 | /* |
126 | * vdev-wide space maps that have lots of entries written to them at | |
127 | * the end of each transaction can benefit from a higher I/O bandwidth | |
128 | * (e.g. vdev_obsolete_sm), thus we default their block size to 128K. | |
129 | */ | |
93e28d66 | 130 | int zfs_vdev_standard_sm_blksz = (1 << 17); |
6cb8e530 | 131 | |
53b1f5ea PS |
132 | /* |
133 | * Tunable parameter for debugging or performance analysis. Setting this | |
134 | * will cause pool corruption on power loss if a volatile out-of-order | |
135 | * write cache is enabled. | |
136 | */ | |
137 | int zfs_nocacheflush = 0; | |
138 | ||
37f6845c AM |
139 | /* |
140 | * Maximum and minimum ashift values that can be automatically set based on | |
141 | * vdev's physical ashift (disk's physical sector size). While ASHIFT_MAX | |
142 | * is higher than the maximum value, it is intentionally limited here to not | |
143 | * excessively impact pool space efficiency. Higher ashift values may still | |
144 | * be forced by vdev logical ashift or by user via ashift property, but won't | |
145 | * be set automatically as a performance optimization. | |
146 | */ | |
ab8d9c17 RY |
147 | uint_t zfs_vdev_max_auto_ashift = 14; |
148 | uint_t zfs_vdev_min_auto_ashift = ASHIFT_MIN; | |
6fe3498c | 149 | |
4a0ee12a PZ |
150 | void |
151 | vdev_dbgmsg(vdev_t *vd, const char *fmt, ...) | |
152 | { | |
153 | va_list adx; | |
154 | char buf[256]; | |
155 | ||
156 | va_start(adx, fmt); | |
157 | (void) vsnprintf(buf, sizeof (buf), fmt, adx); | |
158 | va_end(adx); | |
159 | ||
160 | if (vd->vdev_path != NULL) { | |
161 | zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type, | |
162 | vd->vdev_path, buf); | |
163 | } else { | |
164 | zfs_dbgmsg("%s-%llu vdev (guid %llu): %s", | |
165 | vd->vdev_ops->vdev_op_type, | |
166 | (u_longlong_t)vd->vdev_id, | |
167 | (u_longlong_t)vd->vdev_guid, buf); | |
168 | } | |
169 | } | |
170 | ||
6cb8e530 PZ |
171 | void |
172 | vdev_dbgmsg_print_tree(vdev_t *vd, int indent) | |
173 | { | |
174 | char state[20]; | |
175 | ||
176 | if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) { | |
8e739b2c RE |
177 | zfs_dbgmsg("%*svdev %llu: %s", indent, "", |
178 | (u_longlong_t)vd->vdev_id, | |
6cb8e530 PZ |
179 | vd->vdev_ops->vdev_op_type); |
180 | return; | |
181 | } | |
182 | ||
183 | switch (vd->vdev_state) { | |
184 | case VDEV_STATE_UNKNOWN: | |
185 | (void) snprintf(state, sizeof (state), "unknown"); | |
186 | break; | |
187 | case VDEV_STATE_CLOSED: | |
188 | (void) snprintf(state, sizeof (state), "closed"); | |
189 | break; | |
190 | case VDEV_STATE_OFFLINE: | |
191 | (void) snprintf(state, sizeof (state), "offline"); | |
192 | break; | |
193 | case VDEV_STATE_REMOVED: | |
194 | (void) snprintf(state, sizeof (state), "removed"); | |
195 | break; | |
196 | case VDEV_STATE_CANT_OPEN: | |
197 | (void) snprintf(state, sizeof (state), "can't open"); | |
198 | break; | |
199 | case VDEV_STATE_FAULTED: | |
200 | (void) snprintf(state, sizeof (state), "faulted"); | |
201 | break; | |
202 | case VDEV_STATE_DEGRADED: | |
203 | (void) snprintf(state, sizeof (state), "degraded"); | |
204 | break; | |
205 | case VDEV_STATE_HEALTHY: | |
206 | (void) snprintf(state, sizeof (state), "healthy"); | |
207 | break; | |
208 | default: | |
209 | (void) snprintf(state, sizeof (state), "<state %u>", | |
210 | (uint_t)vd->vdev_state); | |
211 | } | |
212 | ||
213 | zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent, | |
e902ddb0 | 214 | "", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type, |
6cb8e530 PZ |
215 | vd->vdev_islog ? " (log)" : "", |
216 | (u_longlong_t)vd->vdev_guid, | |
217 | vd->vdev_path ? vd->vdev_path : "N/A", state); | |
218 | ||
219 | for (uint64_t i = 0; i < vd->vdev_children; i++) | |
220 | vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2); | |
221 | } | |
222 | ||
34dc7c2f BB |
223 | /* |
224 | * Virtual device management. | |
225 | */ | |
226 | ||
a2d5643f | 227 | static vdev_ops_t *const vdev_ops_table[] = { |
34dc7c2f BB |
228 | &vdev_root_ops, |
229 | &vdev_raidz_ops, | |
b2255edc BB |
230 | &vdev_draid_ops, |
231 | &vdev_draid_spare_ops, | |
34dc7c2f BB |
232 | &vdev_mirror_ops, |
233 | &vdev_replacing_ops, | |
234 | &vdev_spare_ops, | |
235 | &vdev_disk_ops, | |
236 | &vdev_file_ops, | |
237 | &vdev_missing_ops, | |
428870ff | 238 | &vdev_hole_ops, |
a1d477c2 | 239 | &vdev_indirect_ops, |
34dc7c2f BB |
240 | NULL |
241 | }; | |
242 | ||
34dc7c2f BB |
243 | /* |
244 | * Given a vdev type, return the appropriate ops vector. | |
245 | */ | |
246 | static vdev_ops_t * | |
247 | vdev_getops(const char *type) | |
248 | { | |
a2d5643f | 249 | vdev_ops_t *ops, *const *opspp; |
34dc7c2f BB |
250 | |
251 | for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++) | |
252 | if (strcmp(ops->vdev_op_type, type) == 0) | |
253 | break; | |
254 | ||
255 | return (ops); | |
256 | } | |
257 | ||
aa755b35 MA |
258 | /* |
259 | * Given a vdev and a metaslab class, find which metaslab group we're | |
260 | * interested in. All vdevs may belong to two different metaslab classes. | |
261 | * Dedicated slog devices use only the primary metaslab group, rather than a | |
262 | * separate log group. For embedded slogs, the vdev_log_mg will be non-NULL. | |
263 | */ | |
264 | metaslab_group_t * | |
265 | vdev_get_mg(vdev_t *vd, metaslab_class_t *mc) | |
266 | { | |
267 | if (mc == spa_embedded_log_class(vd->vdev_spa) && | |
268 | vd->vdev_log_mg != NULL) | |
269 | return (vd->vdev_log_mg); | |
270 | else | |
271 | return (vd->vdev_mg); | |
272 | } | |
273 | ||
619f0976 | 274 | void |
b2255edc BB |
275 | vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs, |
276 | range_seg64_t *physical_rs, range_seg64_t *remain_rs) | |
619f0976 | 277 | { |
14e4e3cb AZ |
278 | (void) vd, (void) remain_rs; |
279 | ||
b2255edc BB |
280 | physical_rs->rs_start = logical_rs->rs_start; |
281 | physical_rs->rs_end = logical_rs->rs_end; | |
619f0976 GW |
282 | } |
283 | ||
cc99f275 | 284 | /* |
e1cfd73f | 285 | * Derive the enumerated allocation bias from string input. |
76d04993 | 286 | * String origin is either the per-vdev zap or zpool(8). |
cc99f275 DB |
287 | */ |
288 | static vdev_alloc_bias_t | |
289 | vdev_derive_alloc_bias(const char *bias) | |
290 | { | |
291 | vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE; | |
292 | ||
293 | if (strcmp(bias, VDEV_ALLOC_BIAS_LOG) == 0) | |
294 | alloc_bias = VDEV_BIAS_LOG; | |
295 | else if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) | |
296 | alloc_bias = VDEV_BIAS_SPECIAL; | |
297 | else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0) | |
298 | alloc_bias = VDEV_BIAS_DEDUP; | |
299 | ||
300 | return (alloc_bias); | |
301 | } | |
302 | ||
34dc7c2f BB |
303 | /* |
304 | * Default asize function: return the MAX of psize with the asize of | |
305 | * all children. This is what's used by anything other than RAID-Z. | |
306 | */ | |
307 | uint64_t | |
308 | vdev_default_asize(vdev_t *vd, uint64_t psize) | |
309 | { | |
310 | uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift); | |
311 | uint64_t csize; | |
34dc7c2f | 312 | |
1c27024e | 313 | for (int c = 0; c < vd->vdev_children; c++) { |
34dc7c2f BB |
314 | csize = vdev_psize_to_asize(vd->vdev_child[c], psize); |
315 | asize = MAX(asize, csize); | |
316 | } | |
317 | ||
318 | return (asize); | |
319 | } | |
320 | ||
b2255edc BB |
321 | uint64_t |
322 | vdev_default_min_asize(vdev_t *vd) | |
323 | { | |
324 | return (vd->vdev_min_asize); | |
325 | } | |
326 | ||
34dc7c2f | 327 | /* |
9babb374 BB |
328 | * Get the minimum allocatable size. We define the allocatable size as |
329 | * the vdev's asize rounded to the nearest metaslab. This allows us to | |
330 | * replace or attach devices which don't have the same physical size but | |
331 | * can still satisfy the same number of allocations. | |
34dc7c2f BB |
332 | */ |
333 | uint64_t | |
9babb374 | 334 | vdev_get_min_asize(vdev_t *vd) |
34dc7c2f | 335 | { |
9babb374 | 336 | vdev_t *pvd = vd->vdev_parent; |
34dc7c2f | 337 | |
9babb374 | 338 | /* |
1bd201e7 | 339 | * If our parent is NULL (inactive spare or cache) or is the root, |
9babb374 BB |
340 | * just return our own asize. |
341 | */ | |
342 | if (pvd == NULL) | |
343 | return (vd->vdev_asize); | |
34dc7c2f BB |
344 | |
345 | /* | |
9babb374 BB |
346 | * The top-level vdev just returns the allocatable size rounded |
347 | * to the nearest metaslab. | |
34dc7c2f | 348 | */ |
9babb374 BB |
349 | if (vd == vd->vdev_top) |
350 | return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift)); | |
34dc7c2f | 351 | |
b2255edc | 352 | return (pvd->vdev_ops->vdev_op_min_asize(pvd)); |
9babb374 BB |
353 | } |
354 | ||
355 | void | |
356 | vdev_set_min_asize(vdev_t *vd) | |
357 | { | |
358 | vd->vdev_min_asize = vdev_get_min_asize(vd); | |
34dc7c2f | 359 | |
1c27024e | 360 | for (int c = 0; c < vd->vdev_children; c++) |
9babb374 | 361 | vdev_set_min_asize(vd->vdev_child[c]); |
34dc7c2f BB |
362 | } |
363 | ||
b2255edc BB |
364 | /* |
365 | * Get the minimal allocation size for the top-level vdev. | |
366 | */ | |
367 | uint64_t | |
368 | vdev_get_min_alloc(vdev_t *vd) | |
369 | { | |
370 | uint64_t min_alloc = 1ULL << vd->vdev_ashift; | |
371 | ||
372 | if (vd->vdev_ops->vdev_op_min_alloc != NULL) | |
373 | min_alloc = vd->vdev_ops->vdev_op_min_alloc(vd); | |
374 | ||
375 | return (min_alloc); | |
376 | } | |
377 | ||
378 | /* | |
379 | * Get the parity level for a top-level vdev. | |
380 | */ | |
381 | uint64_t | |
382 | vdev_get_nparity(vdev_t *vd) | |
383 | { | |
384 | uint64_t nparity = 0; | |
385 | ||
386 | if (vd->vdev_ops->vdev_op_nparity != NULL) | |
387 | nparity = vd->vdev_ops->vdev_op_nparity(vd); | |
388 | ||
389 | return (nparity); | |
390 | } | |
391 | ||
69f024a5 RW |
392 | static int |
393 | vdev_prop_get_int(vdev_t *vd, vdev_prop_t prop, uint64_t *value) | |
394 | { | |
395 | spa_t *spa = vd->vdev_spa; | |
396 | objset_t *mos = spa->spa_meta_objset; | |
397 | uint64_t objid; | |
398 | int err; | |
399 | ||
3e4ed421 RW |
400 | if (vd->vdev_root_zap != 0) { |
401 | objid = vd->vdev_root_zap; | |
402 | } else if (vd->vdev_top_zap != 0) { | |
69f024a5 RW |
403 | objid = vd->vdev_top_zap; |
404 | } else if (vd->vdev_leaf_zap != 0) { | |
405 | objid = vd->vdev_leaf_zap; | |
406 | } else { | |
407 | return (EINVAL); | |
408 | } | |
409 | ||
410 | err = zap_lookup(mos, objid, vdev_prop_to_name(prop), | |
411 | sizeof (uint64_t), 1, value); | |
412 | ||
413 | if (err == ENOENT) | |
414 | *value = vdev_prop_default_numeric(prop); | |
415 | ||
416 | return (err); | |
417 | } | |
418 | ||
b2255edc BB |
419 | /* |
420 | * Get the number of data disks for a top-level vdev. | |
421 | */ | |
422 | uint64_t | |
423 | vdev_get_ndisks(vdev_t *vd) | |
424 | { | |
425 | uint64_t ndisks = 1; | |
426 | ||
427 | if (vd->vdev_ops->vdev_op_ndisks != NULL) | |
428 | ndisks = vd->vdev_ops->vdev_op_ndisks(vd); | |
429 | ||
430 | return (ndisks); | |
431 | } | |
432 | ||
34dc7c2f BB |
433 | vdev_t * |
434 | vdev_lookup_top(spa_t *spa, uint64_t vdev) | |
435 | { | |
436 | vdev_t *rvd = spa->spa_root_vdev; | |
437 | ||
b128c09f | 438 | ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); |
34dc7c2f | 439 | |
b128c09f BB |
440 | if (vdev < rvd->vdev_children) { |
441 | ASSERT(rvd->vdev_child[vdev] != NULL); | |
34dc7c2f | 442 | return (rvd->vdev_child[vdev]); |
b128c09f | 443 | } |
34dc7c2f BB |
444 | |
445 | return (NULL); | |
446 | } | |
447 | ||
448 | vdev_t * | |
449 | vdev_lookup_by_guid(vdev_t *vd, uint64_t guid) | |
450 | { | |
34dc7c2f BB |
451 | vdev_t *mvd; |
452 | ||
453 | if (vd->vdev_guid == guid) | |
454 | return (vd); | |
455 | ||
1c27024e | 456 | for (int c = 0; c < vd->vdev_children; c++) |
34dc7c2f BB |
457 | if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) != |
458 | NULL) | |
459 | return (mvd); | |
460 | ||
461 | return (NULL); | |
462 | } | |
463 | ||
9c43027b AJ |
464 | static int |
465 | vdev_count_leaves_impl(vdev_t *vd) | |
466 | { | |
467 | int n = 0; | |
9c43027b AJ |
468 | |
469 | if (vd->vdev_ops->vdev_op_leaf) | |
470 | return (1); | |
471 | ||
1c27024e | 472 | for (int c = 0; c < vd->vdev_children; c++) |
9c43027b AJ |
473 | n += vdev_count_leaves_impl(vd->vdev_child[c]); |
474 | ||
475 | return (n); | |
476 | } | |
477 | ||
478 | int | |
479 | vdev_count_leaves(spa_t *spa) | |
480 | { | |
743253df OF |
481 | int rc; |
482 | ||
483 | spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); | |
484 | rc = vdev_count_leaves_impl(spa->spa_root_vdev); | |
485 | spa_config_exit(spa, SCL_VDEV, FTAG); | |
486 | ||
487 | return (rc); | |
9c43027b AJ |
488 | } |
489 | ||
34dc7c2f BB |
490 | void |
491 | vdev_add_child(vdev_t *pvd, vdev_t *cvd) | |
492 | { | |
493 | size_t oldsize, newsize; | |
494 | uint64_t id = cvd->vdev_id; | |
495 | vdev_t **newchild; | |
496 | ||
44de2f02 | 497 | ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); |
34dc7c2f BB |
498 | ASSERT(cvd->vdev_parent == NULL); |
499 | ||
500 | cvd->vdev_parent = pvd; | |
501 | ||
502 | if (pvd == NULL) | |
503 | return; | |
504 | ||
505 | ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL); | |
506 | ||
507 | oldsize = pvd->vdev_children * sizeof (vdev_t *); | |
508 | pvd->vdev_children = MAX(pvd->vdev_children, id + 1); | |
509 | newsize = pvd->vdev_children * sizeof (vdev_t *); | |
510 | ||
79c76d5b | 511 | newchild = kmem_alloc(newsize, KM_SLEEP); |
34dc7c2f | 512 | if (pvd->vdev_child != NULL) { |
861166b0 | 513 | memcpy(newchild, pvd->vdev_child, oldsize); |
34dc7c2f BB |
514 | kmem_free(pvd->vdev_child, oldsize); |
515 | } | |
516 | ||
517 | pvd->vdev_child = newchild; | |
518 | pvd->vdev_child[id] = cvd; | |
519 | ||
520 | cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd); | |
521 | ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL); | |
522 | ||
523 | /* | |
524 | * Walk up all ancestors to update guid sum. | |
525 | */ | |
526 | for (; pvd != NULL; pvd = pvd->vdev_parent) | |
527 | pvd->vdev_guid_sum += cvd->vdev_guid_sum; | |
3d31aad8 OF |
528 | |
529 | if (cvd->vdev_ops->vdev_op_leaf) { | |
530 | list_insert_head(&cvd->vdev_spa->spa_leaf_list, cvd); | |
531 | cvd->vdev_spa->spa_leaf_list_gen++; | |
532 | } | |
34dc7c2f BB |
533 | } |
534 | ||
535 | void | |
536 | vdev_remove_child(vdev_t *pvd, vdev_t *cvd) | |
537 | { | |
538 | int c; | |
539 | uint_t id = cvd->vdev_id; | |
540 | ||
541 | ASSERT(cvd->vdev_parent == pvd); | |
542 | ||
543 | if (pvd == NULL) | |
544 | return; | |
545 | ||
546 | ASSERT(id < pvd->vdev_children); | |
547 | ASSERT(pvd->vdev_child[id] == cvd); | |
548 | ||
549 | pvd->vdev_child[id] = NULL; | |
550 | cvd->vdev_parent = NULL; | |
551 | ||
552 | for (c = 0; c < pvd->vdev_children; c++) | |
553 | if (pvd->vdev_child[c]) | |
554 | break; | |
555 | ||
556 | if (c == pvd->vdev_children) { | |
557 | kmem_free(pvd->vdev_child, c * sizeof (vdev_t *)); | |
558 | pvd->vdev_child = NULL; | |
559 | pvd->vdev_children = 0; | |
560 | } | |
561 | ||
3d31aad8 OF |
562 | if (cvd->vdev_ops->vdev_op_leaf) { |
563 | spa_t *spa = cvd->vdev_spa; | |
564 | list_remove(&spa->spa_leaf_list, cvd); | |
565 | spa->spa_leaf_list_gen++; | |
566 | } | |
567 | ||
34dc7c2f BB |
568 | /* |
569 | * Walk up all ancestors to update guid sum. | |
570 | */ | |
571 | for (; pvd != NULL; pvd = pvd->vdev_parent) | |
572 | pvd->vdev_guid_sum -= cvd->vdev_guid_sum; | |
34dc7c2f BB |
573 | } |
574 | ||
575 | /* | |
576 | * Remove any holes in the child array. | |
577 | */ | |
578 | void | |
579 | vdev_compact_children(vdev_t *pvd) | |
580 | { | |
581 | vdev_t **newchild, *cvd; | |
582 | int oldc = pvd->vdev_children; | |
9babb374 | 583 | int newc; |
34dc7c2f | 584 | |
b128c09f | 585 | ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); |
34dc7c2f | 586 | |
a1d477c2 MA |
587 | if (oldc == 0) |
588 | return; | |
589 | ||
1c27024e | 590 | for (int c = newc = 0; c < oldc; c++) |
34dc7c2f BB |
591 | if (pvd->vdev_child[c]) |
592 | newc++; | |
593 | ||
a1d477c2 MA |
594 | if (newc > 0) { |
595 | newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP); | |
34dc7c2f | 596 | |
a1d477c2 MA |
597 | for (int c = newc = 0; c < oldc; c++) { |
598 | if ((cvd = pvd->vdev_child[c]) != NULL) { | |
599 | newchild[newc] = cvd; | |
600 | cvd->vdev_id = newc++; | |
601 | } | |
34dc7c2f | 602 | } |
a1d477c2 MA |
603 | } else { |
604 | newchild = NULL; | |
34dc7c2f BB |
605 | } |
606 | ||
607 | kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *)); | |
608 | pvd->vdev_child = newchild; | |
609 | pvd->vdev_children = newc; | |
610 | } | |
611 | ||
612 | /* | |
613 | * Allocate and minimally initialize a vdev_t. | |
614 | */ | |
428870ff | 615 | vdev_t * |
34dc7c2f BB |
616 | vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops) |
617 | { | |
618 | vdev_t *vd; | |
a1d477c2 | 619 | vdev_indirect_config_t *vic; |
34dc7c2f | 620 | |
79c76d5b | 621 | vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP); |
a1d477c2 | 622 | vic = &vd->vdev_indirect_config; |
34dc7c2f BB |
623 | |
624 | if (spa->spa_root_vdev == NULL) { | |
625 | ASSERT(ops == &vdev_root_ops); | |
626 | spa->spa_root_vdev = vd; | |
3541dc6d | 627 | spa->spa_load_guid = spa_generate_guid(NULL); |
34dc7c2f BB |
628 | } |
629 | ||
428870ff | 630 | if (guid == 0 && ops != &vdev_hole_ops) { |
34dc7c2f BB |
631 | if (spa->spa_root_vdev == vd) { |
632 | /* | |
633 | * The root vdev's guid will also be the pool guid, | |
634 | * which must be unique among all pools. | |
635 | */ | |
428870ff | 636 | guid = spa_generate_guid(NULL); |
34dc7c2f BB |
637 | } else { |
638 | /* | |
639 | * Any other vdev's guid must be unique within the pool. | |
640 | */ | |
428870ff | 641 | guid = spa_generate_guid(spa); |
34dc7c2f BB |
642 | } |
643 | ASSERT(!spa_guid_exists(spa_guid(spa), guid)); | |
644 | } | |
645 | ||
646 | vd->vdev_spa = spa; | |
647 | vd->vdev_id = id; | |
648 | vd->vdev_guid = guid; | |
649 | vd->vdev_guid_sum = guid; | |
650 | vd->vdev_ops = ops; | |
651 | vd->vdev_state = VDEV_STATE_CLOSED; | |
428870ff | 652 | vd->vdev_ishole = (ops == &vdev_hole_ops); |
a1d477c2 MA |
653 | vic->vic_prev_indirect_vdev = UINT64_MAX; |
654 | ||
655 | rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL); | |
656 | mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL); | |
ca577779 PD |
657 | vd->vdev_obsolete_segments = range_tree_create(NULL, RANGE_SEG64, NULL, |
658 | 0, 0); | |
34dc7c2f | 659 | |
6078881a TH |
660 | /* |
661 | * Initialize rate limit structs for events. We rate limit ZIO delay | |
662 | * and checksum events so that we don't overwhelm ZED with thousands | |
663 | * of events when a disk is acting up. | |
664 | */ | |
ad796b8a TH |
665 | zfs_ratelimit_init(&vd->vdev_delay_rl, &zfs_slow_io_events_per_second, |
666 | 1); | |
e778b048 RM |
667 | zfs_ratelimit_init(&vd->vdev_deadman_rl, &zfs_slow_io_events_per_second, |
668 | 1); | |
ad796b8a TH |
669 | zfs_ratelimit_init(&vd->vdev_checksum_rl, |
670 | &zfs_checksum_events_per_second, 1); | |
6078881a | 671 | |
69f024a5 RW |
672 | /* |
673 | * Default Thresholds for tuning ZED | |
674 | */ | |
675 | vd->vdev_checksum_n = vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_N); | |
676 | vd->vdev_checksum_t = vdev_prop_default_numeric(VDEV_PROP_CHECKSUM_T); | |
677 | vd->vdev_io_n = vdev_prop_default_numeric(VDEV_PROP_IO_N); | |
678 | vd->vdev_io_t = vdev_prop_default_numeric(VDEV_PROP_IO_T); | |
679 | ||
98f72a53 BB |
680 | list_link_init(&vd->vdev_config_dirty_node); |
681 | list_link_init(&vd->vdev_state_dirty_node); | |
c10d37dd | 682 | list_link_init(&vd->vdev_initialize_node); |
3d31aad8 | 683 | list_link_init(&vd->vdev_leaf_node); |
1b939560 | 684 | list_link_init(&vd->vdev_trim_node); |
b2255edc | 685 | |
448d7aaa | 686 | mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL); |
34dc7c2f | 687 | mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL); |
b128c09f | 688 | mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL); |
d4a72f23 | 689 | mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL); |
9a49d3f3 | 690 | |
619f0976 GW |
691 | mutex_init(&vd->vdev_initialize_lock, NULL, MUTEX_DEFAULT, NULL); |
692 | mutex_init(&vd->vdev_initialize_io_lock, NULL, MUTEX_DEFAULT, NULL); | |
693 | cv_init(&vd->vdev_initialize_cv, NULL, CV_DEFAULT, NULL); | |
694 | cv_init(&vd->vdev_initialize_io_cv, NULL, CV_DEFAULT, NULL); | |
9a49d3f3 | 695 | |
1b939560 BB |
696 | mutex_init(&vd->vdev_trim_lock, NULL, MUTEX_DEFAULT, NULL); |
697 | mutex_init(&vd->vdev_autotrim_lock, NULL, MUTEX_DEFAULT, NULL); | |
698 | mutex_init(&vd->vdev_trim_io_lock, NULL, MUTEX_DEFAULT, NULL); | |
699 | cv_init(&vd->vdev_trim_cv, NULL, CV_DEFAULT, NULL); | |
700 | cv_init(&vd->vdev_autotrim_cv, NULL, CV_DEFAULT, NULL); | |
65d10bd8 | 701 | cv_init(&vd->vdev_autotrim_kick_cv, NULL, CV_DEFAULT, NULL); |
1b939560 | 702 | cv_init(&vd->vdev_trim_io_cv, NULL, CV_DEFAULT, NULL); |
6078881a | 703 | |
9a49d3f3 | 704 | mutex_init(&vd->vdev_rebuild_lock, NULL, MUTEX_DEFAULT, NULL); |
9a49d3f3 | 705 | cv_init(&vd->vdev_rebuild_cv, NULL, CV_DEFAULT, NULL); |
9a49d3f3 | 706 | |
1c27024e | 707 | for (int t = 0; t < DTL_TYPES; t++) { |
ca577779 PD |
708 | vd->vdev_dtl[t] = range_tree_create(NULL, RANGE_SEG64, NULL, 0, |
709 | 0); | |
fb5f0bc8 | 710 | } |
9a49d3f3 | 711 | |
4747a7d3 | 712 | txg_list_create(&vd->vdev_ms_list, spa, |
34dc7c2f | 713 | offsetof(struct metaslab, ms_txg_node)); |
4747a7d3 | 714 | txg_list_create(&vd->vdev_dtl_list, spa, |
34dc7c2f BB |
715 | offsetof(struct vdev, vdev_dtl_node)); |
716 | vd->vdev_stat.vs_timestamp = gethrtime(); | |
717 | vdev_queue_init(vd); | |
34dc7c2f BB |
718 | |
719 | return (vd); | |
720 | } | |
721 | ||
722 | /* | |
723 | * Allocate a new vdev. The 'alloctype' is used to control whether we are | |
724 | * creating a new vdev or loading an existing one - the behavior is slightly | |
725 | * different for each case. | |
726 | */ | |
727 | int | |
728 | vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id, | |
729 | int alloctype) | |
730 | { | |
731 | vdev_ops_t *ops; | |
d1807f16 | 732 | const char *type; |
b2255edc | 733 | uint64_t guid = 0, islog; |
34dc7c2f | 734 | vdev_t *vd; |
a1d477c2 | 735 | vdev_indirect_config_t *vic; |
d1807f16 | 736 | const char *tmp = NULL; |
4a283c7f | 737 | int rc; |
cc99f275 DB |
738 | vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE; |
739 | boolean_t top_level = (parent && !parent->vdev_parent); | |
34dc7c2f | 740 | |
b128c09f | 741 | ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); |
34dc7c2f BB |
742 | |
743 | if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) | |
2e528b49 | 744 | return (SET_ERROR(EINVAL)); |
34dc7c2f BB |
745 | |
746 | if ((ops = vdev_getops(type)) == NULL) | |
2e528b49 | 747 | return (SET_ERROR(EINVAL)); |
34dc7c2f BB |
748 | |
749 | /* | |
750 | * If this is a load, get the vdev guid from the nvlist. | |
751 | * Otherwise, vdev_alloc_common() will generate one for us. | |
752 | */ | |
753 | if (alloctype == VDEV_ALLOC_LOAD) { | |
754 | uint64_t label_id; | |
755 | ||
756 | if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) || | |
757 | label_id != id) | |
2e528b49 | 758 | return (SET_ERROR(EINVAL)); |
34dc7c2f BB |
759 | |
760 | if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) | |
2e528b49 | 761 | return (SET_ERROR(EINVAL)); |
34dc7c2f BB |
762 | } else if (alloctype == VDEV_ALLOC_SPARE) { |
763 | if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) | |
2e528b49 | 764 | return (SET_ERROR(EINVAL)); |
34dc7c2f BB |
765 | } else if (alloctype == VDEV_ALLOC_L2CACHE) { |
766 | if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) | |
2e528b49 | 767 | return (SET_ERROR(EINVAL)); |
9babb374 BB |
768 | } else if (alloctype == VDEV_ALLOC_ROOTPOOL) { |
769 | if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) | |
2e528b49 | 770 | return (SET_ERROR(EINVAL)); |
34dc7c2f BB |
771 | } |
772 | ||
773 | /* | |
774 | * The first allocated vdev must be of type 'root'. | |
775 | */ | |
776 | if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL) | |
2e528b49 | 777 | return (SET_ERROR(EINVAL)); |
34dc7c2f BB |
778 | |
779 | /* | |
780 | * Determine whether we're a log vdev. | |
781 | */ | |
782 | islog = 0; | |
783 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog); | |
784 | if (islog && spa_version(spa) < SPA_VERSION_SLOGS) | |
2e528b49 | 785 | return (SET_ERROR(ENOTSUP)); |
34dc7c2f | 786 | |
428870ff | 787 | if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES) |
2e528b49 | 788 | return (SET_ERROR(ENOTSUP)); |
428870ff | 789 | |
cc99f275 | 790 | if (top_level && alloctype == VDEV_ALLOC_ADD) { |
d1807f16 | 791 | const char *bias; |
cc99f275 | 792 | |
b2255edc BB |
793 | /* |
794 | * If creating a top-level vdev, check for allocation | |
795 | * classes input. | |
796 | */ | |
cc99f275 DB |
797 | if (nvlist_lookup_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS, |
798 | &bias) == 0) { | |
799 | alloc_bias = vdev_derive_alloc_bias(bias); | |
800 | ||
801 | /* spa_vdev_add() expects feature to be enabled */ | |
802 | if (spa->spa_load_state != SPA_LOAD_CREATE && | |
803 | !spa_feature_is_enabled(spa, | |
804 | SPA_FEATURE_ALLOCATION_CLASSES)) { | |
805 | return (SET_ERROR(ENOTSUP)); | |
806 | } | |
807 | } | |
b2255edc BB |
808 | |
809 | /* spa_vdev_add() expects feature to be enabled */ | |
810 | if (ops == &vdev_draid_ops && | |
811 | spa->spa_load_state != SPA_LOAD_CREATE && | |
812 | !spa_feature_is_enabled(spa, SPA_FEATURE_DRAID)) { | |
813 | return (SET_ERROR(ENOTSUP)); | |
814 | } | |
cc99f275 DB |
815 | } |
816 | ||
b2255edc BB |
817 | /* |
818 | * Initialize the vdev specific data. This is done before calling | |
819 | * vdev_alloc_common() since it may fail and this simplifies the | |
820 | * error reporting and cleanup code paths. | |
821 | */ | |
822 | void *tsd = NULL; | |
823 | if (ops->vdev_op_init != NULL) { | |
824 | rc = ops->vdev_op_init(spa, nv, &tsd); | |
825 | if (rc != 0) { | |
826 | return (rc); | |
827 | } | |
828 | } | |
34dc7c2f | 829 | |
b2255edc BB |
830 | vd = vdev_alloc_common(spa, id, guid, ops); |
831 | vd->vdev_tsd = tsd; | |
34dc7c2f | 832 | vd->vdev_islog = islog; |
b2255edc | 833 | |
cc99f275 DB |
834 | if (top_level && alloc_bias != VDEV_BIAS_NONE) |
835 | vd->vdev_alloc_bias = alloc_bias; | |
34dc7c2f | 836 | |
d1807f16 RY |
837 | if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &tmp) == 0) |
838 | vd->vdev_path = spa_strdup(tmp); | |
4a283c7f TH |
839 | |
840 | /* | |
841 | * ZPOOL_CONFIG_AUX_STATE = "external" means we previously forced a | |
842 | * fault on a vdev and want it to persist across imports (like with | |
843 | * zpool offline -f). | |
844 | */ | |
845 | rc = nvlist_lookup_string(nv, ZPOOL_CONFIG_AUX_STATE, &tmp); | |
846 | if (rc == 0 && tmp != NULL && strcmp(tmp, "external") == 0) { | |
847 | vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL; | |
848 | vd->vdev_faulted = 1; | |
849 | vd->vdev_label_aux = VDEV_AUX_EXTERNAL; | |
850 | } | |
851 | ||
d1807f16 RY |
852 | if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &tmp) == 0) |
853 | vd->vdev_devid = spa_strdup(tmp); | |
854 | if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH, &tmp) == 0) | |
855 | vd->vdev_physpath = spa_strdup(tmp); | |
1bbd8770 TH |
856 | |
857 | if (nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, | |
d1807f16 RY |
858 | &tmp) == 0) |
859 | vd->vdev_enc_sysfs_path = spa_strdup(tmp); | |
1bbd8770 | 860 | |
d1807f16 RY |
861 | if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &tmp) == 0) |
862 | vd->vdev_fru = spa_strdup(tmp); | |
34dc7c2f BB |
863 | |
864 | /* | |
865 | * Set the whole_disk property. If it's not specified, leave the value | |
866 | * as -1. | |
867 | */ | |
868 | if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, | |
869 | &vd->vdev_wholedisk) != 0) | |
870 | vd->vdev_wholedisk = -1ULL; | |
871 | ||
b2255edc BB |
872 | vic = &vd->vdev_indirect_config; |
873 | ||
a1d477c2 MA |
874 | ASSERT0(vic->vic_mapping_object); |
875 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT, | |
876 | &vic->vic_mapping_object); | |
877 | ASSERT0(vic->vic_births_object); | |
878 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS, | |
879 | &vic->vic_births_object); | |
880 | ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX); | |
881 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV, | |
882 | &vic->vic_prev_indirect_vdev); | |
883 | ||
34dc7c2f BB |
884 | /* |
885 | * Look for the 'not present' flag. This will only be set if the device | |
886 | * was not present at the time of import. | |
887 | */ | |
9babb374 BB |
888 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, |
889 | &vd->vdev_not_present); | |
34dc7c2f BB |
890 | |
891 | /* | |
4d2dad04 AH |
892 | * Get the alignment requirement. Ignore pool ashift for vdev |
893 | * attach case. | |
34dc7c2f | 894 | */ |
4d2dad04 AH |
895 | if (alloctype != VDEV_ALLOC_ATTACH) { |
896 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, | |
897 | &vd->vdev_ashift); | |
898 | } else { | |
899 | vd->vdev_attaching = B_TRUE; | |
900 | } | |
34dc7c2f | 901 | |
428870ff BB |
902 | /* |
903 | * Retrieve the vdev creation time. | |
904 | */ | |
905 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, | |
906 | &vd->vdev_crtxg); | |
907 | ||
3e4ed421 RW |
908 | if (vd->vdev_ops == &vdev_root_ops && |
909 | (alloctype == VDEV_ALLOC_LOAD || | |
910 | alloctype == VDEV_ALLOC_SPLIT || | |
911 | alloctype == VDEV_ALLOC_ROOTPOOL)) { | |
912 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_ROOT_ZAP, | |
913 | &vd->vdev_root_zap); | |
914 | } | |
915 | ||
34dc7c2f BB |
916 | /* |
917 | * If we're a top-level vdev, try to load the allocation parameters. | |
918 | */ | |
cc99f275 | 919 | if (top_level && |
428870ff | 920 | (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) { |
34dc7c2f BB |
921 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY, |
922 | &vd->vdev_ms_array); | |
923 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT, | |
924 | &vd->vdev_ms_shift); | |
925 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE, | |
926 | &vd->vdev_asize); | |
2a673e76 AJ |
927 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NONALLOCATING, |
928 | &vd->vdev_noalloc); | |
428870ff BB |
929 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING, |
930 | &vd->vdev_removing); | |
e0ab3ab5 JS |
931 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP, |
932 | &vd->vdev_top_zap); | |
933 | } else { | |
934 | ASSERT0(vd->vdev_top_zap); | |
428870ff BB |
935 | } |
936 | ||
cc99f275 | 937 | if (top_level && alloctype != VDEV_ALLOC_ATTACH) { |
428870ff BB |
938 | ASSERT(alloctype == VDEV_ALLOC_LOAD || |
939 | alloctype == VDEV_ALLOC_ADD || | |
940 | alloctype == VDEV_ALLOC_SPLIT || | |
941 | alloctype == VDEV_ALLOC_ROOTPOOL); | |
cc99f275 | 942 | /* Note: metaslab_group_create() is now deferred */ |
34dc7c2f BB |
943 | } |
944 | ||
e0ab3ab5 JS |
945 | if (vd->vdev_ops->vdev_op_leaf && |
946 | (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) { | |
947 | (void) nvlist_lookup_uint64(nv, | |
948 | ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap); | |
949 | } else { | |
950 | ASSERT0(vd->vdev_leaf_zap); | |
951 | } | |
952 | ||
34dc7c2f BB |
953 | /* |
954 | * If we're a leaf vdev, try to load the DTL object and other state. | |
955 | */ | |
e0ab3ab5 | 956 | |
b128c09f | 957 | if (vd->vdev_ops->vdev_op_leaf && |
9babb374 BB |
958 | (alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE || |
959 | alloctype == VDEV_ALLOC_ROOTPOOL)) { | |
b128c09f BB |
960 | if (alloctype == VDEV_ALLOC_LOAD) { |
961 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL, | |
93cf2076 | 962 | &vd->vdev_dtl_object); |
b128c09f BB |
963 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE, |
964 | &vd->vdev_unspare); | |
965 | } | |
9babb374 BB |
966 | |
967 | if (alloctype == VDEV_ALLOC_ROOTPOOL) { | |
968 | uint64_t spare = 0; | |
969 | ||
970 | if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, | |
971 | &spare) == 0 && spare) | |
972 | spa_spare_add(vd); | |
973 | } | |
974 | ||
34dc7c2f BB |
975 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, |
976 | &vd->vdev_offline); | |
b128c09f | 977 | |
5d1f7fb6 GW |
978 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG, |
979 | &vd->vdev_resilver_txg); | |
572e2857 | 980 | |
9a49d3f3 BB |
981 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REBUILD_TXG, |
982 | &vd->vdev_rebuild_txg); | |
983 | ||
80a91e74 | 984 | if (nvlist_exists(nv, ZPOOL_CONFIG_RESILVER_DEFER)) |
3c819a2c | 985 | vdev_defer_resilver(vd); |
80a91e74 | 986 | |
34dc7c2f | 987 | /* |
4a283c7f TH |
988 | * In general, when importing a pool we want to ignore the |
989 | * persistent fault state, as the diagnosis made on another | |
990 | * system may not be valid in the current context. The only | |
991 | * exception is if we forced a vdev to a persistently faulted | |
992 | * state with 'zpool offline -f'. The persistent fault will | |
993 | * remain across imports until cleared. | |
994 | * | |
995 | * Local vdevs will remain in the faulted state. | |
34dc7c2f | 996 | */ |
4a283c7f TH |
997 | if (spa_load_state(spa) == SPA_LOAD_OPEN || |
998 | spa_load_state(spa) == SPA_LOAD_IMPORT) { | |
34dc7c2f BB |
999 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, |
1000 | &vd->vdev_faulted); | |
1001 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED, | |
1002 | &vd->vdev_degraded); | |
1003 | (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, | |
1004 | &vd->vdev_removed); | |
428870ff BB |
1005 | |
1006 | if (vd->vdev_faulted || vd->vdev_degraded) { | |
d1807f16 | 1007 | const char *aux; |
428870ff BB |
1008 | |
1009 | vd->vdev_label_aux = | |
1010 | VDEV_AUX_ERR_EXCEEDED; | |
1011 | if (nvlist_lookup_string(nv, | |
1012 | ZPOOL_CONFIG_AUX_STATE, &aux) == 0 && | |
1013 | strcmp(aux, "external") == 0) | |
1014 | vd->vdev_label_aux = VDEV_AUX_EXTERNAL; | |
a0ad7ca5 TC |
1015 | else |
1016 | vd->vdev_faulted = 0ULL; | |
428870ff | 1017 | } |
34dc7c2f BB |
1018 | } |
1019 | } | |
1020 | ||
1021 | /* | |
1022 | * Add ourselves to the parent's list of children. | |
1023 | */ | |
1024 | vdev_add_child(parent, vd); | |
1025 | ||
1026 | *vdp = vd; | |
1027 | ||
1028 | return (0); | |
1029 | } | |
1030 | ||
1031 | void | |
1032 | vdev_free(vdev_t *vd) | |
1033 | { | |
34dc7c2f | 1034 | spa_t *spa = vd->vdev_spa; |
1b939560 | 1035 | |
619f0976 | 1036 | ASSERT3P(vd->vdev_initialize_thread, ==, NULL); |
1b939560 BB |
1037 | ASSERT3P(vd->vdev_trim_thread, ==, NULL); |
1038 | ASSERT3P(vd->vdev_autotrim_thread, ==, NULL); | |
9a49d3f3 | 1039 | ASSERT3P(vd->vdev_rebuild_thread, ==, NULL); |
34dc7c2f | 1040 | |
d4a72f23 TC |
1041 | /* |
1042 | * Scan queues are normally destroyed at the end of a scan. If the | |
1043 | * queue exists here, that implies the vdev is being removed while | |
1044 | * the scan is still running. | |
1045 | */ | |
1046 | if (vd->vdev_scan_io_queue != NULL) { | |
1047 | mutex_enter(&vd->vdev_scan_io_queue_lock); | |
1048 | dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue); | |
1049 | vd->vdev_scan_io_queue = NULL; | |
1050 | mutex_exit(&vd->vdev_scan_io_queue_lock); | |
1051 | } | |
1052 | ||
34dc7c2f BB |
1053 | /* |
1054 | * vdev_free() implies closing the vdev first. This is simpler than | |
1055 | * trying to ensure complicated semantics for all callers. | |
1056 | */ | |
1057 | vdev_close(vd); | |
1058 | ||
b128c09f | 1059 | ASSERT(!list_link_active(&vd->vdev_config_dirty_node)); |
428870ff | 1060 | ASSERT(!list_link_active(&vd->vdev_state_dirty_node)); |
34dc7c2f BB |
1061 | |
1062 | /* | |
1063 | * Free all children. | |
1064 | */ | |
1c27024e | 1065 | for (int c = 0; c < vd->vdev_children; c++) |
34dc7c2f BB |
1066 | vdev_free(vd->vdev_child[c]); |
1067 | ||
1068 | ASSERT(vd->vdev_child == NULL); | |
1069 | ASSERT(vd->vdev_guid_sum == vd->vdev_guid); | |
1070 | ||
b2255edc BB |
1071 | if (vd->vdev_ops->vdev_op_fini != NULL) |
1072 | vd->vdev_ops->vdev_op_fini(vd); | |
1073 | ||
34dc7c2f BB |
1074 | /* |
1075 | * Discard allocation state. | |
1076 | */ | |
428870ff | 1077 | if (vd->vdev_mg != NULL) { |
34dc7c2f | 1078 | vdev_metaslab_fini(vd); |
428870ff | 1079 | metaslab_group_destroy(vd->vdev_mg); |
93e28d66 | 1080 | vd->vdev_mg = NULL; |
428870ff | 1081 | } |
aa755b35 MA |
1082 | if (vd->vdev_log_mg != NULL) { |
1083 | ASSERT0(vd->vdev_ms_count); | |
1084 | metaslab_group_destroy(vd->vdev_log_mg); | |
1085 | vd->vdev_log_mg = NULL; | |
1086 | } | |
34dc7c2f | 1087 | |
c99c9001 MS |
1088 | ASSERT0(vd->vdev_stat.vs_space); |
1089 | ASSERT0(vd->vdev_stat.vs_dspace); | |
1090 | ASSERT0(vd->vdev_stat.vs_alloc); | |
34dc7c2f BB |
1091 | |
1092 | /* | |
1093 | * Remove this vdev from its parent's child list. | |
1094 | */ | |
1095 | vdev_remove_child(vd->vdev_parent, vd); | |
1096 | ||
1097 | ASSERT(vd->vdev_parent == NULL); | |
3d31aad8 | 1098 | ASSERT(!list_link_active(&vd->vdev_leaf_node)); |
34dc7c2f BB |
1099 | |
1100 | /* | |
1101 | * Clean up vdev structure. | |
1102 | */ | |
1103 | vdev_queue_fini(vd); | |
34dc7c2f BB |
1104 | |
1105 | if (vd->vdev_path) | |
1106 | spa_strfree(vd->vdev_path); | |
1107 | if (vd->vdev_devid) | |
1108 | spa_strfree(vd->vdev_devid); | |
1109 | if (vd->vdev_physpath) | |
1110 | spa_strfree(vd->vdev_physpath); | |
1bbd8770 TH |
1111 | |
1112 | if (vd->vdev_enc_sysfs_path) | |
1113 | spa_strfree(vd->vdev_enc_sysfs_path); | |
1114 | ||
9babb374 BB |
1115 | if (vd->vdev_fru) |
1116 | spa_strfree(vd->vdev_fru); | |
34dc7c2f BB |
1117 | |
1118 | if (vd->vdev_isspare) | |
1119 | spa_spare_remove(vd); | |
1120 | if (vd->vdev_isl2cache) | |
1121 | spa_l2cache_remove(vd); | |
1122 | ||
1123 | txg_list_destroy(&vd->vdev_ms_list); | |
1124 | txg_list_destroy(&vd->vdev_dtl_list); | |
fb5f0bc8 | 1125 | |
34dc7c2f | 1126 | mutex_enter(&vd->vdev_dtl_lock); |
93cf2076 | 1127 | space_map_close(vd->vdev_dtl_sm); |
1c27024e | 1128 | for (int t = 0; t < DTL_TYPES; t++) { |
93cf2076 GW |
1129 | range_tree_vacate(vd->vdev_dtl[t], NULL, NULL); |
1130 | range_tree_destroy(vd->vdev_dtl[t]); | |
fb5f0bc8 | 1131 | } |
34dc7c2f | 1132 | mutex_exit(&vd->vdev_dtl_lock); |
fb5f0bc8 | 1133 | |
a1d477c2 MA |
1134 | EQUIV(vd->vdev_indirect_births != NULL, |
1135 | vd->vdev_indirect_mapping != NULL); | |
1136 | if (vd->vdev_indirect_births != NULL) { | |
1137 | vdev_indirect_mapping_close(vd->vdev_indirect_mapping); | |
1138 | vdev_indirect_births_close(vd->vdev_indirect_births); | |
1139 | } | |
1140 | ||
1141 | if (vd->vdev_obsolete_sm != NULL) { | |
1142 | ASSERT(vd->vdev_removing || | |
1143 | vd->vdev_ops == &vdev_indirect_ops); | |
1144 | space_map_close(vd->vdev_obsolete_sm); | |
1145 | vd->vdev_obsolete_sm = NULL; | |
1146 | } | |
1147 | range_tree_destroy(vd->vdev_obsolete_segments); | |
1148 | rw_destroy(&vd->vdev_indirect_rwlock); | |
1149 | mutex_destroy(&vd->vdev_obsolete_lock); | |
1150 | ||
34dc7c2f BB |
1151 | mutex_destroy(&vd->vdev_dtl_lock); |
1152 | mutex_destroy(&vd->vdev_stat_lock); | |
b128c09f | 1153 | mutex_destroy(&vd->vdev_probe_lock); |
d4a72f23 | 1154 | mutex_destroy(&vd->vdev_scan_io_queue_lock); |
9a49d3f3 | 1155 | |
619f0976 GW |
1156 | mutex_destroy(&vd->vdev_initialize_lock); |
1157 | mutex_destroy(&vd->vdev_initialize_io_lock); | |
1158 | cv_destroy(&vd->vdev_initialize_io_cv); | |
1159 | cv_destroy(&vd->vdev_initialize_cv); | |
9a49d3f3 | 1160 | |
1b939560 BB |
1161 | mutex_destroy(&vd->vdev_trim_lock); |
1162 | mutex_destroy(&vd->vdev_autotrim_lock); | |
1163 | mutex_destroy(&vd->vdev_trim_io_lock); | |
1164 | cv_destroy(&vd->vdev_trim_cv); | |
1165 | cv_destroy(&vd->vdev_autotrim_cv); | |
65d10bd8 | 1166 | cv_destroy(&vd->vdev_autotrim_kick_cv); |
1b939560 | 1167 | cv_destroy(&vd->vdev_trim_io_cv); |
34dc7c2f | 1168 | |
9a49d3f3 | 1169 | mutex_destroy(&vd->vdev_rebuild_lock); |
9a49d3f3 | 1170 | cv_destroy(&vd->vdev_rebuild_cv); |
9a49d3f3 | 1171 | |
c17486b2 | 1172 | zfs_ratelimit_fini(&vd->vdev_delay_rl); |
e778b048 | 1173 | zfs_ratelimit_fini(&vd->vdev_deadman_rl); |
c17486b2 GN |
1174 | zfs_ratelimit_fini(&vd->vdev_checksum_rl); |
1175 | ||
34dc7c2f BB |
1176 | if (vd == spa->spa_root_vdev) |
1177 | spa->spa_root_vdev = NULL; | |
1178 | ||
1179 | kmem_free(vd, sizeof (vdev_t)); | |
1180 | } | |
1181 | ||
1182 | /* | |
1183 | * Transfer top-level vdev state from svd to tvd. | |
1184 | */ | |
1185 | static void | |
1186 | vdev_top_transfer(vdev_t *svd, vdev_t *tvd) | |
1187 | { | |
1188 | spa_t *spa = svd->vdev_spa; | |
1189 | metaslab_t *msp; | |
1190 | vdev_t *vd; | |
1191 | int t; | |
1192 | ||
1193 | ASSERT(tvd == tvd->vdev_top); | |
1194 | ||
1195 | tvd->vdev_ms_array = svd->vdev_ms_array; | |
1196 | tvd->vdev_ms_shift = svd->vdev_ms_shift; | |
1197 | tvd->vdev_ms_count = svd->vdev_ms_count; | |
e0ab3ab5 | 1198 | tvd->vdev_top_zap = svd->vdev_top_zap; |
34dc7c2f BB |
1199 | |
1200 | svd->vdev_ms_array = 0; | |
1201 | svd->vdev_ms_shift = 0; | |
1202 | svd->vdev_ms_count = 0; | |
e0ab3ab5 | 1203 | svd->vdev_top_zap = 0; |
34dc7c2f | 1204 | |
5ffb9d1d GW |
1205 | if (tvd->vdev_mg) |
1206 | ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg); | |
aa755b35 MA |
1207 | if (tvd->vdev_log_mg) |
1208 | ASSERT3P(tvd->vdev_log_mg, ==, svd->vdev_log_mg); | |
34dc7c2f | 1209 | tvd->vdev_mg = svd->vdev_mg; |
aa755b35 | 1210 | tvd->vdev_log_mg = svd->vdev_log_mg; |
34dc7c2f BB |
1211 | tvd->vdev_ms = svd->vdev_ms; |
1212 | ||
1213 | svd->vdev_mg = NULL; | |
aa755b35 | 1214 | svd->vdev_log_mg = NULL; |
34dc7c2f BB |
1215 | svd->vdev_ms = NULL; |
1216 | ||
1217 | if (tvd->vdev_mg != NULL) | |
1218 | tvd->vdev_mg->mg_vd = tvd; | |
aa755b35 MA |
1219 | if (tvd->vdev_log_mg != NULL) |
1220 | tvd->vdev_log_mg->mg_vd = tvd; | |
34dc7c2f | 1221 | |
d2734cce SD |
1222 | tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm; |
1223 | svd->vdev_checkpoint_sm = NULL; | |
1224 | ||
cc99f275 DB |
1225 | tvd->vdev_alloc_bias = svd->vdev_alloc_bias; |
1226 | svd->vdev_alloc_bias = VDEV_BIAS_NONE; | |
1227 | ||
34dc7c2f BB |
1228 | tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc; |
1229 | tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space; | |
1230 | tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace; | |
1231 | ||
1232 | svd->vdev_stat.vs_alloc = 0; | |
1233 | svd->vdev_stat.vs_space = 0; | |
1234 | svd->vdev_stat.vs_dspace = 0; | |
1235 | ||
9e052db4 MA |
1236 | /* |
1237 | * State which may be set on a top-level vdev that's in the | |
1238 | * process of being removed. | |
1239 | */ | |
1240 | ASSERT0(tvd->vdev_indirect_config.vic_births_object); | |
1241 | ASSERT0(tvd->vdev_indirect_config.vic_mapping_object); | |
1242 | ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL); | |
1243 | ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL); | |
1244 | ASSERT3P(tvd->vdev_indirect_births, ==, NULL); | |
1245 | ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL); | |
2a673e76 | 1246 | ASSERT0(tvd->vdev_noalloc); |
9e052db4 | 1247 | ASSERT0(tvd->vdev_removing); |
9a49d3f3 | 1248 | ASSERT0(tvd->vdev_rebuilding); |
2a673e76 | 1249 | tvd->vdev_noalloc = svd->vdev_noalloc; |
9e052db4 | 1250 | tvd->vdev_removing = svd->vdev_removing; |
9a49d3f3 BB |
1251 | tvd->vdev_rebuilding = svd->vdev_rebuilding; |
1252 | tvd->vdev_rebuild_config = svd->vdev_rebuild_config; | |
9e052db4 MA |
1253 | tvd->vdev_indirect_config = svd->vdev_indirect_config; |
1254 | tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping; | |
1255 | tvd->vdev_indirect_births = svd->vdev_indirect_births; | |
1256 | range_tree_swap(&svd->vdev_obsolete_segments, | |
1257 | &tvd->vdev_obsolete_segments); | |
1258 | tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm; | |
1259 | svd->vdev_indirect_config.vic_mapping_object = 0; | |
1260 | svd->vdev_indirect_config.vic_births_object = 0; | |
1261 | svd->vdev_indirect_config.vic_prev_indirect_vdev = -1ULL; | |
1262 | svd->vdev_indirect_mapping = NULL; | |
1263 | svd->vdev_indirect_births = NULL; | |
1264 | svd->vdev_obsolete_sm = NULL; | |
2a673e76 | 1265 | svd->vdev_noalloc = 0; |
9e052db4 | 1266 | svd->vdev_removing = 0; |
9a49d3f3 | 1267 | svd->vdev_rebuilding = 0; |
9e052db4 | 1268 | |
34dc7c2f BB |
1269 | for (t = 0; t < TXG_SIZE; t++) { |
1270 | while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL) | |
1271 | (void) txg_list_add(&tvd->vdev_ms_list, msp, t); | |
1272 | while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL) | |
1273 | (void) txg_list_add(&tvd->vdev_dtl_list, vd, t); | |
1274 | if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t)) | |
1275 | (void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t); | |
1276 | } | |
1277 | ||
b128c09f | 1278 | if (list_link_active(&svd->vdev_config_dirty_node)) { |
34dc7c2f BB |
1279 | vdev_config_clean(svd); |
1280 | vdev_config_dirty(tvd); | |
1281 | } | |
1282 | ||
b128c09f BB |
1283 | if (list_link_active(&svd->vdev_state_dirty_node)) { |
1284 | vdev_state_clean(svd); | |
1285 | vdev_state_dirty(tvd); | |
1286 | } | |
1287 | ||
34dc7c2f BB |
1288 | tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio; |
1289 | svd->vdev_deflate_ratio = 0; | |
1290 | ||
1291 | tvd->vdev_islog = svd->vdev_islog; | |
1292 | svd->vdev_islog = 0; | |
d4a72f23 TC |
1293 | |
1294 | dsl_scan_io_queue_vdev_xfer(svd, tvd); | |
34dc7c2f BB |
1295 | } |
1296 | ||
1297 | static void | |
1298 | vdev_top_update(vdev_t *tvd, vdev_t *vd) | |
1299 | { | |
34dc7c2f BB |
1300 | if (vd == NULL) |
1301 | return; | |
1302 | ||
1303 | vd->vdev_top = tvd; | |
1304 | ||
1c27024e | 1305 | for (int c = 0; c < vd->vdev_children; c++) |
34dc7c2f BB |
1306 | vdev_top_update(tvd, vd->vdev_child[c]); |
1307 | } | |
1308 | ||
1309 | /* | |
b2255edc BB |
1310 | * Add a mirror/replacing vdev above an existing vdev. There is no need to |
1311 | * call .vdev_op_init() since mirror/replacing vdevs do not have private state. | |
34dc7c2f BB |
1312 | */ |
1313 | vdev_t * | |
1314 | vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops) | |
1315 | { | |
1316 | spa_t *spa = cvd->vdev_spa; | |
1317 | vdev_t *pvd = cvd->vdev_parent; | |
1318 | vdev_t *mvd; | |
1319 | ||
b128c09f | 1320 | ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); |
34dc7c2f BB |
1321 | |
1322 | mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops); | |
1323 | ||
1324 | mvd->vdev_asize = cvd->vdev_asize; | |
9babb374 | 1325 | mvd->vdev_min_asize = cvd->vdev_min_asize; |
1bd201e7 | 1326 | mvd->vdev_max_asize = cvd->vdev_max_asize; |
a1d477c2 | 1327 | mvd->vdev_psize = cvd->vdev_psize; |
34dc7c2f | 1328 | mvd->vdev_ashift = cvd->vdev_ashift; |
6fe3498c RM |
1329 | mvd->vdev_logical_ashift = cvd->vdev_logical_ashift; |
1330 | mvd->vdev_physical_ashift = cvd->vdev_physical_ashift; | |
34dc7c2f | 1331 | mvd->vdev_state = cvd->vdev_state; |
428870ff | 1332 | mvd->vdev_crtxg = cvd->vdev_crtxg; |
34dc7c2f BB |
1333 | |
1334 | vdev_remove_child(pvd, cvd); | |
1335 | vdev_add_child(pvd, mvd); | |
1336 | cvd->vdev_id = mvd->vdev_children; | |
1337 | vdev_add_child(mvd, cvd); | |
1338 | vdev_top_update(cvd->vdev_top, cvd->vdev_top); | |
1339 | ||
1340 | if (mvd == mvd->vdev_top) | |
1341 | vdev_top_transfer(cvd, mvd); | |
1342 | ||
1343 | return (mvd); | |
1344 | } | |
1345 | ||
1346 | /* | |
1347 | * Remove a 1-way mirror/replacing vdev from the tree. | |
1348 | */ | |
1349 | void | |
1350 | vdev_remove_parent(vdev_t *cvd) | |
1351 | { | |
1352 | vdev_t *mvd = cvd->vdev_parent; | |
1353 | vdev_t *pvd = mvd->vdev_parent; | |
1354 | ||
b128c09f | 1355 | ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); |
34dc7c2f BB |
1356 | |
1357 | ASSERT(mvd->vdev_children == 1); | |
1358 | ASSERT(mvd->vdev_ops == &vdev_mirror_ops || | |
1359 | mvd->vdev_ops == &vdev_replacing_ops || | |
1360 | mvd->vdev_ops == &vdev_spare_ops); | |
1361 | cvd->vdev_ashift = mvd->vdev_ashift; | |
6fe3498c RM |
1362 | cvd->vdev_logical_ashift = mvd->vdev_logical_ashift; |
1363 | cvd->vdev_physical_ashift = mvd->vdev_physical_ashift; | |
34dc7c2f BB |
1364 | vdev_remove_child(mvd, cvd); |
1365 | vdev_remove_child(pvd, mvd); | |
fb5f0bc8 | 1366 | |
34dc7c2f | 1367 | /* |
b128c09f BB |
1368 | * If cvd will replace mvd as a top-level vdev, preserve mvd's guid. |
1369 | * Otherwise, we could have detached an offline device, and when we | |
1370 | * go to import the pool we'll think we have two top-level vdevs, | |
1371 | * instead of a different version of the same top-level vdev. | |
34dc7c2f | 1372 | */ |
fb5f0bc8 BB |
1373 | if (mvd->vdev_top == mvd) { |
1374 | uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid; | |
428870ff | 1375 | cvd->vdev_orig_guid = cvd->vdev_guid; |
fb5f0bc8 BB |
1376 | cvd->vdev_guid += guid_delta; |
1377 | cvd->vdev_guid_sum += guid_delta; | |
61e99a73 AB |
1378 | |
1379 | /* | |
1380 | * If pool not set for autoexpand, we need to also preserve | |
1381 | * mvd's asize to prevent automatic expansion of cvd. | |
1382 | * Otherwise if we are adjusting the mirror by attaching and | |
1383 | * detaching children of non-uniform sizes, the mirror could | |
1384 | * autoexpand, unexpectedly requiring larger devices to | |
1385 | * re-establish the mirror. | |
1386 | */ | |
1387 | if (!cvd->vdev_spa->spa_autoexpand) | |
1388 | cvd->vdev_asize = mvd->vdev_asize; | |
fb5f0bc8 | 1389 | } |
b128c09f BB |
1390 | cvd->vdev_id = mvd->vdev_id; |
1391 | vdev_add_child(pvd, cvd); | |
34dc7c2f BB |
1392 | vdev_top_update(cvd->vdev_top, cvd->vdev_top); |
1393 | ||
1394 | if (cvd == cvd->vdev_top) | |
1395 | vdev_top_transfer(mvd, cvd); | |
1396 | ||
1397 | ASSERT(mvd->vdev_children == 0); | |
1398 | vdev_free(mvd); | |
1399 | } | |
1400 | ||
d9bb583c AH |
1401 | /* |
1402 | * Choose GCD for spa_gcd_alloc. | |
1403 | */ | |
1404 | static uint64_t | |
1405 | vdev_gcd(uint64_t a, uint64_t b) | |
1406 | { | |
1407 | while (b != 0) { | |
1408 | uint64_t t = b; | |
1409 | b = a % b; | |
1410 | a = t; | |
1411 | } | |
1412 | return (a); | |
1413 | } | |
1414 | ||
1415 | /* | |
1416 | * Set spa_min_alloc and spa_gcd_alloc. | |
1417 | */ | |
1418 | static void | |
1419 | vdev_spa_set_alloc(spa_t *spa, uint64_t min_alloc) | |
1420 | { | |
1421 | if (min_alloc < spa->spa_min_alloc) | |
1422 | spa->spa_min_alloc = min_alloc; | |
1423 | if (spa->spa_gcd_alloc == INT_MAX) { | |
1424 | spa->spa_gcd_alloc = min_alloc; | |
1425 | } else { | |
1426 | spa->spa_gcd_alloc = vdev_gcd(min_alloc, | |
1427 | spa->spa_gcd_alloc); | |
1428 | } | |
1429 | } | |
1430 | ||
aa755b35 | 1431 | void |
cc99f275 DB |
1432 | vdev_metaslab_group_create(vdev_t *vd) |
1433 | { | |
1434 | spa_t *spa = vd->vdev_spa; | |
1435 | ||
1436 | /* | |
1437 | * metaslab_group_create was delayed until allocation bias was available | |
1438 | */ | |
1439 | if (vd->vdev_mg == NULL) { | |
1440 | metaslab_class_t *mc; | |
1441 | ||
1442 | if (vd->vdev_islog && vd->vdev_alloc_bias == VDEV_BIAS_NONE) | |
1443 | vd->vdev_alloc_bias = VDEV_BIAS_LOG; | |
1444 | ||
1445 | ASSERT3U(vd->vdev_islog, ==, | |
1446 | (vd->vdev_alloc_bias == VDEV_BIAS_LOG)); | |
1447 | ||
1448 | switch (vd->vdev_alloc_bias) { | |
1449 | case VDEV_BIAS_LOG: | |
1450 | mc = spa_log_class(spa); | |
1451 | break; | |
1452 | case VDEV_BIAS_SPECIAL: | |
1453 | mc = spa_special_class(spa); | |
1454 | break; | |
1455 | case VDEV_BIAS_DEDUP: | |
1456 | mc = spa_dedup_class(spa); | |
1457 | break; | |
1458 | default: | |
1459 | mc = spa_normal_class(spa); | |
1460 | } | |
1461 | ||
1462 | vd->vdev_mg = metaslab_group_create(mc, vd, | |
1463 | spa->spa_alloc_count); | |
1464 | ||
aa755b35 MA |
1465 | if (!vd->vdev_islog) { |
1466 | vd->vdev_log_mg = metaslab_group_create( | |
1467 | spa_embedded_log_class(spa), vd, 1); | |
1468 | } | |
1469 | ||
cc99f275 | 1470 | /* |
dff71c79 | 1471 | * The spa ashift min/max only apply for the normal metaslab |
bf169e9f | 1472 | * class. Class destination is late binding so ashift boundary |
dff71c79 | 1473 | * setting had to wait until now. |
cc99f275 DB |
1474 | */ |
1475 | if (vd->vdev_top == vd && vd->vdev_ashift != 0 && | |
1476 | mc == spa_normal_class(spa) && vd->vdev_aux == NULL) { | |
1477 | if (vd->vdev_ashift > spa->spa_max_ashift) | |
1478 | spa->spa_max_ashift = vd->vdev_ashift; | |
1479 | if (vd->vdev_ashift < spa->spa_min_ashift) | |
1480 | spa->spa_min_ashift = vd->vdev_ashift; | |
b2255edc BB |
1481 | |
1482 | uint64_t min_alloc = vdev_get_min_alloc(vd); | |
d9bb583c | 1483 | vdev_spa_set_alloc(spa, min_alloc); |
cc99f275 DB |
1484 | } |
1485 | } | |
1486 | } | |
1487 | ||
34dc7c2f BB |
1488 | int |
1489 | vdev_metaslab_init(vdev_t *vd, uint64_t txg) | |
1490 | { | |
1491 | spa_t *spa = vd->vdev_spa; | |
34dc7c2f BB |
1492 | uint64_t oldc = vd->vdev_ms_count; |
1493 | uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift; | |
1494 | metaslab_t **mspp; | |
1495 | int error; | |
cc99f275 | 1496 | boolean_t expanding = (oldc != 0); |
34dc7c2f | 1497 | |
428870ff BB |
1498 | ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER)); |
1499 | ||
1500 | /* | |
1501 | * This vdev is not being allocated from yet or is a hole. | |
1502 | */ | |
1503 | if (vd->vdev_ms_shift == 0) | |
34dc7c2f BB |
1504 | return (0); |
1505 | ||
428870ff BB |
1506 | ASSERT(!vd->vdev_ishole); |
1507 | ||
34dc7c2f BB |
1508 | ASSERT(oldc <= newc); |
1509 | ||
bffb68a2 | 1510 | mspp = vmem_zalloc(newc * sizeof (*mspp), KM_SLEEP); |
34dc7c2f | 1511 | |
cc99f275 | 1512 | if (expanding) { |
861166b0 | 1513 | memcpy(mspp, vd->vdev_ms, oldc * sizeof (*mspp)); |
bffb68a2 | 1514 | vmem_free(vd->vdev_ms, oldc * sizeof (*mspp)); |
34dc7c2f BB |
1515 | } |
1516 | ||
1517 | vd->vdev_ms = mspp; | |
1518 | vd->vdev_ms_count = newc; | |
93cf2076 | 1519 | |
aa755b35 MA |
1520 | for (uint64_t m = oldc; m < newc; m++) { |
1521 | uint64_t object = 0; | |
a1d477c2 MA |
1522 | /* |
1523 | * vdev_ms_array may be 0 if we are creating the "fake" | |
1524 | * metaslabs for an indirect vdev for zdb's leak detection. | |
1525 | * See zdb_leak_init(). | |
1526 | */ | |
1527 | if (txg == 0 && vd->vdev_ms_array != 0) { | |
aa755b35 MA |
1528 | error = dmu_read(spa->spa_meta_objset, |
1529 | vd->vdev_ms_array, | |
9babb374 BB |
1530 | m * sizeof (uint64_t), sizeof (uint64_t), &object, |
1531 | DMU_READ_PREFETCH); | |
4a0ee12a PZ |
1532 | if (error != 0) { |
1533 | vdev_dbgmsg(vd, "unable to read the metaslab " | |
1534 | "array [error=%d]", error); | |
34dc7c2f | 1535 | return (error); |
4a0ee12a | 1536 | } |
34dc7c2f | 1537 | } |
fb42a493 PS |
1538 | |
1539 | error = metaslab_init(vd->vdev_mg, m, object, txg, | |
1540 | &(vd->vdev_ms[m])); | |
4a0ee12a PZ |
1541 | if (error != 0) { |
1542 | vdev_dbgmsg(vd, "metaslab_init failed [error=%d]", | |
1543 | error); | |
fb42a493 | 1544 | return (error); |
4a0ee12a | 1545 | } |
34dc7c2f BB |
1546 | } |
1547 | ||
aa755b35 MA |
1548 | /* |
1549 | * Find the emptiest metaslab on the vdev and mark it for use for | |
1550 | * embedded slog by moving it from the regular to the log metaslab | |
1551 | * group. | |
1552 | */ | |
1553 | if (vd->vdev_mg->mg_class == spa_normal_class(spa) && | |
1554 | vd->vdev_ms_count > zfs_embedded_slog_min_ms && | |
1555 | avl_is_empty(&vd->vdev_log_mg->mg_metaslab_tree)) { | |
1556 | uint64_t slog_msid = 0; | |
1557 | uint64_t smallest = UINT64_MAX; | |
1558 | ||
1559 | /* | |
1560 | * Note, we only search the new metaslabs, because the old | |
1561 | * (pre-existing) ones may be active (e.g. have non-empty | |
1562 | * range_tree's), and we don't move them to the new | |
1563 | * metaslab_t. | |
1564 | */ | |
1565 | for (uint64_t m = oldc; m < newc; m++) { | |
1566 | uint64_t alloc = | |
1567 | space_map_allocated(vd->vdev_ms[m]->ms_sm); | |
1568 | if (alloc < smallest) { | |
1569 | slog_msid = m; | |
1570 | smallest = alloc; | |
1571 | } | |
1572 | } | |
1573 | metaslab_t *slog_ms = vd->vdev_ms[slog_msid]; | |
1574 | /* | |
1575 | * The metaslab was marked as dirty at the end of | |
1576 | * metaslab_init(). Remove it from the dirty list so that we | |
1577 | * can uninitialize and reinitialize it to the new class. | |
1578 | */ | |
1579 | if (txg != 0) { | |
1580 | (void) txg_list_remove_this(&vd->vdev_ms_list, | |
1581 | slog_ms, txg); | |
1582 | } | |
1583 | uint64_t sm_obj = space_map_object(slog_ms->ms_sm); | |
1584 | metaslab_fini(slog_ms); | |
1585 | VERIFY0(metaslab_init(vd->vdev_log_mg, slog_msid, sm_obj, txg, | |
1586 | &vd->vdev_ms[slog_msid])); | |
1587 | } | |
1588 | ||
428870ff BB |
1589 | if (txg == 0) |
1590 | spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER); | |
1591 | ||
1592 | /* | |
2a673e76 AJ |
1593 | * If the vdev is marked as non-allocating then don't |
1594 | * activate the metaslabs since we want to ensure that | |
1595 | * no allocations are performed on this device. | |
428870ff | 1596 | */ |
2a673e76 AJ |
1597 | if (vd->vdev_noalloc) { |
1598 | /* track non-allocating vdev space */ | |
1599 | spa->spa_nonallocating_dspace += spa_deflate(spa) ? | |
1600 | vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space; | |
1601 | } else if (!expanding) { | |
428870ff | 1602 | metaslab_group_activate(vd->vdev_mg); |
aa755b35 MA |
1603 | if (vd->vdev_log_mg != NULL) |
1604 | metaslab_group_activate(vd->vdev_log_mg); | |
cc99f275 | 1605 | } |
428870ff BB |
1606 | |
1607 | if (txg == 0) | |
1608 | spa_config_exit(spa, SCL_ALLOC, FTAG); | |
1609 | ||
34dc7c2f BB |
1610 | return (0); |
1611 | } | |
1612 | ||
1613 | void | |
1614 | vdev_metaslab_fini(vdev_t *vd) | |
1615 | { | |
d2734cce SD |
1616 | if (vd->vdev_checkpoint_sm != NULL) { |
1617 | ASSERT(spa_feature_is_active(vd->vdev_spa, | |
1618 | SPA_FEATURE_POOL_CHECKPOINT)); | |
1619 | space_map_close(vd->vdev_checkpoint_sm); | |
1620 | /* | |
1621 | * Even though we close the space map, we need to set its | |
1622 | * pointer to NULL. The reason is that vdev_metaslab_fini() | |
1623 | * may be called multiple times for certain operations | |
1624 | * (i.e. when destroying a pool) so we need to ensure that | |
1625 | * this clause never executes twice. This logic is similar | |
1626 | * to the one used for the vdev_ms clause below. | |
1627 | */ | |
1628 | vd->vdev_checkpoint_sm = NULL; | |
1629 | } | |
1630 | ||
34dc7c2f | 1631 | if (vd->vdev_ms != NULL) { |
928e8ad4 | 1632 | metaslab_group_t *mg = vd->vdev_mg; |
aa755b35 | 1633 | |
928e8ad4 | 1634 | metaslab_group_passivate(mg); |
aa755b35 MA |
1635 | if (vd->vdev_log_mg != NULL) { |
1636 | ASSERT(!vd->vdev_islog); | |
1637 | metaslab_group_passivate(vd->vdev_log_mg); | |
1638 | } | |
a1d477c2 | 1639 | |
928e8ad4 | 1640 | uint64_t count = vd->vdev_ms_count; |
a1d477c2 | 1641 | for (uint64_t m = 0; m < count; m++) { |
93cf2076 | 1642 | metaslab_t *msp = vd->vdev_ms[m]; |
93cf2076 GW |
1643 | if (msp != NULL) |
1644 | metaslab_fini(msp); | |
1645 | } | |
bffb68a2 | 1646 | vmem_free(vd->vdev_ms, count * sizeof (metaslab_t *)); |
34dc7c2f | 1647 | vd->vdev_ms = NULL; |
a1d477c2 | 1648 | vd->vdev_ms_count = 0; |
928e8ad4 | 1649 | |
aa755b35 | 1650 | for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) { |
928e8ad4 | 1651 | ASSERT0(mg->mg_histogram[i]); |
aa755b35 MA |
1652 | if (vd->vdev_log_mg != NULL) |
1653 | ASSERT0(vd->vdev_log_mg->mg_histogram[i]); | |
1654 | } | |
a1d477c2 MA |
1655 | } |
1656 | ASSERT0(vd->vdev_ms_count); | |
34dc7c2f BB |
1657 | } |
1658 | ||
b128c09f BB |
1659 | typedef struct vdev_probe_stats { |
1660 | boolean_t vps_readable; | |
1661 | boolean_t vps_writeable; | |
1662 | int vps_flags; | |
b128c09f BB |
1663 | } vdev_probe_stats_t; |
1664 | ||
1665 | static void | |
1666 | vdev_probe_done(zio_t *zio) | |
34dc7c2f | 1667 | { |
fb5f0bc8 | 1668 | spa_t *spa = zio->io_spa; |
d164b209 | 1669 | vdev_t *vd = zio->io_vd; |
b128c09f | 1670 | vdev_probe_stats_t *vps = zio->io_private; |
d164b209 BB |
1671 | |
1672 | ASSERT(vd->vdev_probe_zio != NULL); | |
b128c09f BB |
1673 | |
1674 | if (zio->io_type == ZIO_TYPE_READ) { | |
b128c09f BB |
1675 | if (zio->io_error == 0) |
1676 | vps->vps_readable = 1; | |
fb5f0bc8 | 1677 | if (zio->io_error == 0 && spa_writeable(spa)) { |
d164b209 | 1678 | zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd, |
a6255b7f | 1679 | zio->io_offset, zio->io_size, zio->io_abd, |
b128c09f BB |
1680 | ZIO_CHECKSUM_OFF, vdev_probe_done, vps, |
1681 | ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE)); | |
1682 | } else { | |
a6255b7f | 1683 | abd_free(zio->io_abd); |
b128c09f BB |
1684 | } |
1685 | } else if (zio->io_type == ZIO_TYPE_WRITE) { | |
b128c09f BB |
1686 | if (zio->io_error == 0) |
1687 | vps->vps_writeable = 1; | |
a6255b7f | 1688 | abd_free(zio->io_abd); |
b128c09f | 1689 | } else if (zio->io_type == ZIO_TYPE_NULL) { |
d164b209 | 1690 | zio_t *pio; |
3dfb57a3 | 1691 | zio_link_t *zl; |
b128c09f BB |
1692 | |
1693 | vd->vdev_cant_read |= !vps->vps_readable; | |
1694 | vd->vdev_cant_write |= !vps->vps_writeable; | |
1695 | ||
1696 | if (vdev_readable(vd) && | |
fb5f0bc8 | 1697 | (vdev_writeable(vd) || !spa_writeable(spa))) { |
b128c09f BB |
1698 | zio->io_error = 0; |
1699 | } else { | |
1700 | ASSERT(zio->io_error != 0); | |
4a0ee12a | 1701 | vdev_dbgmsg(vd, "failed probe"); |
1144586b | 1702 | (void) zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE, |
4f072827 | 1703 | spa, vd, NULL, NULL, 0); |
2e528b49 | 1704 | zio->io_error = SET_ERROR(ENXIO); |
b128c09f | 1705 | } |
d164b209 BB |
1706 | |
1707 | mutex_enter(&vd->vdev_probe_lock); | |
1708 | ASSERT(vd->vdev_probe_zio == zio); | |
1709 | vd->vdev_probe_zio = NULL; | |
1710 | mutex_exit(&vd->vdev_probe_lock); | |
1711 | ||
3dfb57a3 DB |
1712 | zl = NULL; |
1713 | while ((pio = zio_walk_parents(zio, &zl)) != NULL) | |
d164b209 | 1714 | if (!vdev_accessible(vd, pio)) |
2e528b49 | 1715 | pio->io_error = SET_ERROR(ENXIO); |
d164b209 | 1716 | |
b128c09f BB |
1717 | kmem_free(vps, sizeof (*vps)); |
1718 | } | |
1719 | } | |
34dc7c2f | 1720 | |
b128c09f | 1721 | /* |
d3cc8b15 WA |
1722 | * Determine whether this device is accessible. |
1723 | * | |
1724 | * Read and write to several known locations: the pad regions of each | |
1725 | * vdev label but the first, which we leave alone in case it contains | |
1726 | * a VTOC. | |
b128c09f BB |
1727 | */ |
1728 | zio_t * | |
d164b209 | 1729 | vdev_probe(vdev_t *vd, zio_t *zio) |
b128c09f BB |
1730 | { |
1731 | spa_t *spa = vd->vdev_spa; | |
d164b209 BB |
1732 | vdev_probe_stats_t *vps = NULL; |
1733 | zio_t *pio; | |
1734 | ||
1735 | ASSERT(vd->vdev_ops->vdev_op_leaf); | |
34dc7c2f | 1736 | |
d164b209 BB |
1737 | /* |
1738 | * Don't probe the probe. | |
1739 | */ | |
1740 | if (zio && (zio->io_flags & ZIO_FLAG_PROBE)) | |
1741 | return (NULL); | |
b128c09f | 1742 | |
d164b209 BB |
1743 | /* |
1744 | * To prevent 'probe storms' when a device fails, we create | |
1745 | * just one probe i/o at a time. All zios that want to probe | |
1746 | * this vdev will become parents of the probe io. | |
1747 | */ | |
1748 | mutex_enter(&vd->vdev_probe_lock); | |
b128c09f | 1749 | |
d164b209 | 1750 | if ((pio = vd->vdev_probe_zio) == NULL) { |
79c76d5b | 1751 | vps = kmem_zalloc(sizeof (*vps), KM_SLEEP); |
d164b209 BB |
1752 | |
1753 | vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE | | |
70ea484e | 1754 | ZIO_FLAG_DONT_AGGREGATE | ZIO_FLAG_TRYHARD; |
d164b209 BB |
1755 | |
1756 | if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) { | |
1757 | /* | |
1758 | * vdev_cant_read and vdev_cant_write can only | |
1759 | * transition from TRUE to FALSE when we have the | |
1760 | * SCL_ZIO lock as writer; otherwise they can only | |
1761 | * transition from FALSE to TRUE. This ensures that | |
1762 | * any zio looking at these values can assume that | |
1763 | * failures persist for the life of the I/O. That's | |
1764 | * important because when a device has intermittent | |
1765 | * connectivity problems, we want to ensure that | |
1766 | * they're ascribed to the device (ENXIO) and not | |
1767 | * the zio (EIO). | |
1768 | * | |
1769 | * Since we hold SCL_ZIO as writer here, clear both | |
1770 | * values so the probe can reevaluate from first | |
1771 | * principles. | |
1772 | */ | |
1773 | vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER; | |
1774 | vd->vdev_cant_read = B_FALSE; | |
1775 | vd->vdev_cant_write = B_FALSE; | |
1776 | } | |
1777 | ||
1778 | vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd, | |
1779 | vdev_probe_done, vps, | |
1780 | vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE); | |
1781 | ||
428870ff BB |
1782 | /* |
1783 | * We can't change the vdev state in this context, so we | |
1784 | * kick off an async task to do it on our behalf. | |
1785 | */ | |
d164b209 BB |
1786 | if (zio != NULL) { |
1787 | vd->vdev_probe_wanted = B_TRUE; | |
1788 | spa_async_request(spa, SPA_ASYNC_PROBE); | |
1789 | } | |
b128c09f BB |
1790 | } |
1791 | ||
d164b209 BB |
1792 | if (zio != NULL) |
1793 | zio_add_child(zio, pio); | |
b128c09f | 1794 | |
d164b209 | 1795 | mutex_exit(&vd->vdev_probe_lock); |
b128c09f | 1796 | |
d164b209 BB |
1797 | if (vps == NULL) { |
1798 | ASSERT(zio != NULL); | |
1799 | return (NULL); | |
1800 | } | |
b128c09f | 1801 | |
1c27024e | 1802 | for (int l = 1; l < VDEV_LABELS; l++) { |
d164b209 | 1803 | zio_nowait(zio_read_phys(pio, vd, |
b128c09f | 1804 | vdev_label_offset(vd->vdev_psize, l, |
108a454a | 1805 | offsetof(vdev_label_t, vl_be)), VDEV_PAD_SIZE, |
a6255b7f | 1806 | abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE), |
b128c09f BB |
1807 | ZIO_CHECKSUM_OFF, vdev_probe_done, vps, |
1808 | ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE)); | |
1809 | } | |
1810 | ||
d164b209 BB |
1811 | if (zio == NULL) |
1812 | return (pio); | |
1813 | ||
1814 | zio_nowait(pio); | |
1815 | return (NULL); | |
34dc7c2f BB |
1816 | } |
1817 | ||
a0e01997 AS |
1818 | static void |
1819 | vdev_load_child(void *arg) | |
1820 | { | |
1821 | vdev_t *vd = arg; | |
1822 | ||
1823 | vd->vdev_load_error = vdev_load(vd); | |
1824 | } | |
1825 | ||
45d1cae3 BB |
1826 | static void |
1827 | vdev_open_child(void *arg) | |
1828 | { | |
1829 | vdev_t *vd = arg; | |
1830 | ||
1831 | vd->vdev_open_thread = curthread; | |
1832 | vd->vdev_open_error = vdev_open(vd); | |
1833 | vd->vdev_open_thread = NULL; | |
1834 | } | |
1835 | ||
6c285672 | 1836 | static boolean_t |
428870ff BB |
1837 | vdev_uses_zvols(vdev_t *vd) |
1838 | { | |
6c285672 JL |
1839 | #ifdef _KERNEL |
1840 | if (zvol_is_zvol(vd->vdev_path)) | |
428870ff | 1841 | return (B_TRUE); |
6c285672 JL |
1842 | #endif |
1843 | ||
1c27024e | 1844 | for (int c = 0; c < vd->vdev_children; c++) |
428870ff BB |
1845 | if (vdev_uses_zvols(vd->vdev_child[c])) |
1846 | return (B_TRUE); | |
6c285672 | 1847 | |
428870ff BB |
1848 | return (B_FALSE); |
1849 | } | |
1850 | ||
b2255edc BB |
1851 | /* |
1852 | * Returns B_TRUE if the passed child should be opened. | |
1853 | */ | |
1854 | static boolean_t | |
1855 | vdev_default_open_children_func(vdev_t *vd) | |
1856 | { | |
14e4e3cb | 1857 | (void) vd; |
b2255edc BB |
1858 | return (B_TRUE); |
1859 | } | |
1860 | ||
1861 | /* | |
1862 | * Open the requested child vdevs. If any of the leaf vdevs are using | |
1863 | * a ZFS volume then do the opens in a single thread. This avoids a | |
1864 | * deadlock when the current thread is holding the spa_namespace_lock. | |
1865 | */ | |
1866 | static void | |
1867 | vdev_open_children_impl(vdev_t *vd, vdev_open_children_func_t *open_func) | |
45d1cae3 | 1868 | { |
45d1cae3 BB |
1869 | int children = vd->vdev_children; |
1870 | ||
b2255edc BB |
1871 | taskq_t *tq = taskq_create("vdev_open", children, minclsyspri, |
1872 | children, children, TASKQ_PREPOPULATE); | |
1873 | vd->vdev_nonrot = B_TRUE; | |
45d1cae3 | 1874 | |
b2255edc BB |
1875 | for (int c = 0; c < children; c++) { |
1876 | vdev_t *cvd = vd->vdev_child[c]; | |
1877 | ||
1878 | if (open_func(cvd) == B_FALSE) | |
1879 | continue; | |
1880 | ||
1881 | if (tq == NULL || vdev_uses_zvols(vd)) { | |
1882 | cvd->vdev_open_error = vdev_open(cvd); | |
1883 | } else { | |
4770aa06 | 1884 | VERIFY(taskq_dispatch(tq, vdev_open_child, |
b2255edc BB |
1885 | cvd, TQ_SLEEP) != TASKQID_INVALID); |
1886 | } | |
45d1cae3 | 1887 | |
b2255edc BB |
1888 | vd->vdev_nonrot &= cvd->vdev_nonrot; |
1889 | } | |
1890 | ||
1891 | if (tq != NULL) { | |
1892 | taskq_wait(tq); | |
4770aa06 HJ |
1893 | taskq_destroy(tq); |
1894 | } | |
b2255edc | 1895 | } |
4770aa06 | 1896 | |
b2255edc BB |
1897 | /* |
1898 | * Open all child vdevs. | |
1899 | */ | |
1900 | void | |
1901 | vdev_open_children(vdev_t *vd) | |
1902 | { | |
1903 | vdev_open_children_impl(vd, vdev_default_open_children_func); | |
1904 | } | |
fb40095f | 1905 | |
b2255edc BB |
1906 | /* |
1907 | * Conditionally open a subset of child vdevs. | |
1908 | */ | |
1909 | void | |
1910 | vdev_open_children_subset(vdev_t *vd, vdev_open_children_func_t *open_func) | |
1911 | { | |
1912 | vdev_open_children_impl(vd, open_func); | |
45d1cae3 BB |
1913 | } |
1914 | ||
a1d477c2 MA |
1915 | /* |
1916 | * Compute the raidz-deflation ratio. Note, we hard-code | |
1917 | * in 128k (1 << 17) because it is the "typical" blocksize. | |
1918 | * Even though SPA_MAXBLOCKSIZE changed, this algorithm can not change, | |
1919 | * otherwise it would inconsistently account for existing bp's. | |
1920 | */ | |
1921 | static void | |
1922 | vdev_set_deflate_ratio(vdev_t *vd) | |
1923 | { | |
1924 | if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) { | |
1925 | vd->vdev_deflate_ratio = (1 << 17) / | |
1926 | (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT); | |
1927 | } | |
1928 | } | |
1929 | ||
37f6845c AM |
1930 | /* |
1931 | * Choose the best of two ashifts, preferring one between logical ashift | |
1932 | * (absolute minimum) and administrator defined maximum, otherwise take | |
1933 | * the biggest of the two. | |
1934 | */ | |
1935 | uint64_t | |
1936 | vdev_best_ashift(uint64_t logical, uint64_t a, uint64_t b) | |
1937 | { | |
1938 | if (a > logical && a <= zfs_vdev_max_auto_ashift) { | |
1939 | if (b <= logical || b > zfs_vdev_max_auto_ashift) | |
1940 | return (a); | |
1941 | else | |
1942 | return (MAX(a, b)); | |
1943 | } else if (b <= logical || b > zfs_vdev_max_auto_ashift) | |
1944 | return (MAX(a, b)); | |
1945 | return (b); | |
1946 | } | |
1947 | ||
c494aa7f GW |
1948 | /* |
1949 | * Maximize performance by inflating the configured ashift for top level | |
1950 | * vdevs to be as close to the physical ashift as possible while maintaining | |
1951 | * administrator defined limits and ensuring it doesn't go below the | |
1952 | * logical ashift. | |
1953 | */ | |
1954 | static void | |
1955 | vdev_ashift_optimize(vdev_t *vd) | |
1956 | { | |
1957 | ASSERT(vd == vd->vdev_top); | |
1958 | ||
37f6845c AM |
1959 | if (vd->vdev_ashift < vd->vdev_physical_ashift && |
1960 | vd->vdev_physical_ashift <= zfs_vdev_max_auto_ashift) { | |
c494aa7f GW |
1961 | vd->vdev_ashift = MIN( |
1962 | MAX(zfs_vdev_max_auto_ashift, vd->vdev_ashift), | |
1963 | MAX(zfs_vdev_min_auto_ashift, | |
1964 | vd->vdev_physical_ashift)); | |
1965 | } else { | |
1966 | /* | |
1967 | * If the logical and physical ashifts are the same, then | |
1968 | * we ensure that the top-level vdev's ashift is not smaller | |
1969 | * than our minimum ashift value. For the unusual case | |
1970 | * where logical ashift > physical ashift, we can't cap | |
1971 | * the calculated ashift based on max ashift as that | |
1972 | * would cause failures. | |
1973 | * We still check if we need to increase it to match | |
1974 | * the min ashift. | |
1975 | */ | |
1976 | vd->vdev_ashift = MAX(zfs_vdev_min_auto_ashift, | |
1977 | vd->vdev_ashift); | |
1978 | } | |
1979 | } | |
1980 | ||
34dc7c2f BB |
1981 | /* |
1982 | * Prepare a virtual device for access. | |
1983 | */ | |
1984 | int | |
1985 | vdev_open(vdev_t *vd) | |
1986 | { | |
fb5f0bc8 | 1987 | spa_t *spa = vd->vdev_spa; |
34dc7c2f | 1988 | int error; |
34dc7c2f | 1989 | uint64_t osize = 0; |
1bd201e7 CS |
1990 | uint64_t max_osize = 0; |
1991 | uint64_t asize, max_asize, psize; | |
6fe3498c RM |
1992 | uint64_t logical_ashift = 0; |
1993 | uint64_t physical_ashift = 0; | |
34dc7c2f | 1994 | |
45d1cae3 BB |
1995 | ASSERT(vd->vdev_open_thread == curthread || |
1996 | spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); | |
34dc7c2f BB |
1997 | ASSERT(vd->vdev_state == VDEV_STATE_CLOSED || |
1998 | vd->vdev_state == VDEV_STATE_CANT_OPEN || | |
1999 | vd->vdev_state == VDEV_STATE_OFFLINE); | |
2000 | ||
34dc7c2f | 2001 | vd->vdev_stat.vs_aux = VDEV_AUX_NONE; |
9babb374 BB |
2002 | vd->vdev_cant_read = B_FALSE; |
2003 | vd->vdev_cant_write = B_FALSE; | |
2004 | vd->vdev_min_asize = vdev_get_min_asize(vd); | |
34dc7c2f | 2005 | |
428870ff BB |
2006 | /* |
2007 | * If this vdev is not removed, check its fault status. If it's | |
2008 | * faulted, bail out of the open. | |
2009 | */ | |
34dc7c2f BB |
2010 | if (!vd->vdev_removed && vd->vdev_faulted) { |
2011 | ASSERT(vd->vdev_children == 0); | |
428870ff BB |
2012 | ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED || |
2013 | vd->vdev_label_aux == VDEV_AUX_EXTERNAL); | |
34dc7c2f | 2014 | vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED, |
428870ff | 2015 | vd->vdev_label_aux); |
2e528b49 | 2016 | return (SET_ERROR(ENXIO)); |
34dc7c2f BB |
2017 | } else if (vd->vdev_offline) { |
2018 | ASSERT(vd->vdev_children == 0); | |
2019 | vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE); | |
2e528b49 | 2020 | return (SET_ERROR(ENXIO)); |
34dc7c2f BB |
2021 | } |
2022 | ||
6fe3498c RM |
2023 | error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, |
2024 | &logical_ashift, &physical_ashift); | |
55c12724 AH |
2025 | |
2026 | /* Keep the device in removed state if unplugged */ | |
2027 | if (error == ENOENT && vd->vdev_removed) { | |
2028 | vdev_set_state(vd, B_TRUE, VDEV_STATE_REMOVED, | |
2029 | VDEV_AUX_NONE); | |
2030 | return (error); | |
2031 | } | |
2032 | ||
0c637f31 | 2033 | /* |
2034 | * Physical volume size should never be larger than its max size, unless | |
2035 | * the disk has shrunk while we were reading it or the device is buggy | |
2036 | * or damaged: either way it's not safe for use, bail out of the open. | |
2037 | */ | |
2038 | if (osize > max_osize) { | |
2039 | vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, | |
2040 | VDEV_AUX_OPEN_FAILED); | |
2041 | return (SET_ERROR(ENXIO)); | |
2042 | } | |
2043 | ||
428870ff BB |
2044 | /* |
2045 | * Reset the vdev_reopening flag so that we actually close | |
2046 | * the vdev on error. | |
2047 | */ | |
2048 | vd->vdev_reopening = B_FALSE; | |
34dc7c2f | 2049 | if (zio_injection_enabled && error == 0) |
28caa74b | 2050 | error = zio_handle_device_injection(vd, NULL, SET_ERROR(ENXIO)); |
34dc7c2f BB |
2051 | |
2052 | if (error) { | |
2053 | if (vd->vdev_removed && | |
2054 | vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED) | |
2055 | vd->vdev_removed = B_FALSE; | |
2056 | ||
6cb8e530 PZ |
2057 | if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) { |
2058 | vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, | |
2059 | vd->vdev_stat.vs_aux); | |
2060 | } else { | |
2061 | vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, | |
2062 | vd->vdev_stat.vs_aux); | |
2063 | } | |
34dc7c2f BB |
2064 | return (error); |
2065 | } | |
2066 | ||
2067 | vd->vdev_removed = B_FALSE; | |
2068 | ||
428870ff BB |
2069 | /* |
2070 | * Recheck the faulted flag now that we have confirmed that | |
2071 | * the vdev is accessible. If we're faulted, bail. | |
2072 | */ | |
2073 | if (vd->vdev_faulted) { | |
2074 | ASSERT(vd->vdev_children == 0); | |
2075 | ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED || | |
2076 | vd->vdev_label_aux == VDEV_AUX_EXTERNAL); | |
2077 | vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED, | |
2078 | vd->vdev_label_aux); | |
2e528b49 | 2079 | return (SET_ERROR(ENXIO)); |
428870ff BB |
2080 | } |
2081 | ||
34dc7c2f BB |
2082 | if (vd->vdev_degraded) { |
2083 | ASSERT(vd->vdev_children == 0); | |
2084 | vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED, | |
2085 | VDEV_AUX_ERR_EXCEEDED); | |
2086 | } else { | |
428870ff | 2087 | vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0); |
34dc7c2f BB |
2088 | } |
2089 | ||
428870ff BB |
2090 | /* |
2091 | * For hole or missing vdevs we just return success. | |
2092 | */ | |
2093 | if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) | |
2094 | return (0); | |
2095 | ||
1c27024e | 2096 | for (int c = 0; c < vd->vdev_children; c++) { |
34dc7c2f BB |
2097 | if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) { |
2098 | vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED, | |
2099 | VDEV_AUX_NONE); | |
2100 | break; | |
2101 | } | |
9babb374 | 2102 | } |
34dc7c2f BB |
2103 | |
2104 | osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t)); | |
1bd201e7 | 2105 | max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t)); |
34dc7c2f BB |
2106 | |
2107 | if (vd->vdev_children == 0) { | |
2108 | if (osize < SPA_MINDEVSIZE) { | |
2109 | vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, | |
2110 | VDEV_AUX_TOO_SMALL); | |
2e528b49 | 2111 | return (SET_ERROR(EOVERFLOW)); |
34dc7c2f BB |
2112 | } |
2113 | psize = osize; | |
2114 | asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE); | |
1bd201e7 CS |
2115 | max_asize = max_osize - (VDEV_LABEL_START_SIZE + |
2116 | VDEV_LABEL_END_SIZE); | |
34dc7c2f BB |
2117 | } else { |
2118 | if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE - | |
2119 | (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) { | |
2120 | vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, | |
2121 | VDEV_AUX_TOO_SMALL); | |
2e528b49 | 2122 | return (SET_ERROR(EOVERFLOW)); |
34dc7c2f BB |
2123 | } |
2124 | psize = 0; | |
2125 | asize = osize; | |
1bd201e7 | 2126 | max_asize = max_osize; |
34dc7c2f BB |
2127 | } |
2128 | ||
9d3f7b87 OF |
2129 | /* |
2130 | * If the vdev was expanded, record this so that we can re-create the | |
2131 | * uberblock rings in labels {2,3}, during the next sync. | |
2132 | */ | |
2133 | if ((psize > vd->vdev_psize) && (vd->vdev_psize != 0)) | |
2134 | vd->vdev_copy_uberblocks = B_TRUE; | |
2135 | ||
34dc7c2f BB |
2136 | vd->vdev_psize = psize; |
2137 | ||
9babb374 | 2138 | /* |
2e215fec | 2139 | * Make sure the allocatable size hasn't shrunk too much. |
9babb374 BB |
2140 | */ |
2141 | if (asize < vd->vdev_min_asize) { | |
2142 | vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, | |
2143 | VDEV_AUX_BAD_LABEL); | |
2e528b49 | 2144 | return (SET_ERROR(EINVAL)); |
9babb374 BB |
2145 | } |
2146 | ||
c494aa7f GW |
2147 | /* |
2148 | * We can always set the logical/physical ashift members since | |
2149 | * their values are only used to calculate the vdev_ashift when | |
2150 | * the device is first added to the config. These values should | |
2151 | * not be used for anything else since they may change whenever | |
2152 | * the device is reopened and we don't store them in the label. | |
2153 | */ | |
6fe3498c RM |
2154 | vd->vdev_physical_ashift = |
2155 | MAX(physical_ashift, vd->vdev_physical_ashift); | |
c494aa7f GW |
2156 | vd->vdev_logical_ashift = MAX(logical_ashift, |
2157 | vd->vdev_logical_ashift); | |
6fe3498c | 2158 | |
34dc7c2f BB |
2159 | if (vd->vdev_asize == 0) { |
2160 | /* | |
2161 | * This is the first-ever open, so use the computed values. | |
b28e57cb | 2162 | * For compatibility, a different ashift can be requested. |
34dc7c2f BB |
2163 | */ |
2164 | vd->vdev_asize = asize; | |
1bd201e7 | 2165 | vd->vdev_max_asize = max_asize; |
c494aa7f GW |
2166 | |
2167 | /* | |
bf169e9f | 2168 | * If the vdev_ashift was not overridden at creation time, |
c494aa7f GW |
2169 | * then set it the logical ashift and optimize the ashift. |
2170 | */ | |
2171 | if (vd->vdev_ashift == 0) { | |
2172 | vd->vdev_ashift = vd->vdev_logical_ashift; | |
2173 | ||
2174 | if (vd->vdev_logical_ashift > ASHIFT_MAX) { | |
2175 | vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, | |
2176 | VDEV_AUX_ASHIFT_TOO_BIG); | |
2177 | return (SET_ERROR(EDOM)); | |
2178 | } | |
2179 | ||
4d2dad04 | 2180 | if (vd->vdev_top == vd && vd->vdev_attaching == B_FALSE) |
c494aa7f | 2181 | vdev_ashift_optimize(vd); |
4d2dad04 | 2182 | vd->vdev_attaching = B_FALSE; |
c494aa7f | 2183 | } |
ff61d1a4 | 2184 | if (vd->vdev_ashift != 0 && (vd->vdev_ashift < ASHIFT_MIN || |
2185 | vd->vdev_ashift > ASHIFT_MAX)) { | |
2186 | vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, | |
2187 | VDEV_AUX_BAD_ASHIFT); | |
2188 | return (SET_ERROR(EDOM)); | |
2189 | } | |
34dc7c2f BB |
2190 | } else { |
2191 | /* | |
6fe3498c | 2192 | * Make sure the alignment required hasn't increased. |
34dc7c2f | 2193 | */ |
6fe3498c | 2194 | if (vd->vdev_ashift > vd->vdev_top->vdev_ashift && |
32a9872b | 2195 | vd->vdev_ops->vdev_op_leaf) { |
1144586b TS |
2196 | (void) zfs_ereport_post( |
2197 | FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT, | |
4f072827 | 2198 | spa, vd, NULL, NULL, 0); |
6fe3498c RM |
2199 | vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, |
2200 | VDEV_AUX_BAD_LABEL); | |
2201 | return (SET_ERROR(EDOM)); | |
6fe3498c | 2202 | } |
1bd201e7 | 2203 | vd->vdev_max_asize = max_asize; |
9babb374 | 2204 | } |
34dc7c2f | 2205 | |
9babb374 | 2206 | /* |
2e215fec SH |
2207 | * If all children are healthy we update asize if either: |
2208 | * The asize has increased, due to a device expansion caused by dynamic | |
2209 | * LUN growth or vdev replacement, and automatic expansion is enabled; | |
2210 | * making the additional space available. | |
2211 | * | |
2212 | * The asize has decreased, due to a device shrink usually caused by a | |
2213 | * vdev replace with a smaller device. This ensures that calculations | |
2214 | * based of max_asize and asize e.g. esize are always valid. It's safe | |
2215 | * to do this as we've already validated that asize is greater than | |
2216 | * vdev_min_asize. | |
9babb374 | 2217 | */ |
2e215fec SH |
2218 | if (vd->vdev_state == VDEV_STATE_HEALTHY && |
2219 | ((asize > vd->vdev_asize && | |
2220 | (vd->vdev_expanding || spa->spa_autoexpand)) || | |
2221 | (asize < vd->vdev_asize))) | |
9babb374 | 2222 | vd->vdev_asize = asize; |
34dc7c2f | 2223 | |
9babb374 | 2224 | vdev_set_min_asize(vd); |
34dc7c2f BB |
2225 | |
2226 | /* | |
2227 | * Ensure we can issue some IO before declaring the | |
2228 | * vdev open for business. | |
2229 | */ | |
b128c09f BB |
2230 | if (vd->vdev_ops->vdev_op_leaf && |
2231 | (error = zio_wait(vdev_probe(vd, NULL))) != 0) { | |
428870ff BB |
2232 | vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED, |
2233 | VDEV_AUX_ERR_EXCEEDED); | |
34dc7c2f BB |
2234 | return (error); |
2235 | } | |
2236 | ||
b2255edc | 2237 | /* |
bf169e9f | 2238 | * Track the minimum allocation size. |
b2255edc BB |
2239 | */ |
2240 | if (vd->vdev_top == vd && vd->vdev_ashift != 0 && | |
2241 | vd->vdev_islog == 0 && vd->vdev_aux == NULL) { | |
2242 | uint64_t min_alloc = vdev_get_min_alloc(vd); | |
d9bb583c | 2243 | vdev_spa_set_alloc(spa, min_alloc); |
b2255edc BB |
2244 | } |
2245 | ||
34dc7c2f | 2246 | /* |
3c819a2c JP |
2247 | * If this is a leaf vdev, assess whether a resilver is needed. |
2248 | * But don't do this if we are doing a reopen for a scrub, since | |
2249 | * this would just restart the scrub we are already doing. | |
34dc7c2f | 2250 | */ |
3c819a2c JP |
2251 | if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen) |
2252 | dsl_scan_assess_vdev(spa->spa_dsl_pool, vd); | |
34dc7c2f BB |
2253 | |
2254 | return (0); | |
2255 | } | |
2256 | ||
cf0977ad AS |
2257 | static void |
2258 | vdev_validate_child(void *arg) | |
2259 | { | |
2260 | vdev_t *vd = arg; | |
2261 | ||
2262 | vd->vdev_validate_thread = curthread; | |
2263 | vd->vdev_validate_error = vdev_validate(vd); | |
2264 | vd->vdev_validate_thread = NULL; | |
2265 | } | |
2266 | ||
34dc7c2f BB |
2267 | /* |
2268 | * Called once the vdevs are all opened, this routine validates the label | |
6cb8e530 | 2269 | * contents. This needs to be done before vdev_load() so that we don't |
34dc7c2f BB |
2270 | * inadvertently do repair I/Os to the wrong device. |
2271 | * | |
2272 | * This function will only return failure if one of the vdevs indicates that it | |
2273 | * has since been destroyed or exported. This is only possible if | |
2274 | * /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state | |
2275 | * will be updated but the function will return 0. | |
2276 | */ | |
2277 | int | |
6cb8e530 | 2278 | vdev_validate(vdev_t *vd) |
34dc7c2f BB |
2279 | { |
2280 | spa_t *spa = vd->vdev_spa; | |
cf0977ad | 2281 | taskq_t *tq = NULL; |
34dc7c2f | 2282 | nvlist_t *label; |
6cb8e530 | 2283 | uint64_t guid = 0, aux_guid = 0, top_guid; |
34dc7c2f | 2284 | uint64_t state; |
6cb8e530 PZ |
2285 | nvlist_t *nvl; |
2286 | uint64_t txg; | |
cf0977ad | 2287 | int children = vd->vdev_children; |
34dc7c2f | 2288 | |
6cb8e530 PZ |
2289 | if (vdev_validate_skip) |
2290 | return (0); | |
2291 | ||
cf0977ad AS |
2292 | if (children > 0) { |
2293 | tq = taskq_create("vdev_validate", children, minclsyspri, | |
2294 | children, children, TASKQ_PREPOPULATE); | |
2295 | } | |
2296 | ||
2297 | for (uint64_t c = 0; c < children; c++) { | |
2298 | vdev_t *cvd = vd->vdev_child[c]; | |
2299 | ||
2300 | if (tq == NULL || vdev_uses_zvols(cvd)) { | |
2301 | vdev_validate_child(cvd); | |
2302 | } else { | |
2303 | VERIFY(taskq_dispatch(tq, vdev_validate_child, cvd, | |
2304 | TQ_SLEEP) != TASKQID_INVALID); | |
2305 | } | |
2306 | } | |
2307 | if (tq != NULL) { | |
2308 | taskq_wait(tq); | |
2309 | taskq_destroy(tq); | |
2310 | } | |
2311 | for (int c = 0; c < children; c++) { | |
2312 | int error = vd->vdev_child[c]->vdev_validate_error; | |
2313 | ||
2314 | if (error != 0) | |
2e528b49 | 2315 | return (SET_ERROR(EBADF)); |
cf0977ad AS |
2316 | } |
2317 | ||
34dc7c2f BB |
2318 | |
2319 | /* | |
2320 | * If the device has already failed, or was marked offline, don't do | |
2321 | * any further validation. Otherwise, label I/O will fail and we will | |
2322 | * overwrite the previous state. | |
2323 | */ | |
6cb8e530 PZ |
2324 | if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd)) |
2325 | return (0); | |
34dc7c2f | 2326 | |
6cb8e530 PZ |
2327 | /* |
2328 | * If we are performing an extreme rewind, we allow for a label that | |
2329 | * was modified at a point after the current txg. | |
a11c7aae PZ |
2330 | * If config lock is not held do not check for the txg. spa_sync could |
2331 | * be updating the vdev's label before updating spa_last_synced_txg. | |
6cb8e530 | 2332 | */ |
a11c7aae PZ |
2333 | if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 || |
2334 | spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG) | |
6cb8e530 PZ |
2335 | txg = UINT64_MAX; |
2336 | else | |
2337 | txg = spa_last_synced_txg(spa); | |
34dc7c2f | 2338 | |
6cb8e530 | 2339 | if ((label = vdev_label_read_config(vd, txg)) == NULL) { |
dce1bf99 | 2340 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, |
6cb8e530 | 2341 | VDEV_AUX_BAD_LABEL); |
38a19edd PZ |
2342 | vdev_dbgmsg(vd, "vdev_validate: failed reading config for " |
2343 | "txg %llu", (u_longlong_t)txg); | |
6cb8e530 PZ |
2344 | return (0); |
2345 | } | |
428870ff | 2346 | |
6cb8e530 PZ |
2347 | /* |
2348 | * Determine if this vdev has been split off into another | |
2349 | * pool. If so, then refuse to open it. | |
2350 | */ | |
2351 | if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID, | |
2352 | &aux_guid) == 0 && aux_guid == spa_guid(spa)) { | |
2353 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, | |
2354 | VDEV_AUX_SPLIT_POOL); | |
2355 | nvlist_free(label); | |
2356 | vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool"); | |
2357 | return (0); | |
2358 | } | |
34dc7c2f | 2359 | |
6cb8e530 PZ |
2360 | if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) { |
2361 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, | |
2362 | VDEV_AUX_CORRUPT_DATA); | |
2363 | nvlist_free(label); | |
2364 | vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label", | |
2365 | ZPOOL_CONFIG_POOL_GUID); | |
2366 | return (0); | |
2367 | } | |
428870ff | 2368 | |
6cb8e530 PZ |
2369 | /* |
2370 | * If config is not trusted then ignore the spa guid check. This is | |
2371 | * necessary because if the machine crashed during a re-guid the new | |
2372 | * guid might have been written to all of the vdev labels, but not the | |
2373 | * cached config. The check will be performed again once we have the | |
2374 | * trusted config from the MOS. | |
2375 | */ | |
2376 | if (spa->spa_trust_config && guid != spa_guid(spa)) { | |
2377 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, | |
2378 | VDEV_AUX_CORRUPT_DATA); | |
2379 | nvlist_free(label); | |
2380 | vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't " | |
2381 | "match config (%llu != %llu)", (u_longlong_t)guid, | |
2382 | (u_longlong_t)spa_guid(spa)); | |
2383 | return (0); | |
2384 | } | |
2385 | ||
2386 | if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl) | |
2387 | != 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID, | |
2388 | &aux_guid) != 0) | |
2389 | aux_guid = 0; | |
2390 | ||
2391 | if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) { | |
2392 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, | |
2393 | VDEV_AUX_CORRUPT_DATA); | |
2394 | nvlist_free(label); | |
2395 | vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label", | |
2396 | ZPOOL_CONFIG_GUID); | |
2397 | return (0); | |
2398 | } | |
2399 | ||
2400 | if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid) | |
2401 | != 0) { | |
2402 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, | |
2403 | VDEV_AUX_CORRUPT_DATA); | |
2404 | nvlist_free(label); | |
2405 | vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label", | |
2406 | ZPOOL_CONFIG_TOP_GUID); | |
2407 | return (0); | |
2408 | } | |
2409 | ||
2410 | /* | |
2411 | * If this vdev just became a top-level vdev because its sibling was | |
2412 | * detached, it will have adopted the parent's vdev guid -- but the | |
2413 | * label may or may not be on disk yet. Fortunately, either version | |
2414 | * of the label will have the same top guid, so if we're a top-level | |
2415 | * vdev, we can safely compare to that instead. | |
2416 | * However, if the config comes from a cachefile that failed to update | |
2417 | * after the detach, a top-level vdev will appear as a non top-level | |
2418 | * vdev in the config. Also relax the constraints if we perform an | |
2419 | * extreme rewind. | |
2420 | * | |
2421 | * If we split this vdev off instead, then we also check the | |
2422 | * original pool's guid. We don't want to consider the vdev | |
2423 | * corrupt if it is partway through a split operation. | |
2424 | */ | |
2425 | if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) { | |
2426 | boolean_t mismatch = B_FALSE; | |
2427 | if (spa->spa_trust_config && !spa->spa_extreme_rewind) { | |
2428 | if (vd != vd->vdev_top || vd->vdev_guid != top_guid) | |
2429 | mismatch = B_TRUE; | |
2430 | } else { | |
2431 | if (vd->vdev_guid != top_guid && | |
2432 | vd->vdev_top->vdev_guid != guid) | |
2433 | mismatch = B_TRUE; | |
34dc7c2f BB |
2434 | } |
2435 | ||
6cb8e530 | 2436 | if (mismatch) { |
34dc7c2f BB |
2437 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, |
2438 | VDEV_AUX_CORRUPT_DATA); | |
2439 | nvlist_free(label); | |
6cb8e530 PZ |
2440 | vdev_dbgmsg(vd, "vdev_validate: config guid " |
2441 | "doesn't match label guid"); | |
2442 | vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu", | |
2443 | (u_longlong_t)vd->vdev_guid, | |
2444 | (u_longlong_t)vd->vdev_top->vdev_guid); | |
2445 | vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, " | |
2446 | "aux_guid %llu", (u_longlong_t)guid, | |
2447 | (u_longlong_t)top_guid, (u_longlong_t)aux_guid); | |
34dc7c2f BB |
2448 | return (0); |
2449 | } | |
6cb8e530 | 2450 | } |
34dc7c2f | 2451 | |
6cb8e530 PZ |
2452 | if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, |
2453 | &state) != 0) { | |
2454 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, | |
2455 | VDEV_AUX_CORRUPT_DATA); | |
34dc7c2f | 2456 | nvlist_free(label); |
6cb8e530 PZ |
2457 | vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label", |
2458 | ZPOOL_CONFIG_POOL_STATE); | |
2459 | return (0); | |
2460 | } | |
34dc7c2f | 2461 | |
6cb8e530 PZ |
2462 | nvlist_free(label); |
2463 | ||
2464 | /* | |
2465 | * If this is a verbatim import, no need to check the | |
2466 | * state of the pool. | |
2467 | */ | |
2468 | if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) && | |
2469 | spa_load_state(spa) == SPA_LOAD_OPEN && | |
2470 | state != POOL_STATE_ACTIVE) { | |
2471 | vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) " | |
2472 | "for spa %s", (u_longlong_t)state, spa->spa_name); | |
2473 | return (SET_ERROR(EBADF)); | |
2474 | } | |
2475 | ||
2476 | /* | |
2477 | * If we were able to open and validate a vdev that was | |
2478 | * previously marked permanently unavailable, clear that state | |
2479 | * now. | |
2480 | */ | |
2481 | if (vd->vdev_not_present) | |
2482 | vd->vdev_not_present = 0; | |
2483 | ||
2484 | return (0); | |
2485 | } | |
2486 | ||
2487 | static void | |
2488 | vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd) | |
2489 | { | |
2a8430a2 | 2490 | char *old, *new; |
6cb8e530 PZ |
2491 | if (svd->vdev_path != NULL && dvd->vdev_path != NULL) { |
2492 | if (strcmp(svd->vdev_path, dvd->vdev_path) != 0) { | |
2493 | zfs_dbgmsg("vdev_copy_path: vdev %llu: path changed " | |
2494 | "from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid, | |
2495 | dvd->vdev_path, svd->vdev_path); | |
2496 | spa_strfree(dvd->vdev_path); | |
2497 | dvd->vdev_path = spa_strdup(svd->vdev_path); | |
4a0ee12a | 2498 | } |
6cb8e530 PZ |
2499 | } else if (svd->vdev_path != NULL) { |
2500 | dvd->vdev_path = spa_strdup(svd->vdev_path); | |
2501 | zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'", | |
2502 | (u_longlong_t)dvd->vdev_guid, dvd->vdev_path); | |
2503 | } | |
2a8430a2 TH |
2504 | |
2505 | /* | |
2506 | * Our enclosure sysfs path may have changed between imports | |
2507 | */ | |
2508 | old = dvd->vdev_enc_sysfs_path; | |
2509 | new = svd->vdev_enc_sysfs_path; | |
2510 | if ((old != NULL && new == NULL) || | |
2511 | (old == NULL && new != NULL) || | |
2512 | ((old != NULL && new != NULL) && strcmp(new, old) != 0)) { | |
2513 | zfs_dbgmsg("vdev_copy_path: vdev %llu: vdev_enc_sysfs_path " | |
2514 | "changed from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid, | |
2515 | old, new); | |
2516 | ||
2517 | if (dvd->vdev_enc_sysfs_path) | |
2518 | spa_strfree(dvd->vdev_enc_sysfs_path); | |
2519 | ||
2520 | if (svd->vdev_enc_sysfs_path) { | |
2521 | dvd->vdev_enc_sysfs_path = spa_strdup( | |
2522 | svd->vdev_enc_sysfs_path); | |
2523 | } else { | |
2524 | dvd->vdev_enc_sysfs_path = NULL; | |
2525 | } | |
2526 | } | |
6cb8e530 | 2527 | } |
34dc7c2f | 2528 | |
6cb8e530 PZ |
2529 | /* |
2530 | * Recursively copy vdev paths from one vdev to another. Source and destination | |
2531 | * vdev trees must have same geometry otherwise return error. Intended to copy | |
2532 | * paths from userland config into MOS config. | |
2533 | */ | |
2534 | int | |
2535 | vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd) | |
2536 | { | |
2537 | if ((svd->vdev_ops == &vdev_missing_ops) || | |
2538 | (svd->vdev_ishole && dvd->vdev_ishole) || | |
2539 | (dvd->vdev_ops == &vdev_indirect_ops)) | |
2540 | return (0); | |
2541 | ||
2542 | if (svd->vdev_ops != dvd->vdev_ops) { | |
2543 | vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s", | |
2544 | svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type); | |
2545 | return (SET_ERROR(EINVAL)); | |
2546 | } | |
2547 | ||
2548 | if (svd->vdev_guid != dvd->vdev_guid) { | |
2549 | vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != " | |
2550 | "%llu)", (u_longlong_t)svd->vdev_guid, | |
2551 | (u_longlong_t)dvd->vdev_guid); | |
2552 | return (SET_ERROR(EINVAL)); | |
b128c09f | 2553 | } |
34dc7c2f | 2554 | |
6cb8e530 PZ |
2555 | if (svd->vdev_children != dvd->vdev_children) { |
2556 | vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: " | |
2557 | "%llu != %llu", (u_longlong_t)svd->vdev_children, | |
2558 | (u_longlong_t)dvd->vdev_children); | |
2559 | return (SET_ERROR(EINVAL)); | |
2560 | } | |
2561 | ||
2562 | for (uint64_t i = 0; i < svd->vdev_children; i++) { | |
2563 | int error = vdev_copy_path_strict(svd->vdev_child[i], | |
2564 | dvd->vdev_child[i]); | |
2565 | if (error != 0) | |
2566 | return (error); | |
2567 | } | |
2568 | ||
2569 | if (svd->vdev_ops->vdev_op_leaf) | |
2570 | vdev_copy_path_impl(svd, dvd); | |
2571 | ||
34dc7c2f BB |
2572 | return (0); |
2573 | } | |
2574 | ||
6cb8e530 PZ |
2575 | static void |
2576 | vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd) | |
2577 | { | |
2578 | ASSERT(stvd->vdev_top == stvd); | |
2579 | ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id); | |
2580 | ||
2581 | for (uint64_t i = 0; i < dvd->vdev_children; i++) { | |
2582 | vdev_copy_path_search(stvd, dvd->vdev_child[i]); | |
2583 | } | |
2584 | ||
2585 | if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd)) | |
2586 | return; | |
2587 | ||
2588 | /* | |
2589 | * The idea here is that while a vdev can shift positions within | |
2590 | * a top vdev (when replacing, attaching mirror, etc.) it cannot | |
2591 | * step outside of it. | |
2592 | */ | |
2593 | vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid); | |
2594 | ||
2595 | if (vd == NULL || vd->vdev_ops != dvd->vdev_ops) | |
2596 | return; | |
2597 | ||
2598 | ASSERT(vd->vdev_ops->vdev_op_leaf); | |
2599 | ||
2600 | vdev_copy_path_impl(vd, dvd); | |
2601 | } | |
2602 | ||
2603 | /* | |
2604 | * Recursively copy vdev paths from one root vdev to another. Source and | |
2605 | * destination vdev trees may differ in geometry. For each destination leaf | |
2606 | * vdev, search a vdev with the same guid and top vdev id in the source. | |
2607 | * Intended to copy paths from userland config into MOS config. | |
2608 | */ | |
2609 | void | |
2610 | vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd) | |
2611 | { | |
2612 | uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children); | |
2613 | ASSERT(srvd->vdev_ops == &vdev_root_ops); | |
2614 | ASSERT(drvd->vdev_ops == &vdev_root_ops); | |
2615 | ||
2616 | for (uint64_t i = 0; i < children; i++) { | |
2617 | vdev_copy_path_search(srvd->vdev_child[i], | |
2618 | drvd->vdev_child[i]); | |
2619 | } | |
2620 | } | |
2621 | ||
34dc7c2f BB |
2622 | /* |
2623 | * Close a virtual device. | |
2624 | */ | |
2625 | void | |
2626 | vdev_close(vdev_t *vd) | |
2627 | { | |
428870ff | 2628 | vdev_t *pvd = vd->vdev_parent; |
2a8ba608 | 2629 | spa_t *spa __maybe_unused = vd->vdev_spa; |
fb5f0bc8 | 2630 | |
b2255edc BB |
2631 | ASSERT(vd != NULL); |
2632 | ASSERT(vd->vdev_open_thread == curthread || | |
2633 | spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); | |
fb5f0bc8 | 2634 | |
428870ff BB |
2635 | /* |
2636 | * If our parent is reopening, then we are as well, unless we are | |
2637 | * going offline. | |
2638 | */ | |
2639 | if (pvd != NULL && pvd->vdev_reopening) | |
2640 | vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline); | |
2641 | ||
34dc7c2f BB |
2642 | vd->vdev_ops->vdev_op_close(vd); |
2643 | ||
34dc7c2f | 2644 | /* |
9babb374 | 2645 | * We record the previous state before we close it, so that if we are |
34dc7c2f BB |
2646 | * doing a reopen(), we don't generate FMA ereports if we notice that |
2647 | * it's still faulted. | |
2648 | */ | |
2649 | vd->vdev_prevstate = vd->vdev_state; | |
2650 | ||
2651 | if (vd->vdev_offline) | |
2652 | vd->vdev_state = VDEV_STATE_OFFLINE; | |
2653 | else | |
2654 | vd->vdev_state = VDEV_STATE_CLOSED; | |
2655 | vd->vdev_stat.vs_aux = VDEV_AUX_NONE; | |
2656 | } | |
2657 | ||
428870ff BB |
2658 | void |
2659 | vdev_hold(vdev_t *vd) | |
2660 | { | |
2661 | spa_t *spa = vd->vdev_spa; | |
2662 | ||
2663 | ASSERT(spa_is_root(spa)); | |
2664 | if (spa->spa_state == POOL_STATE_UNINITIALIZED) | |
2665 | return; | |
2666 | ||
1c27024e | 2667 | for (int c = 0; c < vd->vdev_children; c++) |
428870ff BB |
2668 | vdev_hold(vd->vdev_child[c]); |
2669 | ||
11f2e9a4 | 2670 | if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_hold != NULL) |
428870ff BB |
2671 | vd->vdev_ops->vdev_op_hold(vd); |
2672 | } | |
2673 | ||
2674 | void | |
2675 | vdev_rele(vdev_t *vd) | |
2676 | { | |
d6320ddb | 2677 | ASSERT(spa_is_root(vd->vdev_spa)); |
1c27024e | 2678 | for (int c = 0; c < vd->vdev_children; c++) |
428870ff BB |
2679 | vdev_rele(vd->vdev_child[c]); |
2680 | ||
11f2e9a4 | 2681 | if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_rele != NULL) |
428870ff BB |
2682 | vd->vdev_ops->vdev_op_rele(vd); |
2683 | } | |
2684 | ||
2685 | /* | |
2686 | * Reopen all interior vdevs and any unopened leaves. We don't actually | |
2687 | * reopen leaf vdevs which had previously been opened as they might deadlock | |
2688 | * on the spa_config_lock. Instead we only obtain the leaf's physical size. | |
2689 | * If the leaf has never been opened then open it, as usual. | |
2690 | */ | |
34dc7c2f BB |
2691 | void |
2692 | vdev_reopen(vdev_t *vd) | |
2693 | { | |
2694 | spa_t *spa = vd->vdev_spa; | |
2695 | ||
b128c09f | 2696 | ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); |
34dc7c2f | 2697 | |
428870ff BB |
2698 | /* set the reopening flag unless we're taking the vdev offline */ |
2699 | vd->vdev_reopening = !vd->vdev_offline; | |
34dc7c2f BB |
2700 | vdev_close(vd); |
2701 | (void) vdev_open(vd); | |
2702 | ||
2703 | /* | |
2704 | * Call vdev_validate() here to make sure we have the same device. | |
2705 | * Otherwise, a device with an invalid label could be successfully | |
2706 | * opened in response to vdev_reopen(). | |
2707 | */ | |
b128c09f BB |
2708 | if (vd->vdev_aux) { |
2709 | (void) vdev_validate_aux(vd); | |
2710 | if (vdev_readable(vd) && vdev_writeable(vd) && | |
77f6826b GA |
2711 | vd->vdev_aux == &spa->spa_l2cache) { |
2712 | /* | |
77f6826b GA |
2713 | * In case the vdev is present we should evict all ARC |
2714 | * buffers and pointers to log blocks and reclaim their | |
2715 | * space before restoring its contents to L2ARC. | |
2716 | */ | |
2717 | if (l2arc_vdev_present(vd)) { | |
2718 | l2arc_rebuild_vdev(vd, B_TRUE); | |
2719 | } else { | |
2720 | l2arc_add_vdev(spa, vd); | |
2721 | } | |
2722 | spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD); | |
b7654bd7 | 2723 | spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM); |
77f6826b | 2724 | } |
b128c09f | 2725 | } else { |
6cb8e530 | 2726 | (void) vdev_validate(vd); |
b128c09f | 2727 | } |
34dc7c2f | 2728 | |
9d618615 A |
2729 | /* |
2730 | * Recheck if resilver is still needed and cancel any | |
2731 | * scheduled resilver if resilver is unneeded. | |
2732 | */ | |
2733 | if (!vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL) && | |
2734 | spa->spa_async_tasks & SPA_ASYNC_RESILVER) { | |
2735 | mutex_enter(&spa->spa_async_lock); | |
2736 | spa->spa_async_tasks &= ~SPA_ASYNC_RESILVER; | |
2737 | mutex_exit(&spa->spa_async_lock); | |
2738 | } | |
2739 | ||
34dc7c2f BB |
2740 | /* |
2741 | * Reassess parent vdev's health. | |
2742 | */ | |
2743 | vdev_propagate_state(vd); | |
2744 | } | |
2745 | ||
2746 | int | |
2747 | vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing) | |
2748 | { | |
2749 | int error; | |
2750 | ||
2751 | /* | |
2752 | * Normally, partial opens (e.g. of a mirror) are allowed. | |
2753 | * For a create, however, we want to fail the request if | |
2754 | * there are any components we can't open. | |
2755 | */ | |
2756 | error = vdev_open(vd); | |
2757 | ||
2758 | if (error || vd->vdev_state != VDEV_STATE_HEALTHY) { | |
2759 | vdev_close(vd); | |
28caa74b | 2760 | return (error ? error : SET_ERROR(ENXIO)); |
34dc7c2f BB |
2761 | } |
2762 | ||
2763 | /* | |
93cf2076 | 2764 | * Recursively load DTLs and initialize all labels. |
34dc7c2f | 2765 | */ |
93cf2076 GW |
2766 | if ((error = vdev_dtl_load(vd)) != 0 || |
2767 | (error = vdev_label_init(vd, txg, isreplacing ? | |
34dc7c2f BB |
2768 | VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) { |
2769 | vdev_close(vd); | |
2770 | return (error); | |
2771 | } | |
2772 | ||
2773 | return (0); | |
2774 | } | |
2775 | ||
34dc7c2f | 2776 | void |
9babb374 | 2777 | vdev_metaslab_set_size(vdev_t *vd) |
34dc7c2f | 2778 | { |
d2734cce | 2779 | uint64_t asize = vd->vdev_asize; |
c853f382 | 2780 | uint64_t ms_count = asize >> zfs_vdev_default_ms_shift; |
e4e94ca3 | 2781 | uint64_t ms_shift; |
d2734cce | 2782 | |
34dc7c2f | 2783 | /* |
e4e94ca3 DB |
2784 | * There are two dimensions to the metaslab sizing calculation: |
2785 | * the size of the metaslab and the count of metaslabs per vdev. | |
e4e94ca3 | 2786 | * |
c853f382 SD |
2787 | * The default values used below are a good balance between memory |
2788 | * usage (larger metaslab size means more memory needed for loaded | |
2789 | * metaslabs; more metaslabs means more memory needed for the | |
2790 | * metaslab_t structs), metaslab load time (larger metaslabs take | |
2791 | * longer to load), and metaslab sync time (more metaslabs means | |
2792 | * more time spent syncing all of them). | |
2793 | * | |
2794 | * In general, we aim for zfs_vdev_default_ms_count (200) metaslabs. | |
2795 | * The range of the dimensions are as follows: | |
2796 | * | |
2797 | * 2^29 <= ms_size <= 2^34 | |
e4e94ca3 DB |
2798 | * 16 <= ms_count <= 131,072 |
2799 | * | |
2800 | * On the lower end of vdev sizes, we aim for metaslabs sizes of | |
2801 | * at least 512MB (2^29) to minimize fragmentation effects when | |
2802 | * testing with smaller devices. However, the count constraint | |
2803 | * of at least 16 metaslabs will override this minimum size goal. | |
2804 | * | |
2805 | * On the upper end of vdev sizes, we aim for a maximum metaslab | |
c853f382 SD |
2806 | * size of 16GB. However, we will cap the total count to 2^17 |
2807 | * metaslabs to keep our memory footprint in check and let the | |
2808 | * metaslab size grow from there if that limit is hit. | |
e4e94ca3 DB |
2809 | * |
2810 | * The net effect of applying above constrains is summarized below. | |
2811 | * | |
c853f382 SD |
2812 | * vdev size metaslab count |
2813 | * --------------|----------------- | |
2814 | * < 8GB ~16 | |
2815 | * 8GB - 100GB one per 512MB | |
2816 | * 100GB - 3TB ~200 | |
2817 | * 3TB - 2PB one per 16GB | |
2818 | * > 2PB ~131,072 | |
2819 | * -------------------------------- | |
2820 | * | |
2821 | * Finally, note that all of the above calculate the initial | |
2822 | * number of metaslabs. Expanding a top-level vdev will result | |
2823 | * in additional metaslabs being allocated making it possible | |
2824 | * to exceed the zfs_vdev_ms_count_limit. | |
34dc7c2f | 2825 | */ |
d2734cce | 2826 | |
c853f382 SD |
2827 | if (ms_count < zfs_vdev_min_ms_count) |
2828 | ms_shift = highbit64(asize / zfs_vdev_min_ms_count); | |
2829 | else if (ms_count > zfs_vdev_default_ms_count) | |
2830 | ms_shift = highbit64(asize / zfs_vdev_default_ms_count); | |
e4e94ca3 | 2831 | else |
c853f382 | 2832 | ms_shift = zfs_vdev_default_ms_shift; |
e4e94ca3 DB |
2833 | |
2834 | if (ms_shift < SPA_MAXBLOCKSHIFT) { | |
2835 | ms_shift = SPA_MAXBLOCKSHIFT; | |
c853f382 SD |
2836 | } else if (ms_shift > zfs_vdev_max_ms_shift) { |
2837 | ms_shift = zfs_vdev_max_ms_shift; | |
e4e94ca3 | 2838 | /* cap the total count to constrain memory footprint */ |
c853f382 SD |
2839 | if ((asize >> ms_shift) > zfs_vdev_ms_count_limit) |
2840 | ms_shift = highbit64(asize / zfs_vdev_ms_count_limit); | |
d2734cce SD |
2841 | } |
2842 | ||
2843 | vd->vdev_ms_shift = ms_shift; | |
2844 | ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT); | |
34dc7c2f BB |
2845 | } |
2846 | ||
2847 | void | |
2848 | vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg) | |
2849 | { | |
2850 | ASSERT(vd == vd->vdev_top); | |
a1d477c2 MA |
2851 | /* indirect vdevs don't have metaslabs or dtls */ |
2852 | ASSERT(vdev_is_concrete(vd) || flags == 0); | |
34dc7c2f | 2853 | ASSERT(ISP2(flags)); |
572e2857 | 2854 | ASSERT(spa_writeable(vd->vdev_spa)); |
34dc7c2f BB |
2855 | |
2856 | if (flags & VDD_METASLAB) | |
2857 | (void) txg_list_add(&vd->vdev_ms_list, arg, txg); | |
2858 | ||
2859 | if (flags & VDD_DTL) | |
2860 | (void) txg_list_add(&vd->vdev_dtl_list, arg, txg); | |
2861 | ||
2862 | (void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg); | |
2863 | } | |
2864 | ||
93cf2076 GW |
2865 | void |
2866 | vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg) | |
2867 | { | |
1c27024e | 2868 | for (int c = 0; c < vd->vdev_children; c++) |
93cf2076 GW |
2869 | vdev_dirty_leaves(vd->vdev_child[c], flags, txg); |
2870 | ||
2871 | if (vd->vdev_ops->vdev_op_leaf) | |
2872 | vdev_dirty(vd->vdev_top, flags, vd, txg); | |
2873 | } | |
2874 | ||
fb5f0bc8 BB |
2875 | /* |
2876 | * DTLs. | |
2877 | * | |
2878 | * A vdev's DTL (dirty time log) is the set of transaction groups for which | |
428870ff | 2879 | * the vdev has less than perfect replication. There are four kinds of DTL: |
fb5f0bc8 BB |
2880 | * |
2881 | * DTL_MISSING: txgs for which the vdev has no valid copies of the data | |
2882 | * | |
2883 | * DTL_PARTIAL: txgs for which data is available, but not fully replicated | |
2884 | * | |
2885 | * DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon | |
2886 | * scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of | |
2887 | * txgs that was scrubbed. | |
2888 | * | |
2889 | * DTL_OUTAGE: txgs which cannot currently be read, whether due to | |
2890 | * persistent errors or just some device being offline. | |
2891 | * Unlike the other three, the DTL_OUTAGE map is not generally | |
2892 | * maintained; it's only computed when needed, typically to | |
2893 | * determine whether a device can be detached. | |
2894 | * | |
2895 | * For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device | |
2896 | * either has the data or it doesn't. | |
2897 | * | |
2898 | * For interior vdevs such as mirror and RAID-Z the picture is more complex. | |
2899 | * A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because | |
2900 | * if any child is less than fully replicated, then so is its parent. | |
2901 | * A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs, | |
2902 | * comprising only those txgs which appear in 'maxfaults' or more children; | |
2903 | * those are the txgs we don't have enough replication to read. For example, | |
2904 | * double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2); | |
2905 | * thus, its DTL_MISSING consists of the set of txgs that appear in more than | |
2906 | * two child DTL_MISSING maps. | |
2907 | * | |
2908 | * It should be clear from the above that to compute the DTLs and outage maps | |
2909 | * for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps. | |
2910 | * Therefore, that is all we keep on disk. When loading the pool, or after | |
2911 | * a configuration change, we generate all other DTLs from first principles. | |
2912 | */ | |
34dc7c2f | 2913 | void |
fb5f0bc8 | 2914 | vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) |
34dc7c2f | 2915 | { |
93cf2076 | 2916 | range_tree_t *rt = vd->vdev_dtl[t]; |
fb5f0bc8 BB |
2917 | |
2918 | ASSERT(t < DTL_TYPES); | |
2919 | ASSERT(vd != vd->vdev_spa->spa_root_vdev); | |
572e2857 | 2920 | ASSERT(spa_writeable(vd->vdev_spa)); |
fb5f0bc8 | 2921 | |
a1d477c2 | 2922 | mutex_enter(&vd->vdev_dtl_lock); |
93cf2076 GW |
2923 | if (!range_tree_contains(rt, txg, size)) |
2924 | range_tree_add(rt, txg, size); | |
a1d477c2 | 2925 | mutex_exit(&vd->vdev_dtl_lock); |
34dc7c2f BB |
2926 | } |
2927 | ||
fb5f0bc8 BB |
2928 | boolean_t |
2929 | vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size) | |
34dc7c2f | 2930 | { |
93cf2076 | 2931 | range_tree_t *rt = vd->vdev_dtl[t]; |
fb5f0bc8 | 2932 | boolean_t dirty = B_FALSE; |
34dc7c2f | 2933 | |
fb5f0bc8 BB |
2934 | ASSERT(t < DTL_TYPES); |
2935 | ASSERT(vd != vd->vdev_spa->spa_root_vdev); | |
34dc7c2f | 2936 | |
a1d477c2 MA |
2937 | /* |
2938 | * While we are loading the pool, the DTLs have not been loaded yet. | |
4d0ba941 BB |
2939 | * This isn't a problem but it can result in devices being tried |
2940 | * which are known to not have the data. In which case, the import | |
2941 | * is relying on the checksum to ensure that we get the right data. | |
2942 | * Note that while importing we are only reading the MOS, which is | |
2943 | * always checksummed. | |
a1d477c2 | 2944 | */ |
a1d477c2 | 2945 | mutex_enter(&vd->vdev_dtl_lock); |
d2734cce | 2946 | if (!range_tree_is_empty(rt)) |
93cf2076 | 2947 | dirty = range_tree_contains(rt, txg, size); |
a1d477c2 | 2948 | mutex_exit(&vd->vdev_dtl_lock); |
34dc7c2f BB |
2949 | |
2950 | return (dirty); | |
2951 | } | |
2952 | ||
fb5f0bc8 BB |
2953 | boolean_t |
2954 | vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t) | |
2955 | { | |
93cf2076 | 2956 | range_tree_t *rt = vd->vdev_dtl[t]; |
fb5f0bc8 BB |
2957 | boolean_t empty; |
2958 | ||
a1d477c2 | 2959 | mutex_enter(&vd->vdev_dtl_lock); |
d2734cce | 2960 | empty = range_tree_is_empty(rt); |
a1d477c2 | 2961 | mutex_exit(&vd->vdev_dtl_lock); |
fb5f0bc8 BB |
2962 | |
2963 | return (empty); | |
2964 | } | |
2965 | ||
3d6da72d | 2966 | /* |
b2255edc BB |
2967 | * Check if the txg falls within the range which must be |
2968 | * resilvered. DVAs outside this range can always be skipped. | |
2969 | */ | |
2970 | boolean_t | |
2971 | vdev_default_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize, | |
2972 | uint64_t phys_birth) | |
2973 | { | |
14e4e3cb AZ |
2974 | (void) dva, (void) psize; |
2975 | ||
b2255edc BB |
2976 | /* Set by sequential resilver. */ |
2977 | if (phys_birth == TXG_UNKNOWN) | |
2978 | return (B_TRUE); | |
2979 | ||
2980 | return (vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1)); | |
2981 | } | |
2982 | ||
2983 | /* | |
2984 | * Returns B_TRUE if the vdev determines the DVA needs to be resilvered. | |
3d6da72d IH |
2985 | */ |
2986 | boolean_t | |
b2255edc BB |
2987 | vdev_dtl_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize, |
2988 | uint64_t phys_birth) | |
3d6da72d IH |
2989 | { |
2990 | ASSERT(vd != vd->vdev_spa->spa_root_vdev); | |
2991 | ||
2992 | if (vd->vdev_ops->vdev_op_need_resilver == NULL || | |
2993 | vd->vdev_ops->vdev_op_leaf) | |
2994 | return (B_TRUE); | |
2995 | ||
b2255edc BB |
2996 | return (vd->vdev_ops->vdev_op_need_resilver(vd, dva, psize, |
2997 | phys_birth)); | |
3d6da72d IH |
2998 | } |
2999 | ||
5d1f7fb6 GW |
3000 | /* |
3001 | * Returns the lowest txg in the DTL range. | |
3002 | */ | |
3003 | static uint64_t | |
3004 | vdev_dtl_min(vdev_t *vd) | |
3005 | { | |
5d1f7fb6 | 3006 | ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock)); |
93cf2076 | 3007 | ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0); |
5d1f7fb6 GW |
3008 | ASSERT0(vd->vdev_children); |
3009 | ||
ca577779 | 3010 | return (range_tree_min(vd->vdev_dtl[DTL_MISSING]) - 1); |
5d1f7fb6 GW |
3011 | } |
3012 | ||
3013 | /* | |
3014 | * Returns the highest txg in the DTL. | |
3015 | */ | |
3016 | static uint64_t | |
3017 | vdev_dtl_max(vdev_t *vd) | |
3018 | { | |
5d1f7fb6 | 3019 | ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock)); |
93cf2076 | 3020 | ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0); |
5d1f7fb6 GW |
3021 | ASSERT0(vd->vdev_children); |
3022 | ||
ca577779 | 3023 | return (range_tree_max(vd->vdev_dtl[DTL_MISSING])); |
5d1f7fb6 GW |
3024 | } |
3025 | ||
3026 | /* | |
3027 | * Determine if a resilvering vdev should remove any DTL entries from | |
3028 | * its range. If the vdev was resilvering for the entire duration of the | |
3029 | * scan then it should excise that range from its DTLs. Otherwise, this | |
3030 | * vdev is considered partially resilvered and should leave its DTL | |
3031 | * entries intact. The comment in vdev_dtl_reassess() describes how we | |
3032 | * excise the DTLs. | |
3033 | */ | |
3034 | static boolean_t | |
9a49d3f3 | 3035 | vdev_dtl_should_excise(vdev_t *vd, boolean_t rebuild_done) |
5d1f7fb6 | 3036 | { |
5d1f7fb6 GW |
3037 | ASSERT0(vd->vdev_children); |
3038 | ||
335b251a MA |
3039 | if (vd->vdev_state < VDEV_STATE_DEGRADED) |
3040 | return (B_FALSE); | |
3041 | ||
80a91e74 TC |
3042 | if (vd->vdev_resilver_deferred) |
3043 | return (B_FALSE); | |
3044 | ||
9a49d3f3 | 3045 | if (range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) |
5d1f7fb6 GW |
3046 | return (B_TRUE); |
3047 | ||
9a49d3f3 BB |
3048 | if (rebuild_done) { |
3049 | vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config; | |
3050 | vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys; | |
3051 | ||
3052 | /* Rebuild not initiated by attach */ | |
3053 | if (vd->vdev_rebuild_txg == 0) | |
3054 | return (B_TRUE); | |
3055 | ||
3056 | /* | |
3057 | * When a rebuild completes without error then all missing data | |
3058 | * up to the rebuild max txg has been reconstructed and the DTL | |
3059 | * is eligible for excision. | |
3060 | */ | |
3061 | if (vrp->vrp_rebuild_state == VDEV_REBUILD_COMPLETE && | |
3062 | vdev_dtl_max(vd) <= vrp->vrp_max_txg) { | |
3063 | ASSERT3U(vrp->vrp_min_txg, <=, vdev_dtl_min(vd)); | |
3064 | ASSERT3U(vrp->vrp_min_txg, <, vd->vdev_rebuild_txg); | |
3065 | ASSERT3U(vd->vdev_rebuild_txg, <=, vrp->vrp_max_txg); | |
3066 | return (B_TRUE); | |
3067 | } | |
3068 | } else { | |
3069 | dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan; | |
3070 | dsl_scan_phys_t *scnp __maybe_unused = &scn->scn_phys; | |
3071 | ||
3072 | /* Resilver not initiated by attach */ | |
3073 | if (vd->vdev_resilver_txg == 0) | |
3074 | return (B_TRUE); | |
3075 | ||
3076 | /* | |
3077 | * When a resilver is initiated the scan will assign the | |
3078 | * scn_max_txg value to the highest txg value that exists | |
3079 | * in all DTLs. If this device's max DTL is not part of this | |
3080 | * scan (i.e. it is not in the range (scn_min_txg, scn_max_txg] | |
3081 | * then it is not eligible for excision. | |
3082 | */ | |
3083 | if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) { | |
3084 | ASSERT3U(scnp->scn_min_txg, <=, vdev_dtl_min(vd)); | |
3085 | ASSERT3U(scnp->scn_min_txg, <, vd->vdev_resilver_txg); | |
3086 | ASSERT3U(vd->vdev_resilver_txg, <=, scnp->scn_max_txg); | |
3087 | return (B_TRUE); | |
3088 | } | |
5d1f7fb6 | 3089 | } |
9a49d3f3 | 3090 | |
5d1f7fb6 GW |
3091 | return (B_FALSE); |
3092 | } | |
3093 | ||
34dc7c2f | 3094 | /* |
fde25c0a TC |
3095 | * Reassess DTLs after a config change or scrub completion. If txg == 0 no |
3096 | * write operations will be issued to the pool. | |
34dc7c2f BB |
3097 | */ |
3098 | void | |
9a49d3f3 BB |
3099 | vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, |
3100 | boolean_t scrub_done, boolean_t rebuild_done) | |
34dc7c2f BB |
3101 | { |
3102 | spa_t *spa = vd->vdev_spa; | |
fb5f0bc8 | 3103 | avl_tree_t reftree; |
1c27024e | 3104 | int minref; |
34dc7c2f | 3105 | |
fb5f0bc8 | 3106 | ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); |
34dc7c2f | 3107 | |
1c27024e | 3108 | for (int c = 0; c < vd->vdev_children; c++) |
fb5f0bc8 | 3109 | vdev_dtl_reassess(vd->vdev_child[c], txg, |
9a49d3f3 | 3110 | scrub_txg, scrub_done, rebuild_done); |
fb5f0bc8 | 3111 | |
a1d477c2 | 3112 | if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux) |
fb5f0bc8 BB |
3113 | return; |
3114 | ||
3115 | if (vd->vdev_ops->vdev_op_leaf) { | |
428870ff | 3116 | dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan; |
9a49d3f3 BB |
3117 | vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config; |
3118 | boolean_t check_excise = B_FALSE; | |
41035a04 | 3119 | boolean_t wasempty = B_TRUE; |
428870ff | 3120 | |
34dc7c2f | 3121 | mutex_enter(&vd->vdev_dtl_lock); |
5d1f7fb6 | 3122 | |
02638a30 | 3123 | /* |
9a49d3f3 | 3124 | * If requested, pretend the scan or rebuild completed cleanly. |
02638a30 | 3125 | */ |
9a49d3f3 BB |
3126 | if (zfs_scan_ignore_errors) { |
3127 | if (scn != NULL) | |
3128 | scn->scn_phys.scn_errors = 0; | |
3129 | if (vr != NULL) | |
3130 | vr->vr_rebuild_phys.vrp_errors = 0; | |
3131 | } | |
02638a30 | 3132 | |
41035a04 JP |
3133 | if (scrub_txg != 0 && |
3134 | !range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) { | |
3135 | wasempty = B_FALSE; | |
3136 | zfs_dbgmsg("guid:%llu txg:%llu scrub:%llu started:%d " | |
3137 | "dtl:%llu/%llu errors:%llu", | |
3138 | (u_longlong_t)vd->vdev_guid, (u_longlong_t)txg, | |
3139 | (u_longlong_t)scrub_txg, spa->spa_scrub_started, | |
3140 | (u_longlong_t)vdev_dtl_min(vd), | |
3141 | (u_longlong_t)vdev_dtl_max(vd), | |
3142 | (u_longlong_t)(scn ? scn->scn_phys.scn_errors : 0)); | |
3143 | } | |
3144 | ||
5d1f7fb6 | 3145 | /* |
9a49d3f3 BB |
3146 | * If we've completed a scrub/resilver or a rebuild cleanly |
3147 | * then determine if this vdev should remove any DTLs. We | |
3148 | * only want to excise regions on vdevs that were available | |
3149 | * during the entire duration of this scan. | |
5d1f7fb6 | 3150 | */ |
9a49d3f3 BB |
3151 | if (rebuild_done && |
3152 | vr != NULL && vr->vr_rebuild_phys.vrp_errors == 0) { | |
3153 | check_excise = B_TRUE; | |
3154 | } else { | |
3155 | if (spa->spa_scrub_started || | |
3156 | (scn != NULL && scn->scn_phys.scn_errors == 0)) { | |
3157 | check_excise = B_TRUE; | |
3158 | } | |
3159 | } | |
3160 | ||
3161 | if (scrub_txg && check_excise && | |
3162 | vdev_dtl_should_excise(vd, rebuild_done)) { | |
b128c09f | 3163 | /* |
9a49d3f3 BB |
3164 | * We completed a scrub, resilver or rebuild up to |
3165 | * scrub_txg. If we did it without rebooting, then | |
3166 | * the scrub dtl will be valid, so excise the old | |
3167 | * region and fold in the scrub dtl. Otherwise, | |
3168 | * leave the dtl as-is if there was an error. | |
fb5f0bc8 BB |
3169 | * |
3170 | * There's little trick here: to excise the beginning | |
3171 | * of the DTL_MISSING map, we put it into a reference | |
3172 | * tree and then add a segment with refcnt -1 that | |
3173 | * covers the range [0, scrub_txg). This means | |
3174 | * that each txg in that range has refcnt -1 or 0. | |
3175 | * We then add DTL_SCRUB with a refcnt of 2, so that | |
3176 | * entries in the range [0, scrub_txg) will have a | |
3177 | * positive refcnt -- either 1 or 2. We then convert | |
3178 | * the reference tree into the new DTL_MISSING map. | |
b128c09f | 3179 | */ |
93cf2076 GW |
3180 | space_reftree_create(&reftree); |
3181 | space_reftree_add_map(&reftree, | |
3182 | vd->vdev_dtl[DTL_MISSING], 1); | |
3183 | space_reftree_add_seg(&reftree, 0, scrub_txg, -1); | |
3184 | space_reftree_add_map(&reftree, | |
3185 | vd->vdev_dtl[DTL_SCRUB], 2); | |
3186 | space_reftree_generate_map(&reftree, | |
3187 | vd->vdev_dtl[DTL_MISSING], 1); | |
3188 | space_reftree_destroy(&reftree); | |
41035a04 JP |
3189 | |
3190 | if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) { | |
3191 | zfs_dbgmsg("update DTL_MISSING:%llu/%llu", | |
3192 | (u_longlong_t)vdev_dtl_min(vd), | |
3193 | (u_longlong_t)vdev_dtl_max(vd)); | |
3194 | } else if (!wasempty) { | |
3195 | zfs_dbgmsg("DTL_MISSING is now empty"); | |
3196 | } | |
34dc7c2f | 3197 | } |
93cf2076 GW |
3198 | range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL); |
3199 | range_tree_walk(vd->vdev_dtl[DTL_MISSING], | |
3200 | range_tree_add, vd->vdev_dtl[DTL_PARTIAL]); | |
34dc7c2f | 3201 | if (scrub_done) |
93cf2076 GW |
3202 | range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL); |
3203 | range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL); | |
fb5f0bc8 | 3204 | if (!vdev_readable(vd)) |
93cf2076 | 3205 | range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL); |
fb5f0bc8 | 3206 | else |
93cf2076 GW |
3207 | range_tree_walk(vd->vdev_dtl[DTL_MISSING], |
3208 | range_tree_add, vd->vdev_dtl[DTL_OUTAGE]); | |
5d1f7fb6 GW |
3209 | |
3210 | /* | |
9a49d3f3 BB |
3211 | * If the vdev was resilvering or rebuilding and no longer |
3212 | * has any DTLs then reset the appropriate flag and dirty | |
d14fa5db | 3213 | * the top level so that we persist the change. |
5d1f7fb6 | 3214 | */ |
9a49d3f3 | 3215 | if (txg != 0 && |
d2734cce SD |
3216 | range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) && |
3217 | range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) { | |
9a49d3f3 BB |
3218 | if (vd->vdev_rebuild_txg != 0) { |
3219 | vd->vdev_rebuild_txg = 0; | |
3220 | vdev_config_dirty(vd->vdev_top); | |
3221 | } else if (vd->vdev_resilver_txg != 0) { | |
3222 | vd->vdev_resilver_txg = 0; | |
3223 | vdev_config_dirty(vd->vdev_top); | |
3224 | } | |
d14fa5db | 3225 | } |
5d1f7fb6 | 3226 | |
34dc7c2f | 3227 | mutex_exit(&vd->vdev_dtl_lock); |
b128c09f | 3228 | |
34dc7c2f BB |
3229 | if (txg != 0) |
3230 | vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg); | |
3231 | return; | |
3232 | } | |
3233 | ||
34dc7c2f | 3234 | mutex_enter(&vd->vdev_dtl_lock); |
1c27024e | 3235 | for (int t = 0; t < DTL_TYPES; t++) { |
428870ff BB |
3236 | /* account for child's outage in parent's missing map */ |
3237 | int s = (t == DTL_MISSING) ? DTL_OUTAGE: t; | |
fb5f0bc8 BB |
3238 | if (t == DTL_SCRUB) |
3239 | continue; /* leaf vdevs only */ | |
3240 | if (t == DTL_PARTIAL) | |
3241 | minref = 1; /* i.e. non-zero */ | |
b2255edc BB |
3242 | else if (vdev_get_nparity(vd) != 0) |
3243 | minref = vdev_get_nparity(vd) + 1; /* RAID-Z, dRAID */ | |
fb5f0bc8 BB |
3244 | else |
3245 | minref = vd->vdev_children; /* any kind of mirror */ | |
93cf2076 | 3246 | space_reftree_create(&reftree); |
1c27024e | 3247 | for (int c = 0; c < vd->vdev_children; c++) { |
fb5f0bc8 BB |
3248 | vdev_t *cvd = vd->vdev_child[c]; |
3249 | mutex_enter(&cvd->vdev_dtl_lock); | |
93cf2076 | 3250 | space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1); |
fb5f0bc8 BB |
3251 | mutex_exit(&cvd->vdev_dtl_lock); |
3252 | } | |
93cf2076 GW |
3253 | space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref); |
3254 | space_reftree_destroy(&reftree); | |
34dc7c2f | 3255 | } |
fb5f0bc8 | 3256 | mutex_exit(&vd->vdev_dtl_lock); |
34dc7c2f BB |
3257 | } |
3258 | ||
55c12724 AH |
3259 | /* |
3260 | * Iterate over all the vdevs except spare, and post kobj events | |
3261 | */ | |
3262 | void | |
3263 | vdev_post_kobj_evt(vdev_t *vd) | |
3264 | { | |
3265 | if (vd->vdev_ops->vdev_op_kobj_evt_post && | |
3266 | vd->vdev_kobj_flag == B_FALSE) { | |
3267 | vd->vdev_kobj_flag = B_TRUE; | |
3268 | vd->vdev_ops->vdev_op_kobj_evt_post(vd); | |
3269 | } | |
3270 | ||
3271 | for (int c = 0; c < vd->vdev_children; c++) | |
3272 | vdev_post_kobj_evt(vd->vdev_child[c]); | |
3273 | } | |
3274 | ||
3275 | /* | |
3276 | * Iterate over all the vdevs except spare, and clear kobj events | |
3277 | */ | |
3278 | void | |
3279 | vdev_clear_kobj_evt(vdev_t *vd) | |
3280 | { | |
3281 | vd->vdev_kobj_flag = B_FALSE; | |
3282 | ||
3283 | for (int c = 0; c < vd->vdev_children; c++) | |
3284 | vdev_clear_kobj_evt(vd->vdev_child[c]); | |
3285 | } | |
3286 | ||
93cf2076 | 3287 | int |
34dc7c2f BB |
3288 | vdev_dtl_load(vdev_t *vd) |
3289 | { | |
3290 | spa_t *spa = vd->vdev_spa; | |
34dc7c2f | 3291 | objset_t *mos = spa->spa_meta_objset; |
4d0ba941 | 3292 | range_tree_t *rt; |
93cf2076 | 3293 | int error = 0; |
34dc7c2f | 3294 | |
93cf2076 | 3295 | if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) { |
a1d477c2 | 3296 | ASSERT(vdev_is_concrete(vd)); |
34dc7c2f | 3297 | |
e39fe05b FU |
3298 | /* |
3299 | * If the dtl cannot be sync'd there is no need to open it. | |
3300 | */ | |
3301 | if (spa->spa_mode == SPA_MODE_READ && !spa->spa_read_spacemaps) | |
3302 | return (0); | |
3303 | ||
93cf2076 | 3304 | error = space_map_open(&vd->vdev_dtl_sm, mos, |
a1d477c2 | 3305 | vd->vdev_dtl_object, 0, -1ULL, 0); |
93cf2076 GW |
3306 | if (error) |
3307 | return (error); | |
3308 | ASSERT(vd->vdev_dtl_sm != NULL); | |
34dc7c2f | 3309 | |
4d0ba941 BB |
3310 | rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0); |
3311 | error = space_map_load(vd->vdev_dtl_sm, rt, SM_ALLOC); | |
3312 | if (error == 0) { | |
3313 | mutex_enter(&vd->vdev_dtl_lock); | |
3314 | range_tree_walk(rt, range_tree_add, | |
3315 | vd->vdev_dtl[DTL_MISSING]); | |
3316 | mutex_exit(&vd->vdev_dtl_lock); | |
3317 | } | |
3318 | ||
3319 | range_tree_vacate(rt, NULL, NULL); | |
3320 | range_tree_destroy(rt); | |
34dc7c2f | 3321 | |
93cf2076 GW |
3322 | return (error); |
3323 | } | |
3324 | ||
1c27024e | 3325 | for (int c = 0; c < vd->vdev_children; c++) { |
93cf2076 GW |
3326 | error = vdev_dtl_load(vd->vdev_child[c]); |
3327 | if (error != 0) | |
3328 | break; | |
3329 | } | |
34dc7c2f BB |
3330 | |
3331 | return (error); | |
3332 | } | |
3333 | ||
cc99f275 DB |
3334 | static void |
3335 | vdev_zap_allocation_data(vdev_t *vd, dmu_tx_t *tx) | |
3336 | { | |
3337 | spa_t *spa = vd->vdev_spa; | |
3338 | objset_t *mos = spa->spa_meta_objset; | |
3339 | vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias; | |
3340 | const char *string; | |
3341 | ||
3342 | ASSERT(alloc_bias != VDEV_BIAS_NONE); | |
3343 | ||
3344 | string = | |
3345 | (alloc_bias == VDEV_BIAS_LOG) ? VDEV_ALLOC_BIAS_LOG : | |
3346 | (alloc_bias == VDEV_BIAS_SPECIAL) ? VDEV_ALLOC_BIAS_SPECIAL : | |
3347 | (alloc_bias == VDEV_BIAS_DEDUP) ? VDEV_ALLOC_BIAS_DEDUP : NULL; | |
3348 | ||
3349 | ASSERT(string != NULL); | |
3350 | VERIFY0(zap_add(mos, vd->vdev_top_zap, VDEV_TOP_ZAP_ALLOCATION_BIAS, | |
3351 | 1, strlen(string) + 1, string, tx)); | |
3352 | ||
3353 | if (alloc_bias == VDEV_BIAS_SPECIAL || alloc_bias == VDEV_BIAS_DEDUP) { | |
3354 | spa_activate_allocation_classes(spa, tx); | |
3355 | } | |
3356 | } | |
3357 | ||
e0ab3ab5 JS |
3358 | void |
3359 | vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx) | |
3360 | { | |
3361 | spa_t *spa = vd->vdev_spa; | |
3362 | ||
3363 | VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx)); | |
3364 | VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps, | |
3365 | zapobj, tx)); | |
3366 | } | |
3367 | ||
3368 | uint64_t | |
3369 | vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx) | |
3370 | { | |
3371 | spa_t *spa = vd->vdev_spa; | |
3372 | uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA, | |
3373 | DMU_OT_NONE, 0, tx); | |
3374 | ||
3375 | ASSERT(zap != 0); | |
3376 | VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps, | |
3377 | zap, tx)); | |
3378 | ||
3379 | return (zap); | |
3380 | } | |
3381 | ||
3382 | void | |
3383 | vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx) | |
3384 | { | |
e0ab3ab5 JS |
3385 | if (vd->vdev_ops != &vdev_hole_ops && |
3386 | vd->vdev_ops != &vdev_missing_ops && | |
3387 | vd->vdev_ops != &vdev_root_ops && | |
3388 | !vd->vdev_top->vdev_removing) { | |
3389 | if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) { | |
3390 | vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx); | |
3391 | } | |
3392 | if (vd == vd->vdev_top && vd->vdev_top_zap == 0) { | |
3393 | vd->vdev_top_zap = vdev_create_link_zap(vd, tx); | |
cc99f275 DB |
3394 | if (vd->vdev_alloc_bias != VDEV_BIAS_NONE) |
3395 | vdev_zap_allocation_data(vd, tx); | |
e0ab3ab5 JS |
3396 | } |
3397 | } | |
3e4ed421 RW |
3398 | if (vd->vdev_ops == &vdev_root_ops && vd->vdev_root_zap == 0 && |
3399 | spa_feature_is_enabled(vd->vdev_spa, SPA_FEATURE_AVZ_V2)) { | |
3400 | if (!spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2)) | |
3401 | spa_feature_incr(vd->vdev_spa, SPA_FEATURE_AVZ_V2, tx); | |
3402 | vd->vdev_root_zap = vdev_create_link_zap(vd, tx); | |
3403 | } | |
cc99f275 | 3404 | |
1c27024e | 3405 | for (uint64_t i = 0; i < vd->vdev_children; i++) { |
e0ab3ab5 JS |
3406 | vdev_construct_zaps(vd->vdev_child[i], tx); |
3407 | } | |
3408 | } | |
3409 | ||
65c7cc49 | 3410 | static void |
34dc7c2f BB |
3411 | vdev_dtl_sync(vdev_t *vd, uint64_t txg) |
3412 | { | |
3413 | spa_t *spa = vd->vdev_spa; | |
93cf2076 | 3414 | range_tree_t *rt = vd->vdev_dtl[DTL_MISSING]; |
34dc7c2f | 3415 | objset_t *mos = spa->spa_meta_objset; |
93cf2076 | 3416 | range_tree_t *rtsync; |
34dc7c2f | 3417 | dmu_tx_t *tx; |
93cf2076 | 3418 | uint64_t object = space_map_object(vd->vdev_dtl_sm); |
34dc7c2f | 3419 | |
a1d477c2 | 3420 | ASSERT(vdev_is_concrete(vd)); |
93cf2076 | 3421 | ASSERT(vd->vdev_ops->vdev_op_leaf); |
428870ff | 3422 | |
34dc7c2f BB |
3423 | tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); |
3424 | ||
93cf2076 GW |
3425 | if (vd->vdev_detached || vd->vdev_top->vdev_removing) { |
3426 | mutex_enter(&vd->vdev_dtl_lock); | |
3427 | space_map_free(vd->vdev_dtl_sm, tx); | |
3428 | space_map_close(vd->vdev_dtl_sm); | |
3429 | vd->vdev_dtl_sm = NULL; | |
3430 | mutex_exit(&vd->vdev_dtl_lock); | |
e0ab3ab5 JS |
3431 | |
3432 | /* | |
3433 | * We only destroy the leaf ZAP for detached leaves or for | |
3434 | * removed log devices. Removed data devices handle leaf ZAP | |
3435 | * cleanup later, once cancellation is no longer possible. | |
3436 | */ | |
3437 | if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached || | |
3438 | vd->vdev_top->vdev_islog)) { | |
3439 | vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx); | |
3440 | vd->vdev_leaf_zap = 0; | |
3441 | } | |
3442 | ||
34dc7c2f | 3443 | dmu_tx_commit(tx); |
34dc7c2f BB |
3444 | return; |
3445 | } | |
3446 | ||
93cf2076 GW |
3447 | if (vd->vdev_dtl_sm == NULL) { |
3448 | uint64_t new_object; | |
3449 | ||
93e28d66 | 3450 | new_object = space_map_alloc(mos, zfs_vdev_dtl_sm_blksz, tx); |
93cf2076 GW |
3451 | VERIFY3U(new_object, !=, 0); |
3452 | ||
3453 | VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object, | |
a1d477c2 | 3454 | 0, -1ULL, 0)); |
93cf2076 | 3455 | ASSERT(vd->vdev_dtl_sm != NULL); |
34dc7c2f BB |
3456 | } |
3457 | ||
ca577779 | 3458 | rtsync = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0); |
34dc7c2f BB |
3459 | |
3460 | mutex_enter(&vd->vdev_dtl_lock); | |
93cf2076 | 3461 | range_tree_walk(rt, range_tree_add, rtsync); |
34dc7c2f BB |
3462 | mutex_exit(&vd->vdev_dtl_lock); |
3463 | ||
93e28d66 | 3464 | space_map_truncate(vd->vdev_dtl_sm, zfs_vdev_dtl_sm_blksz, tx); |
4d044c4c | 3465 | space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx); |
93cf2076 | 3466 | range_tree_vacate(rtsync, NULL, NULL); |
34dc7c2f | 3467 | |
93cf2076 | 3468 | range_tree_destroy(rtsync); |
34dc7c2f | 3469 | |
93cf2076 GW |
3470 | /* |
3471 | * If the object for the space map has changed then dirty | |
3472 | * the top level so that we update the config. | |
3473 | */ | |
3474 | if (object != space_map_object(vd->vdev_dtl_sm)) { | |
4a0ee12a PZ |
3475 | vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, " |
3476 | "new object %llu", (u_longlong_t)txg, spa_name(spa), | |
3477 | (u_longlong_t)object, | |
3478 | (u_longlong_t)space_map_object(vd->vdev_dtl_sm)); | |
93cf2076 GW |
3479 | vdev_config_dirty(vd->vdev_top); |
3480 | } | |
34dc7c2f BB |
3481 | |
3482 | dmu_tx_commit(tx); | |
3483 | } | |
3484 | ||
fb5f0bc8 BB |
3485 | /* |
3486 | * Determine whether the specified vdev can be offlined/detached/removed | |
3487 | * without losing data. | |
3488 | */ | |
3489 | boolean_t | |
3490 | vdev_dtl_required(vdev_t *vd) | |
3491 | { | |
3492 | spa_t *spa = vd->vdev_spa; | |
3493 | vdev_t *tvd = vd->vdev_top; | |
3494 | uint8_t cant_read = vd->vdev_cant_read; | |
3495 | boolean_t required; | |
3496 | ||
3497 | ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); | |
3498 | ||
3499 | if (vd == spa->spa_root_vdev || vd == tvd) | |
3500 | return (B_TRUE); | |
3501 | ||
3502 | /* | |
3503 | * Temporarily mark the device as unreadable, and then determine | |
3504 | * whether this results in any DTL outages in the top-level vdev. | |
3505 | * If not, we can safely offline/detach/remove the device. | |
3506 | */ | |
3507 | vd->vdev_cant_read = B_TRUE; | |
9a49d3f3 | 3508 | vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE); |
fb5f0bc8 BB |
3509 | required = !vdev_dtl_empty(tvd, DTL_OUTAGE); |
3510 | vd->vdev_cant_read = cant_read; | |
9a49d3f3 | 3511 | vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE); |
fb5f0bc8 | 3512 | |
28caa74b MM |
3513 | if (!required && zio_injection_enabled) { |
3514 | required = !!zio_handle_device_injection(vd, NULL, | |
3515 | SET_ERROR(ECHILD)); | |
3516 | } | |
572e2857 | 3517 | |
fb5f0bc8 BB |
3518 | return (required); |
3519 | } | |
3520 | ||
b128c09f BB |
3521 | /* |
3522 | * Determine if resilver is needed, and if so the txg range. | |
3523 | */ | |
3524 | boolean_t | |
3525 | vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp) | |
3526 | { | |
3527 | boolean_t needed = B_FALSE; | |
3528 | uint64_t thismin = UINT64_MAX; | |
3529 | uint64_t thismax = 0; | |
3530 | ||
3531 | if (vd->vdev_children == 0) { | |
3532 | mutex_enter(&vd->vdev_dtl_lock); | |
d2734cce | 3533 | if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) && |
fb5f0bc8 | 3534 | vdev_writeable(vd)) { |
b128c09f | 3535 | |
5d1f7fb6 GW |
3536 | thismin = vdev_dtl_min(vd); |
3537 | thismax = vdev_dtl_max(vd); | |
b128c09f BB |
3538 | needed = B_TRUE; |
3539 | } | |
3540 | mutex_exit(&vd->vdev_dtl_lock); | |
3541 | } else { | |
1c27024e | 3542 | for (int c = 0; c < vd->vdev_children; c++) { |
b128c09f BB |
3543 | vdev_t *cvd = vd->vdev_child[c]; |
3544 | uint64_t cmin, cmax; | |
3545 | ||
3546 | if (vdev_resilver_needed(cvd, &cmin, &cmax)) { | |
3547 | thismin = MIN(thismin, cmin); | |
3548 | thismax = MAX(thismax, cmax); | |
3549 | needed = B_TRUE; | |
3550 | } | |
3551 | } | |
3552 | } | |
3553 | ||
3554 | if (needed && minp) { | |
3555 | *minp = thismin; | |
3556 | *maxp = thismax; | |
3557 | } | |
3558 | return (needed); | |
3559 | } | |
3560 | ||
d2734cce | 3561 | /* |
27f80e85 BB |
3562 | * Gets the checkpoint space map object from the vdev's ZAP. On success sm_obj |
3563 | * will contain either the checkpoint spacemap object or zero if none exists. | |
3564 | * All other errors are returned to the caller. | |
d2734cce SD |
3565 | */ |
3566 | int | |
27f80e85 | 3567 | vdev_checkpoint_sm_object(vdev_t *vd, uint64_t *sm_obj) |
d2734cce SD |
3568 | { |
3569 | ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER)); | |
27f80e85 | 3570 | |
d2734cce | 3571 | if (vd->vdev_top_zap == 0) { |
27f80e85 | 3572 | *sm_obj = 0; |
d2734cce SD |
3573 | return (0); |
3574 | } | |
3575 | ||
27f80e85 BB |
3576 | int error = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap, |
3577 | VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, sm_obj); | |
3578 | if (error == ENOENT) { | |
3579 | *sm_obj = 0; | |
3580 | error = 0; | |
3581 | } | |
d2734cce | 3582 | |
27f80e85 | 3583 | return (error); |
d2734cce SD |
3584 | } |
3585 | ||
a1d477c2 | 3586 | int |
34dc7c2f BB |
3587 | vdev_load(vdev_t *vd) |
3588 | { | |
a0e01997 | 3589 | int children = vd->vdev_children; |
a1d477c2 | 3590 | int error = 0; |
a0e01997 AS |
3591 | taskq_t *tq = NULL; |
3592 | ||
3593 | /* | |
3594 | * It's only worthwhile to use the taskq for the root vdev, because the | |
3595 | * slow part is metaslab_init, and that only happens for top-level | |
3596 | * vdevs. | |
3597 | */ | |
3598 | if (vd->vdev_ops == &vdev_root_ops && vd->vdev_children > 0) { | |
3599 | tq = taskq_create("vdev_load", children, minclsyspri, | |
3600 | children, children, TASKQ_PREPOPULATE); | |
3601 | } | |
a1d477c2 | 3602 | |
34dc7c2f BB |
3603 | /* |
3604 | * Recursively load all children. | |
3605 | */ | |
a1d477c2 | 3606 | for (int c = 0; c < vd->vdev_children; c++) { |
a0e01997 AS |
3607 | vdev_t *cvd = vd->vdev_child[c]; |
3608 | ||
3609 | if (tq == NULL || vdev_uses_zvols(cvd)) { | |
3610 | cvd->vdev_load_error = vdev_load(cvd); | |
3611 | } else { | |
3612 | VERIFY(taskq_dispatch(tq, vdev_load_child, | |
3613 | cvd, TQ_SLEEP) != TASKQID_INVALID); | |
a1d477c2 MA |
3614 | } |
3615 | } | |
3616 | ||
a0e01997 AS |
3617 | if (tq != NULL) { |
3618 | taskq_wait(tq); | |
3619 | taskq_destroy(tq); | |
3620 | } | |
3621 | ||
3622 | for (int c = 0; c < vd->vdev_children; c++) { | |
3623 | int error = vd->vdev_child[c]->vdev_load_error; | |
3624 | ||
3625 | if (error != 0) | |
3626 | return (error); | |
3627 | } | |
3628 | ||
a1d477c2 | 3629 | vdev_set_deflate_ratio(vd); |
34dc7c2f | 3630 | |
cc99f275 DB |
3631 | /* |
3632 | * On spa_load path, grab the allocation bias from our zap | |
3633 | */ | |
3634 | if (vd == vd->vdev_top && vd->vdev_top_zap != 0) { | |
3635 | spa_t *spa = vd->vdev_spa; | |
3636 | char bias_str[64]; | |
3637 | ||
3a92552f | 3638 | error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap, |
cc99f275 | 3639 | VDEV_TOP_ZAP_ALLOCATION_BIAS, 1, sizeof (bias_str), |
3a92552f MA |
3640 | bias_str); |
3641 | if (error == 0) { | |
cc99f275 DB |
3642 | ASSERT(vd->vdev_alloc_bias == VDEV_BIAS_NONE); |
3643 | vd->vdev_alloc_bias = vdev_derive_alloc_bias(bias_str); | |
3a92552f MA |
3644 | } else if (error != ENOENT) { |
3645 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, | |
3646 | VDEV_AUX_CORRUPT_DATA); | |
3647 | vdev_dbgmsg(vd, "vdev_load: zap_lookup(top_zap=%llu) " | |
5dbf6c5a AZ |
3648 | "failed [error=%d]", |
3649 | (u_longlong_t)vd->vdev_top_zap, error); | |
3a92552f | 3650 | return (error); |
cc99f275 DB |
3651 | } |
3652 | } | |
3653 | ||
16f0fdad MZ |
3654 | if (vd == vd->vdev_top && vd->vdev_top_zap != 0) { |
3655 | spa_t *spa = vd->vdev_spa; | |
3656 | uint64_t failfast; | |
3657 | ||
3658 | error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap, | |
3659 | vdev_prop_to_name(VDEV_PROP_FAILFAST), sizeof (failfast), | |
3660 | 1, &failfast); | |
3661 | if (error == 0) { | |
3662 | vd->vdev_failfast = failfast & 1; | |
3663 | } else if (error == ENOENT) { | |
3664 | vd->vdev_failfast = vdev_prop_default_numeric( | |
3665 | VDEV_PROP_FAILFAST); | |
3666 | } else { | |
3667 | vdev_dbgmsg(vd, | |
3668 | "vdev_load: zap_lookup(top_zap=%llu) " | |
3669 | "failed [error=%d]", | |
3670 | (u_longlong_t)vd->vdev_top_zap, error); | |
3671 | } | |
3672 | } | |
3673 | ||
9a49d3f3 BB |
3674 | /* |
3675 | * Load any rebuild state from the top-level vdev zap. | |
3676 | */ | |
3677 | if (vd == vd->vdev_top && vd->vdev_top_zap != 0) { | |
3678 | error = vdev_rebuild_load(vd); | |
3679 | if (error && error != ENOTSUP) { | |
3680 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, | |
3681 | VDEV_AUX_CORRUPT_DATA); | |
3682 | vdev_dbgmsg(vd, "vdev_load: vdev_rebuild_load " | |
3683 | "failed [error=%d]", error); | |
3684 | return (error); | |
3685 | } | |
3686 | } | |
3687 | ||
69f024a5 RW |
3688 | if (vd->vdev_top_zap != 0 || vd->vdev_leaf_zap != 0) { |
3689 | uint64_t zapobj; | |
3690 | ||
3691 | if (vd->vdev_top_zap != 0) | |
3692 | zapobj = vd->vdev_top_zap; | |
3693 | else | |
3694 | zapobj = vd->vdev_leaf_zap; | |
3695 | ||
3696 | error = vdev_prop_get_int(vd, VDEV_PROP_CHECKSUM_N, | |
3697 | &vd->vdev_checksum_n); | |
3698 | if (error && error != ENOENT) | |
3699 | vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) " | |
3700 | "failed [error=%d]", (u_longlong_t)zapobj, error); | |
3701 | ||
3702 | error = vdev_prop_get_int(vd, VDEV_PROP_CHECKSUM_T, | |
3703 | &vd->vdev_checksum_t); | |
3704 | if (error && error != ENOENT) | |
3705 | vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) " | |
3706 | "failed [error=%d]", (u_longlong_t)zapobj, error); | |
3707 | ||
3708 | error = vdev_prop_get_int(vd, VDEV_PROP_IO_N, | |
3709 | &vd->vdev_io_n); | |
3710 | if (error && error != ENOENT) | |
3711 | vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) " | |
3712 | "failed [error=%d]", (u_longlong_t)zapobj, error); | |
3713 | ||
3714 | error = vdev_prop_get_int(vd, VDEV_PROP_IO_T, | |
3715 | &vd->vdev_io_t); | |
3716 | if (error && error != ENOENT) | |
3717 | vdev_dbgmsg(vd, "vdev_load: zap_lookup(zap=%llu) " | |
3718 | "failed [error=%d]", (u_longlong_t)zapobj, error); | |
3719 | } | |
3720 | ||
34dc7c2f BB |
3721 | /* |
3722 | * If this is a top-level vdev, initialize its metaslabs. | |
3723 | */ | |
a1d477c2 | 3724 | if (vd == vd->vdev_top && vdev_is_concrete(vd)) { |
cc99f275 DB |
3725 | vdev_metaslab_group_create(vd); |
3726 | ||
a1d477c2 MA |
3727 | if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) { |
3728 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, | |
3729 | VDEV_AUX_CORRUPT_DATA); | |
4a0ee12a PZ |
3730 | vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, " |
3731 | "asize=%llu", (u_longlong_t)vd->vdev_ashift, | |
3732 | (u_longlong_t)vd->vdev_asize); | |
a1d477c2 | 3733 | return (SET_ERROR(ENXIO)); |
928e8ad4 SD |
3734 | } |
3735 | ||
3736 | error = vdev_metaslab_init(vd, 0); | |
3737 | if (error != 0) { | |
4a0ee12a PZ |
3738 | vdev_dbgmsg(vd, "vdev_load: metaslab_init failed " |
3739 | "[error=%d]", error); | |
a1d477c2 MA |
3740 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, |
3741 | VDEV_AUX_CORRUPT_DATA); | |
3742 | return (error); | |
3743 | } | |
d2734cce | 3744 | |
27f80e85 BB |
3745 | uint64_t checkpoint_sm_obj; |
3746 | error = vdev_checkpoint_sm_object(vd, &checkpoint_sm_obj); | |
3747 | if (error == 0 && checkpoint_sm_obj != 0) { | |
d2734cce SD |
3748 | objset_t *mos = spa_meta_objset(vd->vdev_spa); |
3749 | ASSERT(vd->vdev_asize != 0); | |
3750 | ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL); | |
3751 | ||
928e8ad4 | 3752 | error = space_map_open(&vd->vdev_checkpoint_sm, |
d2734cce | 3753 | mos, checkpoint_sm_obj, 0, vd->vdev_asize, |
928e8ad4 SD |
3754 | vd->vdev_ashift); |
3755 | if (error != 0) { | |
d2734cce SD |
3756 | vdev_dbgmsg(vd, "vdev_load: space_map_open " |
3757 | "failed for checkpoint spacemap (obj %llu) " | |
3758 | "[error=%d]", | |
3759 | (u_longlong_t)checkpoint_sm_obj, error); | |
3760 | return (error); | |
3761 | } | |
3762 | ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL); | |
d2734cce SD |
3763 | |
3764 | /* | |
3765 | * Since the checkpoint_sm contains free entries | |
425d3237 SD |
3766 | * exclusively we can use space_map_allocated() to |
3767 | * indicate the cumulative checkpointed space that | |
3768 | * has been freed. | |
d2734cce SD |
3769 | */ |
3770 | vd->vdev_stat.vs_checkpoint_space = | |
425d3237 | 3771 | -space_map_allocated(vd->vdev_checkpoint_sm); |
d2734cce SD |
3772 | vd->vdev_spa->spa_checkpoint_info.sci_dspace += |
3773 | vd->vdev_stat.vs_checkpoint_space; | |
27f80e85 BB |
3774 | } else if (error != 0) { |
3775 | vdev_dbgmsg(vd, "vdev_load: failed to retrieve " | |
3776 | "checkpoint space map object from vdev ZAP " | |
3777 | "[error=%d]", error); | |
3778 | return (error); | |
d2734cce | 3779 | } |
a1d477c2 MA |
3780 | } |
3781 | ||
34dc7c2f BB |
3782 | /* |
3783 | * If this is a leaf vdev, load its DTL. | |
3784 | */ | |
a1d477c2 | 3785 | if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) { |
34dc7c2f BB |
3786 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, |
3787 | VDEV_AUX_CORRUPT_DATA); | |
4a0ee12a PZ |
3788 | vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed " |
3789 | "[error=%d]", error); | |
a1d477c2 MA |
3790 | return (error); |
3791 | } | |
3792 | ||
27f80e85 BB |
3793 | uint64_t obsolete_sm_object; |
3794 | error = vdev_obsolete_sm_object(vd, &obsolete_sm_object); | |
3795 | if (error == 0 && obsolete_sm_object != 0) { | |
a1d477c2 MA |
3796 | objset_t *mos = vd->vdev_spa->spa_meta_objset; |
3797 | ASSERT(vd->vdev_asize != 0); | |
d2734cce | 3798 | ASSERT3P(vd->vdev_obsolete_sm, ==, NULL); |
a1d477c2 MA |
3799 | |
3800 | if ((error = space_map_open(&vd->vdev_obsolete_sm, mos, | |
3801 | obsolete_sm_object, 0, vd->vdev_asize, 0))) { | |
3802 | vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN, | |
3803 | VDEV_AUX_CORRUPT_DATA); | |
4a0ee12a PZ |
3804 | vdev_dbgmsg(vd, "vdev_load: space_map_open failed for " |
3805 | "obsolete spacemap (obj %llu) [error=%d]", | |
3806 | (u_longlong_t)obsolete_sm_object, error); | |
a1d477c2 MA |
3807 | return (error); |
3808 | } | |
27f80e85 BB |
3809 | } else if (error != 0) { |
3810 | vdev_dbgmsg(vd, "vdev_load: failed to retrieve obsolete " | |
3811 | "space map object from vdev ZAP [error=%d]", error); | |
3812 | return (error); | |
a1d477c2 MA |
3813 | } |
3814 | ||
3815 | return (0); | |
34dc7c2f BB |
3816 | } |
3817 | ||
3818 | /* | |
3819 | * The special vdev case is used for hot spares and l2cache devices. Its | |
3820 | * sole purpose it to set the vdev state for the associated vdev. To do this, | |
3821 | * we make sure that we can open the underlying device, then try to read the | |
3822 | * label, and make sure that the label is sane and that it hasn't been | |
3823 | * repurposed to another pool. | |
3824 | */ | |
3825 | int | |
3826 | vdev_validate_aux(vdev_t *vd) | |
3827 | { | |
3828 | nvlist_t *label; | |
3829 | uint64_t guid, version; | |
3830 | uint64_t state; | |
3831 | ||
b128c09f BB |
3832 | if (!vdev_readable(vd)) |
3833 | return (0); | |
3834 | ||
3bc7e0fb | 3835 | if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) { |
34dc7c2f BB |
3836 | vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, |
3837 | VDEV_AUX_CORRUPT_DATA); | |
3838 | return (-1); | |
3839 | } | |
3840 | ||
3841 | if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 || | |
9ae529ec | 3842 | !SPA_VERSION_IS_SUPPORTED(version) || |
34dc7c2f BB |
3843 | nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 || |
3844 | guid != vd->vdev_guid || | |
3845 | nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) { | |
3846 | vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN, | |
3847 | VDEV_AUX_CORRUPT_DATA); | |
3848 | nvlist_free(label); | |
3849 | return (-1); | |
3850 | } | |
3851 | ||
3852 | /* | |
3853 | * We don't actually check the pool state here. If it's in fact in | |
3854 | * use by another pool, we update this fact on the fly when requested. | |
3855 | */ | |
3856 | nvlist_free(label); | |
3857 | return (0); | |
3858 | } | |
3859 | ||
93e28d66 SD |
3860 | static void |
3861 | vdev_destroy_ms_flush_data(vdev_t *vd, dmu_tx_t *tx) | |
3862 | { | |
3863 | objset_t *mos = spa_meta_objset(vd->vdev_spa); | |
3864 | ||
3865 | if (vd->vdev_top_zap == 0) | |
3866 | return; | |
3867 | ||
3868 | uint64_t object = 0; | |
3869 | int err = zap_lookup(mos, vd->vdev_top_zap, | |
3870 | VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, &object); | |
3871 | if (err == ENOENT) | |
3872 | return; | |
3a92552f | 3873 | VERIFY0(err); |
93e28d66 SD |
3874 | |
3875 | VERIFY0(dmu_object_free(mos, object, tx)); | |
3876 | VERIFY0(zap_remove(mos, vd->vdev_top_zap, | |
3877 | VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, tx)); | |
3878 | } | |
3879 | ||
a1d477c2 MA |
3880 | /* |
3881 | * Free the objects used to store this vdev's spacemaps, and the array | |
3882 | * that points to them. | |
3883 | */ | |
428870ff | 3884 | void |
a1d477c2 MA |
3885 | vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx) |
3886 | { | |
3887 | if (vd->vdev_ms_array == 0) | |
3888 | return; | |
3889 | ||
3890 | objset_t *mos = vd->vdev_spa->spa_meta_objset; | |
3891 | uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift; | |
3892 | size_t array_bytes = array_count * sizeof (uint64_t); | |
3893 | uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP); | |
3894 | VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0, | |
3895 | array_bytes, smobj_array, 0)); | |
3896 | ||
3897 | for (uint64_t i = 0; i < array_count; i++) { | |
3898 | uint64_t smobj = smobj_array[i]; | |
3899 | if (smobj == 0) | |
3900 | continue; | |
3901 | ||
3902 | space_map_free_obj(mos, smobj, tx); | |
3903 | } | |
3904 | ||
3905 | kmem_free(smobj_array, array_bytes); | |
3906 | VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx)); | |
93e28d66 | 3907 | vdev_destroy_ms_flush_data(vd, tx); |
a1d477c2 MA |
3908 | vd->vdev_ms_array = 0; |
3909 | } | |
3910 | ||
3911 | static void | |
ee900344 | 3912 | vdev_remove_empty_log(vdev_t *vd, uint64_t txg) |
428870ff BB |
3913 | { |
3914 | spa_t *spa = vd->vdev_spa; | |
428870ff | 3915 | |
ee900344 | 3916 | ASSERT(vd->vdev_islog); |
e0ab3ab5 JS |
3917 | ASSERT(vd == vd->vdev_top); |
3918 | ASSERT3U(txg, ==, spa_syncing_txg(spa)); | |
428870ff | 3919 | |
ee900344 | 3920 | dmu_tx_t *tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); |
e0ab3ab5 | 3921 | |
ee900344 SD |
3922 | vdev_destroy_spacemaps(vd, tx); |
3923 | if (vd->vdev_top_zap != 0) { | |
e0ab3ab5 JS |
3924 | vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx); |
3925 | vd->vdev_top_zap = 0; | |
3926 | } | |
ee900344 | 3927 | |
428870ff BB |
3928 | dmu_tx_commit(tx); |
3929 | } | |
3930 | ||
34dc7c2f BB |
3931 | void |
3932 | vdev_sync_done(vdev_t *vd, uint64_t txg) | |
3933 | { | |
3934 | metaslab_t *msp; | |
428870ff BB |
3935 | boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg)); |
3936 | ||
a1d477c2 | 3937 | ASSERT(vdev_is_concrete(vd)); |
34dc7c2f | 3938 | |
619f0976 GW |
3939 | while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))) |
3940 | != NULL) | |
34dc7c2f | 3941 | metaslab_sync_done(msp, txg); |
428870ff | 3942 | |
aa755b35 | 3943 | if (reassess) { |
428870ff | 3944 | metaslab_sync_reassess(vd->vdev_mg); |
aa755b35 MA |
3945 | if (vd->vdev_log_mg != NULL) |
3946 | metaslab_sync_reassess(vd->vdev_log_mg); | |
3947 | } | |
34dc7c2f BB |
3948 | } |
3949 | ||
3950 | void | |
3951 | vdev_sync(vdev_t *vd, uint64_t txg) | |
3952 | { | |
3953 | spa_t *spa = vd->vdev_spa; | |
3954 | vdev_t *lvd; | |
3955 | metaslab_t *msp; | |
34dc7c2f | 3956 | |
6c926f42 SD |
3957 | ASSERT3U(txg, ==, spa->spa_syncing_txg); |
3958 | dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); | |
a1d477c2 | 3959 | if (range_tree_space(vd->vdev_obsolete_segments) > 0) { |
a1d477c2 MA |
3960 | ASSERT(vd->vdev_removing || |
3961 | vd->vdev_ops == &vdev_indirect_ops); | |
3962 | ||
a1d477c2 | 3963 | vdev_indirect_sync_obsolete(vd, tx); |
a1d477c2 MA |
3964 | |
3965 | /* | |
3966 | * If the vdev is indirect, it can't have dirty | |
3967 | * metaslabs or DTLs. | |
3968 | */ | |
3969 | if (vd->vdev_ops == &vdev_indirect_ops) { | |
3970 | ASSERT(txg_list_empty(&vd->vdev_ms_list, txg)); | |
3971 | ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg)); | |
6c926f42 | 3972 | dmu_tx_commit(tx); |
a1d477c2 MA |
3973 | return; |
3974 | } | |
3975 | } | |
3976 | ||
3977 | ASSERT(vdev_is_concrete(vd)); | |
3978 | ||
3979 | if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 && | |
3980 | !vd->vdev_removing) { | |
34dc7c2f | 3981 | ASSERT(vd == vd->vdev_top); |
a1d477c2 | 3982 | ASSERT0(vd->vdev_indirect_config.vic_mapping_object); |
34dc7c2f BB |
3983 | vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset, |
3984 | DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx); | |
3985 | ASSERT(vd->vdev_ms_array != 0); | |
3986 | vdev_config_dirty(vd); | |
34dc7c2f BB |
3987 | } |
3988 | ||
3989 | while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) { | |
3990 | metaslab_sync(msp, txg); | |
3991 | (void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg)); | |
3992 | } | |
3993 | ||
3994 | while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL) | |
3995 | vdev_dtl_sync(lvd, txg); | |
3996 | ||
a1d477c2 | 3997 | /* |
ee900344 SD |
3998 | * If this is an empty log device being removed, destroy the |
3999 | * metadata associated with it. | |
a1d477c2 | 4000 | */ |
ee900344 SD |
4001 | if (vd->vdev_islog && vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing) |
4002 | vdev_remove_empty_log(vd, txg); | |
a1d477c2 | 4003 | |
34dc7c2f | 4004 | (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)); |
6c926f42 | 4005 | dmu_tx_commit(tx); |
34dc7c2f BB |
4006 | } |
4007 | ||
4008 | uint64_t | |
4009 | vdev_psize_to_asize(vdev_t *vd, uint64_t psize) | |
4010 | { | |
4011 | return (vd->vdev_ops->vdev_op_asize(vd, psize)); | |
4012 | } | |
4013 | ||
34dc7c2f BB |
4014 | /* |
4015 | * Mark the given vdev faulted. A faulted vdev behaves as if the device could | |
4016 | * not be opened, and no I/O is attempted. | |
4017 | */ | |
4018 | int | |
428870ff | 4019 | vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux) |
34dc7c2f | 4020 | { |
572e2857 | 4021 | vdev_t *vd, *tvd; |
34dc7c2f | 4022 | |
428870ff | 4023 | spa_vdev_state_enter(spa, SCL_NONE); |
34dc7c2f | 4024 | |
b128c09f | 4025 | if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) |
28caa74b | 4026 | return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV))); |
34dc7c2f | 4027 | |
34dc7c2f | 4028 | if (!vd->vdev_ops->vdev_op_leaf) |
28caa74b | 4029 | return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP))); |
34dc7c2f | 4030 | |
572e2857 BB |
4031 | tvd = vd->vdev_top; |
4032 | ||
4a283c7f TH |
4033 | /* |
4034 | * If user did a 'zpool offline -f' then make the fault persist across | |
4035 | * reboots. | |
4036 | */ | |
4037 | if (aux == VDEV_AUX_EXTERNAL_PERSIST) { | |
4038 | /* | |
4039 | * There are two kinds of forced faults: temporary and | |
4040 | * persistent. Temporary faults go away at pool import, while | |
4041 | * persistent faults stay set. Both types of faults can be | |
4042 | * cleared with a zpool clear. | |
4043 | * | |
4044 | * We tell if a vdev is persistently faulted by looking at the | |
4045 | * ZPOOL_CONFIG_AUX_STATE nvpair. If it's set to "external" at | |
4046 | * import then it's a persistent fault. Otherwise, it's | |
4047 | * temporary. We get ZPOOL_CONFIG_AUX_STATE set to "external" | |
4048 | * by setting vd.vdev_stat.vs_aux to VDEV_AUX_EXTERNAL. This | |
4049 | * tells vdev_config_generate() (which gets run later) to set | |
4050 | * ZPOOL_CONFIG_AUX_STATE to "external" in the nvlist. | |
4051 | */ | |
4052 | vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL; | |
4053 | vd->vdev_tmpoffline = B_FALSE; | |
4054 | aux = VDEV_AUX_EXTERNAL; | |
4055 | } else { | |
4056 | vd->vdev_tmpoffline = B_TRUE; | |
4057 | } | |
4058 | ||
428870ff BB |
4059 | /* |
4060 | * We don't directly use the aux state here, but if we do a | |
4061 | * vdev_reopen(), we need this value to be present to remember why we | |
4062 | * were faulted. | |
4063 | */ | |
4064 | vd->vdev_label_aux = aux; | |
4065 | ||
34dc7c2f BB |
4066 | /* |
4067 | * Faulted state takes precedence over degraded. | |
4068 | */ | |
428870ff | 4069 | vd->vdev_delayed_close = B_FALSE; |
34dc7c2f BB |
4070 | vd->vdev_faulted = 1ULL; |
4071 | vd->vdev_degraded = 0ULL; | |
428870ff | 4072 | vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux); |
34dc7c2f BB |
4073 | |
4074 | /* | |
428870ff BB |
4075 | * If this device has the only valid copy of the data, then |
4076 | * back off and simply mark the vdev as degraded instead. | |
34dc7c2f | 4077 | */ |
572e2857 | 4078 | if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) { |
34dc7c2f BB |
4079 | vd->vdev_degraded = 1ULL; |
4080 | vd->vdev_faulted = 0ULL; | |
4081 | ||
4082 | /* | |
4083 | * If we reopen the device and it's not dead, only then do we | |
4084 | * mark it degraded. | |
4085 | */ | |
572e2857 | 4086 | vdev_reopen(tvd); |
34dc7c2f | 4087 | |
428870ff BB |
4088 | if (vdev_readable(vd)) |
4089 | vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux); | |
34dc7c2f BB |
4090 | } |
4091 | ||
b128c09f | 4092 | return (spa_vdev_state_exit(spa, vd, 0)); |
34dc7c2f BB |
4093 | } |
4094 | ||
4095 | /* | |
4096 | * Mark the given vdev degraded. A degraded vdev is purely an indication to the | |
4097 | * user that something is wrong. The vdev continues to operate as normal as far | |
4098 | * as I/O is concerned. | |
4099 | */ | |
4100 | int | |
428870ff | 4101 | vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux) |
34dc7c2f | 4102 | { |
b128c09f | 4103 | vdev_t *vd; |
34dc7c2f | 4104 | |
428870ff | 4105 | spa_vdev_state_enter(spa, SCL_NONE); |
34dc7c2f | 4106 | |
b128c09f | 4107 | if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) |
28caa74b | 4108 | return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV))); |
34dc7c2f | 4109 | |
34dc7c2f | 4110 | if (!vd->vdev_ops->vdev_op_leaf) |
28caa74b | 4111 | return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP))); |
34dc7c2f BB |
4112 | |
4113 | /* | |
4114 | * If the vdev is already faulted, then don't do anything. | |
4115 | */ | |
b128c09f BB |
4116 | if (vd->vdev_faulted || vd->vdev_degraded) |
4117 | return (spa_vdev_state_exit(spa, NULL, 0)); | |
34dc7c2f BB |
4118 | |
4119 | vd->vdev_degraded = 1ULL; | |
4120 | if (!vdev_is_dead(vd)) | |
4121 | vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, | |
428870ff | 4122 | aux); |
34dc7c2f | 4123 | |
b128c09f | 4124 | return (spa_vdev_state_exit(spa, vd, 0)); |
34dc7c2f BB |
4125 | } |
4126 | ||
55c12724 AH |
4127 | int |
4128 | vdev_remove_wanted(spa_t *spa, uint64_t guid) | |
4129 | { | |
4130 | vdev_t *vd; | |
4131 | ||
4132 | spa_vdev_state_enter(spa, SCL_NONE); | |
4133 | ||
4134 | if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) | |
4135 | return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV))); | |
4136 | ||
4137 | /* | |
577e835f BB |
4138 | * If the vdev is already removed, or expanding which can trigger |
4139 | * repartition add/remove events, then don't do anything. | |
55c12724 | 4140 | */ |
577e835f | 4141 | if (vd->vdev_removed || vd->vdev_expanding) |
55c12724 AH |
4142 | return (spa_vdev_state_exit(spa, NULL, 0)); |
4143 | ||
577e835f BB |
4144 | /* |
4145 | * Confirm the vdev has been removed, otherwise don't do anything. | |
4146 | */ | |
4147 | if (vd->vdev_ops->vdev_op_leaf && !zio_wait(vdev_probe(vd, NULL))) | |
4148 | return (spa_vdev_state_exit(spa, NULL, SET_ERROR(EEXIST))); | |
4149 | ||
55c12724 AH |
4150 | vd->vdev_remove_wanted = B_TRUE; |
4151 | spa_async_request(spa, SPA_ASYNC_REMOVE); | |
4152 | ||
4153 | return (spa_vdev_state_exit(spa, vd, 0)); | |
4154 | } | |
4155 | ||
4156 | ||
34dc7c2f | 4157 | /* |
d3cc8b15 WA |
4158 | * Online the given vdev. |
4159 | * | |
4160 | * If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached | |
4161 | * spare device should be detached when the device finishes resilvering. | |
4162 | * Second, the online should be treated like a 'test' online case, so no FMA | |
4163 | * events are generated if the device fails to open. | |
34dc7c2f BB |
4164 | */ |
4165 | int | |
b128c09f | 4166 | vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate) |
34dc7c2f | 4167 | { |
9babb374 | 4168 | vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev; |
153b2285 YP |
4169 | boolean_t wasoffline; |
4170 | vdev_state_t oldstate; | |
34dc7c2f | 4171 | |
428870ff | 4172 | spa_vdev_state_enter(spa, SCL_NONE); |
34dc7c2f | 4173 | |
b128c09f | 4174 | if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) |
28caa74b | 4175 | return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV))); |
34dc7c2f BB |
4176 | |
4177 | if (!vd->vdev_ops->vdev_op_leaf) | |
28caa74b | 4178 | return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP))); |
34dc7c2f | 4179 | |
153b2285 YP |
4180 | wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline); |
4181 | oldstate = vd->vdev_state; | |
fb390aaf | 4182 | |
9babb374 | 4183 | tvd = vd->vdev_top; |
34dc7c2f BB |
4184 | vd->vdev_offline = B_FALSE; |
4185 | vd->vdev_tmpoffline = B_FALSE; | |
b128c09f BB |
4186 | vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE); |
4187 | vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT); | |
9babb374 BB |
4188 | |
4189 | /* XXX - L2ARC 1.0 does not support expansion */ | |
4190 | if (!vd->vdev_aux) { | |
4191 | for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) | |
d441e85d BB |
4192 | pvd->vdev_expanding = !!((flags & ZFS_ONLINE_EXPAND) || |
4193 | spa->spa_autoexpand); | |
d48091de | 4194 | vd->vdev_expansion_time = gethrestime_sec(); |
9babb374 BB |
4195 | } |
4196 | ||
4197 | vdev_reopen(tvd); | |
34dc7c2f BB |
4198 | vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE; |
4199 | ||
9babb374 BB |
4200 | if (!vd->vdev_aux) { |
4201 | for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) | |
4202 | pvd->vdev_expanding = B_FALSE; | |
4203 | } | |
4204 | ||
34dc7c2f BB |
4205 | if (newstate) |
4206 | *newstate = vd->vdev_state; | |
4207 | if ((flags & ZFS_ONLINE_UNSPARE) && | |
4208 | !vdev_is_dead(vd) && vd->vdev_parent && | |
4209 | vd->vdev_parent->vdev_ops == &vdev_spare_ops && | |
4210 | vd->vdev_parent->vdev_child[0] == vd) | |
4211 | vd->vdev_unspare = B_TRUE; | |
4212 | ||
9babb374 BB |
4213 | if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) { |
4214 | ||
4215 | /* XXX - L2ARC 1.0 does not support expansion */ | |
4216 | if (vd->vdev_aux) | |
4217 | return (spa_vdev_state_exit(spa, vd, ENOTSUP)); | |
4218 | spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); | |
4219 | } | |
fb390aaf | 4220 | |
619f0976 GW |
4221 | /* Restart initializing if necessary */ |
4222 | mutex_enter(&vd->vdev_initialize_lock); | |
4223 | if (vdev_writeable(vd) && | |
4224 | vd->vdev_initialize_thread == NULL && | |
4225 | vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) { | |
4226 | (void) vdev_initialize(vd); | |
4227 | } | |
4228 | mutex_exit(&vd->vdev_initialize_lock); | |
4229 | ||
b7654bd7 GA |
4230 | /* |
4231 | * Restart trimming if necessary. We do not restart trimming for cache | |
4232 | * devices here. This is triggered by l2arc_rebuild_vdev() | |
4233 | * asynchronously for the whole device or in l2arc_evict() as it evicts | |
4234 | * space for upcoming writes. | |
4235 | */ | |
1b939560 | 4236 | mutex_enter(&vd->vdev_trim_lock); |
b7654bd7 | 4237 | if (vdev_writeable(vd) && !vd->vdev_isl2cache && |
1b939560 BB |
4238 | vd->vdev_trim_thread == NULL && |
4239 | vd->vdev_trim_state == VDEV_TRIM_ACTIVE) { | |
4240 | (void) vdev_trim(vd, vd->vdev_trim_rate, vd->vdev_trim_partial, | |
4241 | vd->vdev_trim_secure); | |
4242 | } | |
4243 | mutex_exit(&vd->vdev_trim_lock); | |
4244 | ||
153b2285 YP |
4245 | if (wasoffline || |
4246 | (oldstate < VDEV_STATE_DEGRADED && | |
719534ca | 4247 | vd->vdev_state >= VDEV_STATE_DEGRADED)) { |
12fa0466 | 4248 | spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE); |
fb390aaf | 4249 | |
719534ca AH |
4250 | /* |
4251 | * Asynchronously detach spare vdev if resilver or | |
4252 | * rebuild is not required | |
4253 | */ | |
4254 | if (vd->vdev_unspare && | |
4255 | !dsl_scan_resilvering(spa->spa_dsl_pool) && | |
4256 | !dsl_scan_resilver_scheduled(spa->spa_dsl_pool) && | |
4257 | !vdev_rebuild_active(tvd)) | |
4258 | spa_async_request(spa, SPA_ASYNC_DETACH_SPARE); | |
4259 | } | |
fb5f0bc8 | 4260 | return (spa_vdev_state_exit(spa, vd, 0)); |
34dc7c2f BB |
4261 | } |
4262 | ||
428870ff BB |
4263 | static int |
4264 | vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags) | |
34dc7c2f | 4265 | { |
9babb374 | 4266 | vdev_t *vd, *tvd; |
428870ff BB |
4267 | int error = 0; |
4268 | uint64_t generation; | |
4269 | metaslab_group_t *mg; | |
34dc7c2f | 4270 | |
428870ff BB |
4271 | top: |
4272 | spa_vdev_state_enter(spa, SCL_ALLOC); | |
34dc7c2f | 4273 | |
b128c09f | 4274 | if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) |
28caa74b | 4275 | return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV))); |
34dc7c2f BB |
4276 | |
4277 | if (!vd->vdev_ops->vdev_op_leaf) | |
28caa74b | 4278 | return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP))); |
34dc7c2f | 4279 | |
b2255edc BB |
4280 | if (vd->vdev_ops == &vdev_draid_spare_ops) |
4281 | return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); | |
4282 | ||
9babb374 | 4283 | tvd = vd->vdev_top; |
428870ff BB |
4284 | mg = tvd->vdev_mg; |
4285 | generation = spa->spa_config_generation + 1; | |
9babb374 | 4286 | |
34dc7c2f BB |
4287 | /* |
4288 | * If the device isn't already offline, try to offline it. | |
4289 | */ | |
4290 | if (!vd->vdev_offline) { | |
4291 | /* | |
fb5f0bc8 | 4292 | * If this device has the only valid copy of some data, |
9babb374 BB |
4293 | * don't allow it to be offlined. Log devices are always |
4294 | * expendable. | |
34dc7c2f | 4295 | */ |
9babb374 BB |
4296 | if (!tvd->vdev_islog && vd->vdev_aux == NULL && |
4297 | vdev_dtl_required(vd)) | |
28caa74b MM |
4298 | return (spa_vdev_state_exit(spa, NULL, |
4299 | SET_ERROR(EBUSY))); | |
34dc7c2f | 4300 | |
428870ff BB |
4301 | /* |
4302 | * If the top-level is a slog and it has had allocations | |
4303 | * then proceed. We check that the vdev's metaslab group | |
4304 | * is not NULL since it's possible that we may have just | |
4305 | * added this vdev but not yet initialized its metaslabs. | |
4306 | */ | |
4307 | if (tvd->vdev_islog && mg != NULL) { | |
4308 | /* | |
4309 | * Prevent any future allocations. | |
4310 | */ | |
aa755b35 | 4311 | ASSERT3P(tvd->vdev_log_mg, ==, NULL); |
428870ff BB |
4312 | metaslab_group_passivate(mg); |
4313 | (void) spa_vdev_state_exit(spa, vd, 0); | |
4314 | ||
a1d477c2 | 4315 | error = spa_reset_logs(spa); |
428870ff | 4316 | |
d2734cce SD |
4317 | /* |
4318 | * If the log device was successfully reset but has | |
4319 | * checkpointed data, do not offline it. | |
4320 | */ | |
4321 | if (error == 0 && | |
4322 | tvd->vdev_checkpoint_sm != NULL) { | |
425d3237 SD |
4323 | ASSERT3U(space_map_allocated( |
4324 | tvd->vdev_checkpoint_sm), !=, 0); | |
d2734cce SD |
4325 | error = ZFS_ERR_CHECKPOINT_EXISTS; |
4326 | } | |
4327 | ||
428870ff BB |
4328 | spa_vdev_state_enter(spa, SCL_ALLOC); |
4329 | ||
4330 | /* | |
4331 | * Check to see if the config has changed. | |
4332 | */ | |
4333 | if (error || generation != spa->spa_config_generation) { | |
4334 | metaslab_group_activate(mg); | |
4335 | if (error) | |
4336 | return (spa_vdev_state_exit(spa, | |
4337 | vd, error)); | |
4338 | (void) spa_vdev_state_exit(spa, vd, 0); | |
4339 | goto top; | |
4340 | } | |
c99c9001 | 4341 | ASSERT0(tvd->vdev_stat.vs_alloc); |
428870ff BB |
4342 | } |
4343 | ||
34dc7c2f BB |
4344 | /* |
4345 | * Offline this device and reopen its top-level vdev. | |
9babb374 BB |
4346 | * If the top-level vdev is a log device then just offline |
4347 | * it. Otherwise, if this action results in the top-level | |
4348 | * vdev becoming unusable, undo it and fail the request. | |
34dc7c2f BB |
4349 | */ |
4350 | vd->vdev_offline = B_TRUE; | |
9babb374 BB |
4351 | vdev_reopen(tvd); |
4352 | ||
4353 | if (!tvd->vdev_islog && vd->vdev_aux == NULL && | |
4354 | vdev_is_dead(tvd)) { | |
34dc7c2f | 4355 | vd->vdev_offline = B_FALSE; |
9babb374 | 4356 | vdev_reopen(tvd); |
28caa74b MM |
4357 | return (spa_vdev_state_exit(spa, NULL, |
4358 | SET_ERROR(EBUSY))); | |
34dc7c2f | 4359 | } |
428870ff BB |
4360 | |
4361 | /* | |
4362 | * Add the device back into the metaslab rotor so that | |
4363 | * once we online the device it's open for business. | |
4364 | */ | |
4365 | if (tvd->vdev_islog && mg != NULL) | |
4366 | metaslab_group_activate(mg); | |
34dc7c2f BB |
4367 | } |
4368 | ||
b128c09f | 4369 | vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY); |
34dc7c2f | 4370 | |
428870ff BB |
4371 | return (spa_vdev_state_exit(spa, vd, 0)); |
4372 | } | |
9babb374 | 4373 | |
428870ff BB |
4374 | int |
4375 | vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags) | |
4376 | { | |
4377 | int error; | |
9babb374 | 4378 | |
428870ff BB |
4379 | mutex_enter(&spa->spa_vdev_top_lock); |
4380 | error = vdev_offline_locked(spa, guid, flags); | |
4381 | mutex_exit(&spa->spa_vdev_top_lock); | |
4382 | ||
4383 | return (error); | |
34dc7c2f BB |
4384 | } |
4385 | ||
4386 | /* | |
4387 | * Clear the error counts associated with this vdev. Unlike vdev_online() and | |
4388 | * vdev_offline(), we assume the spa config is locked. We also clear all | |
4389 | * children. If 'vd' is NULL, then the user wants to clear all vdevs. | |
34dc7c2f BB |
4390 | */ |
4391 | void | |
b128c09f | 4392 | vdev_clear(spa_t *spa, vdev_t *vd) |
34dc7c2f | 4393 | { |
b128c09f BB |
4394 | vdev_t *rvd = spa->spa_root_vdev; |
4395 | ||
4396 | ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); | |
34dc7c2f BB |
4397 | |
4398 | if (vd == NULL) | |
b128c09f | 4399 | vd = rvd; |
34dc7c2f BB |
4400 | |
4401 | vd->vdev_stat.vs_read_errors = 0; | |
4402 | vd->vdev_stat.vs_write_errors = 0; | |
4403 | vd->vdev_stat.vs_checksum_errors = 0; | |
ad796b8a | 4404 | vd->vdev_stat.vs_slow_ios = 0; |
34dc7c2f | 4405 | |
1c27024e | 4406 | for (int c = 0; c < vd->vdev_children; c++) |
b128c09f | 4407 | vdev_clear(spa, vd->vdev_child[c]); |
34dc7c2f | 4408 | |
a1d477c2 | 4409 | /* |
e996c502 | 4410 | * It makes no sense to "clear" an indirect or removed vdev. |
a1d477c2 | 4411 | */ |
e996c502 | 4412 | if (!vdev_is_concrete(vd) || vd->vdev_removed) |
a1d477c2 MA |
4413 | return; |
4414 | ||
34dc7c2f | 4415 | /* |
b128c09f BB |
4416 | * If we're in the FAULTED state or have experienced failed I/O, then |
4417 | * clear the persistent state and attempt to reopen the device. We | |
4418 | * also mark the vdev config dirty, so that the new faulted state is | |
4419 | * written out to disk. | |
34dc7c2f | 4420 | */ |
b128c09f BB |
4421 | if (vd->vdev_faulted || vd->vdev_degraded || |
4422 | !vdev_readable(vd) || !vdev_writeable(vd)) { | |
428870ff | 4423 | /* |
4e33ba4c | 4424 | * When reopening in response to a clear event, it may be due to |
428870ff BB |
4425 | * a fmadm repair request. In this case, if the device is |
4426 | * still broken, we want to still post the ereport again. | |
4427 | */ | |
4428 | vd->vdev_forcefault = B_TRUE; | |
4429 | ||
572e2857 | 4430 | vd->vdev_faulted = vd->vdev_degraded = 0ULL; |
b128c09f BB |
4431 | vd->vdev_cant_read = B_FALSE; |
4432 | vd->vdev_cant_write = B_FALSE; | |
4a283c7f | 4433 | vd->vdev_stat.vs_aux = 0; |
b128c09f | 4434 | |
572e2857 | 4435 | vdev_reopen(vd == rvd ? rvd : vd->vdev_top); |
34dc7c2f | 4436 | |
428870ff BB |
4437 | vd->vdev_forcefault = B_FALSE; |
4438 | ||
572e2857 | 4439 | if (vd != rvd && vdev_writeable(vd->vdev_top)) |
b128c09f BB |
4440 | vdev_state_dirty(vd->vdev_top); |
4441 | ||
3c819a2c JP |
4442 | /* If a resilver isn't required, check if vdevs can be culled */ |
4443 | if (vd->vdev_aux == NULL && !vdev_is_dead(vd) && | |
4444 | !dsl_scan_resilvering(spa->spa_dsl_pool) && | |
4445 | !dsl_scan_resilver_scheduled(spa->spa_dsl_pool)) | |
4446 | spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); | |
34dc7c2f | 4447 | |
12fa0466 | 4448 | spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR); |
34dc7c2f | 4449 | } |
428870ff BB |
4450 | |
4451 | /* | |
4452 | * When clearing a FMA-diagnosed fault, we always want to | |
4453 | * unspare the device, as we assume that the original spare was | |
4454 | * done in response to the FMA fault. | |
4455 | */ | |
4456 | if (!vdev_is_dead(vd) && vd->vdev_parent != NULL && | |
4457 | vd->vdev_parent->vdev_ops == &vdev_spare_ops && | |
4458 | vd->vdev_parent->vdev_child[0] == vd) | |
4459 | vd->vdev_unspare = B_TRUE; | |
03e02e5b DB |
4460 | |
4461 | /* Clear recent error events cache (i.e. duplicate events tracking) */ | |
4462 | zfs_ereport_clear(spa, vd); | |
34dc7c2f BB |
4463 | } |
4464 | ||
b128c09f BB |
4465 | boolean_t |
4466 | vdev_is_dead(vdev_t *vd) | |
4467 | { | |
428870ff BB |
4468 | /* |
4469 | * Holes and missing devices are always considered "dead". | |
4470 | * This simplifies the code since we don't have to check for | |
4471 | * these types of devices in the various code paths. | |
4472 | * Instead we rely on the fact that we skip over dead devices | |
4473 | * before issuing I/O to them. | |
4474 | */ | |
a1d477c2 MA |
4475 | return (vd->vdev_state < VDEV_STATE_DEGRADED || |
4476 | vd->vdev_ops == &vdev_hole_ops || | |
428870ff | 4477 | vd->vdev_ops == &vdev_missing_ops); |
b128c09f BB |
4478 | } |
4479 | ||
4480 | boolean_t | |
34dc7c2f BB |
4481 | vdev_readable(vdev_t *vd) |
4482 | { | |
b128c09f | 4483 | return (!vdev_is_dead(vd) && !vd->vdev_cant_read); |
34dc7c2f BB |
4484 | } |
4485 | ||
b128c09f | 4486 | boolean_t |
34dc7c2f BB |
4487 | vdev_writeable(vdev_t *vd) |
4488 | { | |
a1d477c2 MA |
4489 | return (!vdev_is_dead(vd) && !vd->vdev_cant_write && |
4490 | vdev_is_concrete(vd)); | |
34dc7c2f BB |
4491 | } |
4492 | ||
b128c09f BB |
4493 | boolean_t |
4494 | vdev_allocatable(vdev_t *vd) | |
34dc7c2f | 4495 | { |
fb5f0bc8 BB |
4496 | uint64_t state = vd->vdev_state; |
4497 | ||
b128c09f | 4498 | /* |
fb5f0bc8 | 4499 | * We currently allow allocations from vdevs which may be in the |
b128c09f BB |
4500 | * process of reopening (i.e. VDEV_STATE_CLOSED). If the device |
4501 | * fails to reopen then we'll catch it later when we're holding | |
fb5f0bc8 BB |
4502 | * the proper locks. Note that we have to get the vdev state |
4503 | * in a local variable because although it changes atomically, | |
4504 | * we're asking two separate questions about it. | |
b128c09f | 4505 | */ |
fb5f0bc8 | 4506 | return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) && |
a1d477c2 | 4507 | !vd->vdev_cant_write && vdev_is_concrete(vd) && |
3dfb57a3 | 4508 | vd->vdev_mg->mg_initialized); |
34dc7c2f BB |
4509 | } |
4510 | ||
b128c09f BB |
4511 | boolean_t |
4512 | vdev_accessible(vdev_t *vd, zio_t *zio) | |
34dc7c2f | 4513 | { |
b128c09f | 4514 | ASSERT(zio->io_vd == vd); |
34dc7c2f | 4515 | |
b128c09f BB |
4516 | if (vdev_is_dead(vd) || vd->vdev_remove_wanted) |
4517 | return (B_FALSE); | |
34dc7c2f | 4518 | |
b128c09f BB |
4519 | if (zio->io_type == ZIO_TYPE_READ) |
4520 | return (!vd->vdev_cant_read); | |
34dc7c2f | 4521 | |
b128c09f BB |
4522 | if (zio->io_type == ZIO_TYPE_WRITE) |
4523 | return (!vd->vdev_cant_write); | |
34dc7c2f | 4524 | |
b128c09f | 4525 | return (B_TRUE); |
34dc7c2f BB |
4526 | } |
4527 | ||
193a37cb TH |
4528 | static void |
4529 | vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs) | |
34dc7c2f | 4530 | { |
b2255edc BB |
4531 | /* |
4532 | * Exclude the dRAID spare when aggregating to avoid double counting | |
4533 | * the ops and bytes. These IOs are counted by the physical leaves. | |
4534 | */ | |
4535 | if (cvd->vdev_ops == &vdev_draid_spare_ops) | |
4536 | return; | |
4537 | ||
1b939560 | 4538 | for (int t = 0; t < VS_ZIO_TYPES; t++) { |
193a37cb TH |
4539 | vs->vs_ops[t] += cvs->vs_ops[t]; |
4540 | vs->vs_bytes[t] += cvs->vs_bytes[t]; | |
4541 | } | |
34dc7c2f | 4542 | |
193a37cb TH |
4543 | cvs->vs_scan_removing = cvd->vdev_removing; |
4544 | } | |
f3a7f661 | 4545 | |
193a37cb TH |
4546 | /* |
4547 | * Get extended stats | |
4548 | */ | |
4549 | static void | |
4550 | vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx) | |
4551 | { | |
14e4e3cb AZ |
4552 | (void) cvd; |
4553 | ||
193a37cb TH |
4554 | int t, b; |
4555 | for (t = 0; t < ZIO_TYPES; t++) { | |
7e945072 | 4556 | for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++) |
193a37cb | 4557 | vsx->vsx_disk_histo[t][b] += cvsx->vsx_disk_histo[t][b]; |
7e945072 TH |
4558 | |
4559 | for (b = 0; b < ARRAY_SIZE(vsx->vsx_total_histo[0]); b++) { | |
193a37cb TH |
4560 | vsx->vsx_total_histo[t][b] += |
4561 | cvsx->vsx_total_histo[t][b]; | |
4562 | } | |
f38dfec3 | 4563 | } |
34dc7c2f | 4564 | |
193a37cb | 4565 | for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) { |
7e945072 | 4566 | for (b = 0; b < ARRAY_SIZE(vsx->vsx_queue_histo[0]); b++) { |
193a37cb TH |
4567 | vsx->vsx_queue_histo[t][b] += |
4568 | cvsx->vsx_queue_histo[t][b]; | |
4569 | } | |
4570 | vsx->vsx_active_queue[t] += cvsx->vsx_active_queue[t]; | |
4571 | vsx->vsx_pend_queue[t] += cvsx->vsx_pend_queue[t]; | |
7e945072 TH |
4572 | |
4573 | for (b = 0; b < ARRAY_SIZE(vsx->vsx_ind_histo[0]); b++) | |
4574 | vsx->vsx_ind_histo[t][b] += cvsx->vsx_ind_histo[t][b]; | |
4575 | ||
4576 | for (b = 0; b < ARRAY_SIZE(vsx->vsx_agg_histo[0]); b++) | |
4577 | vsx->vsx_agg_histo[t][b] += cvsx->vsx_agg_histo[t][b]; | |
193a37cb | 4578 | } |
7e945072 | 4579 | |
193a37cb TH |
4580 | } |
4581 | ||
d2734cce SD |
4582 | boolean_t |
4583 | vdev_is_spacemap_addressable(vdev_t *vd) | |
4584 | { | |
419ba591 SD |
4585 | if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_SPACEMAP_V2)) |
4586 | return (B_TRUE); | |
4587 | ||
d2734cce | 4588 | /* |
419ba591 SD |
4589 | * If double-word space map entries are not enabled we assume |
4590 | * 47 bits of the space map entry are dedicated to the entry's | |
4591 | * offset (see SM_OFFSET_BITS in space_map.h). We then use that | |
4592 | * to calculate the maximum address that can be described by a | |
4593 | * space map entry for the given device. | |
d2734cce | 4594 | */ |
419ba591 | 4595 | uint64_t shift = vd->vdev_ashift + SM_OFFSET_BITS; |
d2734cce SD |
4596 | |
4597 | if (shift >= 63) /* detect potential overflow */ | |
4598 | return (B_TRUE); | |
4599 | ||
4600 | return (vd->vdev_asize < (1ULL << shift)); | |
4601 | } | |
4602 | ||
193a37cb TH |
4603 | /* |
4604 | * Get statistics for the given vdev. | |
4605 | */ | |
4606 | static void | |
4607 | vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx) | |
4608 | { | |
1c27024e | 4609 | int t; |
34dc7c2f BB |
4610 | /* |
4611 | * If we're getting stats on the root vdev, aggregate the I/O counts | |
4612 | * over all top-level vdevs (i.e. the direct children of the root). | |
4613 | */ | |
193a37cb TH |
4614 | if (!vd->vdev_ops->vdev_op_leaf) { |
4615 | if (vs) { | |
4616 | memset(vs->vs_ops, 0, sizeof (vs->vs_ops)); | |
4617 | memset(vs->vs_bytes, 0, sizeof (vs->vs_bytes)); | |
4618 | } | |
4619 | if (vsx) | |
4620 | memset(vsx, 0, sizeof (*vsx)); | |
4621 | ||
1c27024e | 4622 | for (int c = 0; c < vd->vdev_children; c++) { |
193a37cb | 4623 | vdev_t *cvd = vd->vdev_child[c]; |
34dc7c2f | 4624 | vdev_stat_t *cvs = &cvd->vdev_stat; |
193a37cb TH |
4625 | vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex; |
4626 | ||
4627 | vdev_get_stats_ex_impl(cvd, cvs, cvsx); | |
4628 | if (vs) | |
4629 | vdev_get_child_stat(cvd, vs, cvs); | |
4630 | if (vsx) | |
4631 | vdev_get_child_stat_ex(cvd, vsx, cvsx); | |
193a37cb TH |
4632 | } |
4633 | } else { | |
4634 | /* | |
4635 | * We're a leaf. Just copy our ZIO active queue stats in. The | |
4636 | * other leaf stats are updated in vdev_stat_update(). | |
4637 | */ | |
4638 | if (!vsx) | |
4639 | return; | |
4640 | ||
4641 | memcpy(vsx, &vd->vdev_stat_ex, sizeof (vd->vdev_stat_ex)); | |
4642 | ||
8469b5aa AM |
4643 | for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) { |
4644 | vsx->vsx_active_queue[t] = vd->vdev_queue.vq_cactive[t]; | |
4645 | vsx->vsx_pend_queue[t] = vdev_queue_class_length(vd, t); | |
34dc7c2f BB |
4646 | } |
4647 | } | |
193a37cb TH |
4648 | } |
4649 | ||
4650 | void | |
4651 | vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx) | |
4652 | { | |
0f676dc2 | 4653 | vdev_t *tvd = vd->vdev_top; |
193a37cb TH |
4654 | mutex_enter(&vd->vdev_stat_lock); |
4655 | if (vs) { | |
861166b0 | 4656 | memcpy(vs, &vd->vdev_stat, sizeof (*vs)); |
193a37cb TH |
4657 | vs->vs_timestamp = gethrtime() - vs->vs_timestamp; |
4658 | vs->vs_state = vd->vdev_state; | |
4659 | vs->vs_rsize = vdev_get_min_asize(vd); | |
9a49d3f3 | 4660 | |
619f0976 | 4661 | if (vd->vdev_ops->vdev_op_leaf) { |
1282274f | 4662 | vs->vs_pspace = vd->vdev_psize; |
193a37cb TH |
4663 | vs->vs_rsize += VDEV_LABEL_START_SIZE + |
4664 | VDEV_LABEL_END_SIZE; | |
619f0976 | 4665 | /* |
1b939560 | 4666 | * Report initializing progress. Since we don't |
619f0976 GW |
4667 | * have the initializing locks held, this is only |
4668 | * an estimate (although a fairly accurate one). | |
4669 | */ | |
4670 | vs->vs_initialize_bytes_done = | |
4671 | vd->vdev_initialize_bytes_done; | |
4672 | vs->vs_initialize_bytes_est = | |
4673 | vd->vdev_initialize_bytes_est; | |
4674 | vs->vs_initialize_state = vd->vdev_initialize_state; | |
4675 | vs->vs_initialize_action_time = | |
4676 | vd->vdev_initialize_action_time; | |
1b939560 BB |
4677 | |
4678 | /* | |
4679 | * Report manual TRIM progress. Since we don't have | |
4680 | * the manual TRIM locks held, this is only an | |
4681 | * estimate (although fairly accurate one). | |
4682 | */ | |
4683 | vs->vs_trim_notsup = !vd->vdev_has_trim; | |
4684 | vs->vs_trim_bytes_done = vd->vdev_trim_bytes_done; | |
4685 | vs->vs_trim_bytes_est = vd->vdev_trim_bytes_est; | |
4686 | vs->vs_trim_state = vd->vdev_trim_state; | |
4687 | vs->vs_trim_action_time = vd->vdev_trim_action_time; | |
9a49d3f3 BB |
4688 | |
4689 | /* Set when there is a deferred resilver. */ | |
4690 | vs->vs_resilver_deferred = vd->vdev_resilver_deferred; | |
619f0976 | 4691 | } |
9a49d3f3 | 4692 | |
0f676dc2 | 4693 | /* |
1b939560 | 4694 | * Report expandable space on top-level, non-auxiliary devices |
0f676dc2 GM |
4695 | * only. The expandable space is reported in terms of metaslab |
4696 | * sized units since that determines how much space the pool | |
4697 | * can expand. | |
4698 | */ | |
4699 | if (vd->vdev_aux == NULL && tvd != NULL) { | |
4700 | vs->vs_esize = P2ALIGN( | |
4701 | vd->vdev_max_asize - vd->vdev_asize, | |
4702 | 1ULL << tvd->vdev_ms_shift); | |
4703 | } | |
9a49d3f3 | 4704 | |
6fe3498c RM |
4705 | vs->vs_configured_ashift = vd->vdev_top != NULL |
4706 | ? vd->vdev_top->vdev_ashift : vd->vdev_ashift; | |
4707 | vs->vs_logical_ashift = vd->vdev_logical_ashift; | |
37f6845c AM |
4708 | if (vd->vdev_physical_ashift <= ASHIFT_MAX) |
4709 | vs->vs_physical_ashift = vd->vdev_physical_ashift; | |
4710 | else | |
4711 | vs->vs_physical_ashift = 0; | |
6fe3498c | 4712 | |
9a49d3f3 BB |
4713 | /* |
4714 | * Report fragmentation and rebuild progress for top-level, | |
4715 | * non-auxiliary, concrete devices. | |
4716 | */ | |
193a37cb | 4717 | if (vd->vdev_aux == NULL && vd == vd->vdev_top && |
a1d477c2 | 4718 | vdev_is_concrete(vd)) { |
aa755b35 MA |
4719 | /* |
4720 | * The vdev fragmentation rating doesn't take into | |
4721 | * account the embedded slog metaslab (vdev_log_mg). | |
4722 | * Since it's only one metaslab, it would have a tiny | |
4723 | * impact on the overall fragmentation. | |
4724 | */ | |
cc99f275 DB |
4725 | vs->vs_fragmentation = (vd->vdev_mg != NULL) ? |
4726 | vd->vdev_mg->mg_fragmentation : 0; | |
193a37cb | 4727 | } |
2a673e76 AJ |
4728 | vs->vs_noalloc = MAX(vd->vdev_noalloc, |
4729 | tvd ? tvd->vdev_noalloc : 0); | |
193a37cb TH |
4730 | } |
4731 | ||
193a37cb | 4732 | vdev_get_stats_ex_impl(vd, vs, vsx); |
f3a7f661 | 4733 | mutex_exit(&vd->vdev_stat_lock); |
34dc7c2f BB |
4734 | } |
4735 | ||
193a37cb TH |
4736 | void |
4737 | vdev_get_stats(vdev_t *vd, vdev_stat_t *vs) | |
4738 | { | |
4739 | return (vdev_get_stats_ex(vd, vs, NULL)); | |
4740 | } | |
4741 | ||
34dc7c2f BB |
4742 | void |
4743 | vdev_clear_stats(vdev_t *vd) | |
4744 | { | |
4745 | mutex_enter(&vd->vdev_stat_lock); | |
4746 | vd->vdev_stat.vs_space = 0; | |
4747 | vd->vdev_stat.vs_dspace = 0; | |
4748 | vd->vdev_stat.vs_alloc = 0; | |
4749 | mutex_exit(&vd->vdev_stat_lock); | |
4750 | } | |
4751 | ||
428870ff BB |
4752 | void |
4753 | vdev_scan_stat_init(vdev_t *vd) | |
4754 | { | |
4755 | vdev_stat_t *vs = &vd->vdev_stat; | |
4756 | ||
1c27024e | 4757 | for (int c = 0; c < vd->vdev_children; c++) |
428870ff BB |
4758 | vdev_scan_stat_init(vd->vdev_child[c]); |
4759 | ||
4760 | mutex_enter(&vd->vdev_stat_lock); | |
4761 | vs->vs_scan_processed = 0; | |
4762 | mutex_exit(&vd->vdev_stat_lock); | |
4763 | } | |
4764 | ||
34dc7c2f | 4765 | void |
b128c09f | 4766 | vdev_stat_update(zio_t *zio, uint64_t psize) |
34dc7c2f | 4767 | { |
fb5f0bc8 BB |
4768 | spa_t *spa = zio->io_spa; |
4769 | vdev_t *rvd = spa->spa_root_vdev; | |
b128c09f | 4770 | vdev_t *vd = zio->io_vd ? zio->io_vd : rvd; |
34dc7c2f BB |
4771 | vdev_t *pvd; |
4772 | uint64_t txg = zio->io_txg; | |
950980b4 RY |
4773 | /* Suppress ASAN false positive */ |
4774 | #ifdef __SANITIZE_ADDRESS__ | |
63652e15 DS |
4775 | vdev_stat_t *vs = vd ? &vd->vdev_stat : NULL; |
4776 | vdev_stat_ex_t *vsx = vd ? &vd->vdev_stat_ex : NULL; | |
950980b4 RY |
4777 | #else |
4778 | vdev_stat_t *vs = &vd->vdev_stat; | |
4779 | vdev_stat_ex_t *vsx = &vd->vdev_stat_ex; | |
4780 | #endif | |
34dc7c2f BB |
4781 | zio_type_t type = zio->io_type; |
4782 | int flags = zio->io_flags; | |
4783 | ||
b128c09f BB |
4784 | /* |
4785 | * If this i/o is a gang leader, it didn't do any actual work. | |
4786 | */ | |
4787 | if (zio->io_gang_tree) | |
4788 | return; | |
4789 | ||
34dc7c2f | 4790 | if (zio->io_error == 0) { |
b128c09f BB |
4791 | /* |
4792 | * If this is a root i/o, don't count it -- we've already | |
4793 | * counted the top-level vdevs, and vdev_get_stats() will | |
4794 | * aggregate them when asked. This reduces contention on | |
4795 | * the root vdev_stat_lock and implicitly handles blocks | |
4796 | * that compress away to holes, for which there is no i/o. | |
4797 | * (Holes never create vdev children, so all the counters | |
4798 | * remain zero, which is what we want.) | |
4799 | * | |
4800 | * Note: this only applies to successful i/o (io_error == 0) | |
4801 | * because unlike i/o counts, errors are not additive. | |
4802 | * When reading a ditto block, for example, failure of | |
4803 | * one top-level vdev does not imply a root-level error. | |
4804 | */ | |
4805 | if (vd == rvd) | |
4806 | return; | |
4807 | ||
4808 | ASSERT(vd == zio->io_vd); | |
fb5f0bc8 BB |
4809 | |
4810 | if (flags & ZIO_FLAG_IO_BYPASS) | |
4811 | return; | |
4812 | ||
4813 | mutex_enter(&vd->vdev_stat_lock); | |
4814 | ||
b128c09f | 4815 | if (flags & ZIO_FLAG_IO_REPAIR) { |
9a49d3f3 BB |
4816 | /* |
4817 | * Repair is the result of a resilver issued by the | |
4818 | * scan thread (spa_sync). | |
4819 | */ | |
572e2857 | 4820 | if (flags & ZIO_FLAG_SCAN_THREAD) { |
9a49d3f3 BB |
4821 | dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan; |
4822 | dsl_scan_phys_t *scn_phys = &scn->scn_phys; | |
428870ff BB |
4823 | uint64_t *processed = &scn_phys->scn_processed; |
4824 | ||
428870ff BB |
4825 | if (vd->vdev_ops->vdev_op_leaf) |
4826 | atomic_add_64(processed, psize); | |
4827 | vs->vs_scan_processed += psize; | |
4828 | } | |
4829 | ||
9a49d3f3 BB |
4830 | /* |
4831 | * Repair is the result of a rebuild issued by the | |
b2255edc BB |
4832 | * rebuild thread (vdev_rebuild_thread). To avoid |
4833 | * double counting repaired bytes the virtual dRAID | |
4834 | * spare vdev is excluded from the processed bytes. | |
9a49d3f3 BB |
4835 | */ |
4836 | if (zio->io_priority == ZIO_PRIORITY_REBUILD) { | |
4837 | vdev_t *tvd = vd->vdev_top; | |
4838 | vdev_rebuild_t *vr = &tvd->vdev_rebuild_config; | |
4839 | vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys; | |
4840 | uint64_t *rebuilt = &vrp->vrp_bytes_rebuilt; | |
4841 | ||
b2255edc BB |
4842 | if (vd->vdev_ops->vdev_op_leaf && |
4843 | vd->vdev_ops != &vdev_draid_spare_ops) { | |
9a49d3f3 | 4844 | atomic_add_64(rebuilt, psize); |
b2255edc | 4845 | } |
9a49d3f3 BB |
4846 | vs->vs_rebuild_processed += psize; |
4847 | } | |
4848 | ||
fb5f0bc8 | 4849 | if (flags & ZIO_FLAG_SELF_HEAL) |
b128c09f | 4850 | vs->vs_self_healed += psize; |
34dc7c2f | 4851 | } |
fb5f0bc8 | 4852 | |
193a37cb TH |
4853 | /* |
4854 | * The bytes/ops/histograms are recorded at the leaf level and | |
4855 | * aggregated into the higher level vdevs in vdev_get_stats(). | |
4856 | */ | |
4eb0db42 TH |
4857 | if (vd->vdev_ops->vdev_op_leaf && |
4858 | (zio->io_priority < ZIO_PRIORITY_NUM_QUEUEABLE)) { | |
1b939560 | 4859 | zio_type_t vs_type = type; |
9a49d3f3 | 4860 | zio_priority_t priority = zio->io_priority; |
1b939560 BB |
4861 | |
4862 | /* | |
4863 | * TRIM ops and bytes are reported to user space as | |
4864 | * ZIO_TYPE_IOCTL. This is done to preserve the | |
4865 | * vdev_stat_t structure layout for user space. | |
4866 | */ | |
4867 | if (type == ZIO_TYPE_TRIM) | |
4868 | vs_type = ZIO_TYPE_IOCTL; | |
193a37cb | 4869 | |
9a49d3f3 BB |
4870 | /* |
4871 | * Solely for the purposes of 'zpool iostat -lqrw' | |
bf169e9f | 4872 | * reporting use the priority to categorize the IO. |
9a49d3f3 BB |
4873 | * Only the following are reported to user space: |
4874 | * | |
4875 | * ZIO_PRIORITY_SYNC_READ, | |
4876 | * ZIO_PRIORITY_SYNC_WRITE, | |
4877 | * ZIO_PRIORITY_ASYNC_READ, | |
4878 | * ZIO_PRIORITY_ASYNC_WRITE, | |
4879 | * ZIO_PRIORITY_SCRUB, | |
00888c08 TB |
4880 | * ZIO_PRIORITY_TRIM, |
4881 | * ZIO_PRIORITY_REBUILD. | |
9a49d3f3 | 4882 | */ |
00888c08 | 4883 | if (priority == ZIO_PRIORITY_INITIALIZING) { |
9a49d3f3 BB |
4884 | ASSERT3U(type, ==, ZIO_TYPE_WRITE); |
4885 | priority = ZIO_PRIORITY_ASYNC_WRITE; | |
4886 | } else if (priority == ZIO_PRIORITY_REMOVAL) { | |
4887 | priority = ((type == ZIO_TYPE_WRITE) ? | |
4888 | ZIO_PRIORITY_ASYNC_WRITE : | |
4889 | ZIO_PRIORITY_ASYNC_READ); | |
4890 | } | |
4891 | ||
1b939560 BB |
4892 | vs->vs_ops[vs_type]++; |
4893 | vs->vs_bytes[vs_type] += psize; | |
193a37cb | 4894 | |
7e945072 | 4895 | if (flags & ZIO_FLAG_DELEGATED) { |
9a49d3f3 | 4896 | vsx->vsx_agg_histo[priority] |
7e945072 TH |
4897 | [RQ_HISTO(zio->io_size)]++; |
4898 | } else { | |
9a49d3f3 | 4899 | vsx->vsx_ind_histo[priority] |
7e945072 TH |
4900 | [RQ_HISTO(zio->io_size)]++; |
4901 | } | |
4902 | ||
193a37cb | 4903 | if (zio->io_delta && zio->io_delay) { |
9a49d3f3 | 4904 | vsx->vsx_queue_histo[priority] |
7e945072 | 4905 | [L_HISTO(zio->io_delta - zio->io_delay)]++; |
193a37cb | 4906 | vsx->vsx_disk_histo[type] |
7e945072 | 4907 | [L_HISTO(zio->io_delay)]++; |
193a37cb | 4908 | vsx->vsx_total_histo[type] |
7e945072 | 4909 | [L_HISTO(zio->io_delta)]++; |
193a37cb TH |
4910 | } |
4911 | } | |
fb5f0bc8 BB |
4912 | |
4913 | mutex_exit(&vd->vdev_stat_lock); | |
34dc7c2f BB |
4914 | return; |
4915 | } | |
4916 | ||
4917 | if (flags & ZIO_FLAG_SPECULATIVE) | |
4918 | return; | |
4919 | ||
9babb374 BB |
4920 | /* |
4921 | * If this is an I/O error that is going to be retried, then ignore the | |
4922 | * error. Otherwise, the user may interpret B_FAILFAST I/O errors as | |
4923 | * hard errors, when in reality they can happen for any number of | |
4924 | * innocuous reasons (bus resets, MPxIO link failure, etc). | |
4925 | */ | |
4926 | if (zio->io_error == EIO && | |
4927 | !(zio->io_flags & ZIO_FLAG_IO_RETRY)) | |
4928 | return; | |
4929 | ||
428870ff BB |
4930 | /* |
4931 | * Intent logs writes won't propagate their error to the root | |
4932 | * I/O so don't mark these types of failures as pool-level | |
4933 | * errors. | |
4934 | */ | |
4935 | if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE)) | |
4936 | return; | |
4937 | ||
4d0ba941 | 4938 | if (type == ZIO_TYPE_WRITE && txg != 0 && |
fb5f0bc8 | 4939 | (!(flags & ZIO_FLAG_IO_REPAIR) || |
572e2857 | 4940 | (flags & ZIO_FLAG_SCAN_THREAD) || |
428870ff | 4941 | spa->spa_claiming)) { |
fb5f0bc8 | 4942 | /* |
428870ff BB |
4943 | * This is either a normal write (not a repair), or it's |
4944 | * a repair induced by the scrub thread, or it's a repair | |
4945 | * made by zil_claim() during spa_load() in the first txg. | |
4946 | * In the normal case, we commit the DTL change in the same | |
4947 | * txg as the block was born. In the scrub-induced repair | |
4948 | * case, we know that scrubs run in first-pass syncing context, | |
4949 | * so we commit the DTL change in spa_syncing_txg(spa). | |
4950 | * In the zil_claim() case, we commit in spa_first_txg(spa). | |
fb5f0bc8 BB |
4951 | * |
4952 | * We currently do not make DTL entries for failed spontaneous | |
4953 | * self-healing writes triggered by normal (non-scrubbing) | |
4954 | * reads, because we have no transactional context in which to | |
4955 | * do so -- and it's not clear that it'd be desirable anyway. | |
4956 | */ | |
4957 | if (vd->vdev_ops->vdev_op_leaf) { | |
4958 | uint64_t commit_txg = txg; | |
572e2857 | 4959 | if (flags & ZIO_FLAG_SCAN_THREAD) { |
fb5f0bc8 BB |
4960 | ASSERT(flags & ZIO_FLAG_IO_REPAIR); |
4961 | ASSERT(spa_sync_pass(spa) == 1); | |
4962 | vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1); | |
428870ff BB |
4963 | commit_txg = spa_syncing_txg(spa); |
4964 | } else if (spa->spa_claiming) { | |
4965 | ASSERT(flags & ZIO_FLAG_IO_REPAIR); | |
4966 | commit_txg = spa_first_txg(spa); | |
fb5f0bc8 | 4967 | } |
428870ff | 4968 | ASSERT(commit_txg >= spa_syncing_txg(spa)); |
fb5f0bc8 | 4969 | if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1)) |
34dc7c2f | 4970 | return; |
fb5f0bc8 BB |
4971 | for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent) |
4972 | vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1); | |
4973 | vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg); | |
34dc7c2f | 4974 | } |
fb5f0bc8 BB |
4975 | if (vd != rvd) |
4976 | vdev_dtl_dirty(vd, DTL_MISSING, txg, 1); | |
34dc7c2f BB |
4977 | } |
4978 | } | |
4979 | ||
cc99f275 DB |
4980 | int64_t |
4981 | vdev_deflated_space(vdev_t *vd, int64_t space) | |
4982 | { | |
4983 | ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0); | |
4984 | ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache); | |
4985 | ||
4986 | return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio); | |
4987 | } | |
4988 | ||
34dc7c2f | 4989 | /* |
1b939560 BB |
4990 | * Update the in-core space usage stats for this vdev, its metaslab class, |
4991 | * and the root vdev. | |
34dc7c2f BB |
4992 | */ |
4993 | void | |
428870ff BB |
4994 | vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta, |
4995 | int64_t space_delta) | |
34dc7c2f | 4996 | { |
14e4e3cb | 4997 | (void) defer_delta; |
cc99f275 | 4998 | int64_t dspace_delta; |
34dc7c2f BB |
4999 | spa_t *spa = vd->vdev_spa; |
5000 | vdev_t *rvd = spa->spa_root_vdev; | |
5001 | ||
5002 | ASSERT(vd == vd->vdev_top); | |
5003 | ||
5004 | /* | |
5005 | * Apply the inverse of the psize-to-asize (ie. RAID-Z) space-expansion | |
5006 | * factor. We must calculate this here and not at the root vdev | |
5007 | * because the root vdev's psize-to-asize is simply the max of its | |
e1cfd73f | 5008 | * children's, thus not accurate enough for us. |
34dc7c2f | 5009 | */ |
cc99f275 | 5010 | dspace_delta = vdev_deflated_space(vd, space_delta); |
34dc7c2f BB |
5011 | |
5012 | mutex_enter(&vd->vdev_stat_lock); | |
7558997d SD |
5013 | /* ensure we won't underflow */ |
5014 | if (alloc_delta < 0) { | |
5015 | ASSERT3U(vd->vdev_stat.vs_alloc, >=, -alloc_delta); | |
5016 | } | |
5017 | ||
34dc7c2f | 5018 | vd->vdev_stat.vs_alloc += alloc_delta; |
428870ff | 5019 | vd->vdev_stat.vs_space += space_delta; |
34dc7c2f BB |
5020 | vd->vdev_stat.vs_dspace += dspace_delta; |
5021 | mutex_exit(&vd->vdev_stat_lock); | |
5022 | ||
cc99f275 DB |
5023 | /* every class but log contributes to root space stats */ |
5024 | if (vd->vdev_mg != NULL && !vd->vdev_islog) { | |
7558997d | 5025 | ASSERT(!vd->vdev_isl2cache); |
34dc7c2f | 5026 | mutex_enter(&rvd->vdev_stat_lock); |
34dc7c2f | 5027 | rvd->vdev_stat.vs_alloc += alloc_delta; |
428870ff | 5028 | rvd->vdev_stat.vs_space += space_delta; |
34dc7c2f BB |
5029 | rvd->vdev_stat.vs_dspace += dspace_delta; |
5030 | mutex_exit(&rvd->vdev_stat_lock); | |
5031 | } | |
cc99f275 | 5032 | /* Note: metaslab_class_space_update moved to metaslab_space_update */ |
34dc7c2f BB |
5033 | } |
5034 | ||
5035 | /* | |
5036 | * Mark a top-level vdev's config as dirty, placing it on the dirty list | |
5037 | * so that it will be written out next time the vdev configuration is synced. | |
5038 | * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs. | |
5039 | */ | |
5040 | void | |
5041 | vdev_config_dirty(vdev_t *vd) | |
5042 | { | |
5043 | spa_t *spa = vd->vdev_spa; | |
5044 | vdev_t *rvd = spa->spa_root_vdev; | |
5045 | int c; | |
5046 | ||
572e2857 BB |
5047 | ASSERT(spa_writeable(spa)); |
5048 | ||
34dc7c2f | 5049 | /* |
9babb374 BB |
5050 | * If this is an aux vdev (as with l2cache and spare devices), then we |
5051 | * update the vdev config manually and set the sync flag. | |
b128c09f BB |
5052 | */ |
5053 | if (vd->vdev_aux != NULL) { | |
5054 | spa_aux_vdev_t *sav = vd->vdev_aux; | |
5055 | nvlist_t **aux; | |
5056 | uint_t naux; | |
5057 | ||
5058 | for (c = 0; c < sav->sav_count; c++) { | |
5059 | if (sav->sav_vdevs[c] == vd) | |
5060 | break; | |
5061 | } | |
5062 | ||
5063 | if (c == sav->sav_count) { | |
5064 | /* | |
5065 | * We're being removed. There's nothing more to do. | |
5066 | */ | |
5067 | ASSERT(sav->sav_sync == B_TRUE); | |
5068 | return; | |
5069 | } | |
5070 | ||
5071 | sav->sav_sync = B_TRUE; | |
5072 | ||
9babb374 BB |
5073 | if (nvlist_lookup_nvlist_array(sav->sav_config, |
5074 | ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) { | |
5075 | VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, | |
5076 | ZPOOL_CONFIG_SPARES, &aux, &naux) == 0); | |
5077 | } | |
b128c09f BB |
5078 | |
5079 | ASSERT(c < naux); | |
5080 | ||
5081 | /* | |
5082 | * Setting the nvlist in the middle if the array is a little | |
5083 | * sketchy, but it will work. | |
5084 | */ | |
5085 | nvlist_free(aux[c]); | |
428870ff | 5086 | aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0); |
b128c09f BB |
5087 | |
5088 | return; | |
5089 | } | |
5090 | ||
5091 | /* | |
5092 | * The dirty list is protected by the SCL_CONFIG lock. The caller | |
5093 | * must either hold SCL_CONFIG as writer, or must be the sync thread | |
5094 | * (which holds SCL_CONFIG as reader). There's only one sync thread, | |
34dc7c2f BB |
5095 | * so this is sufficient to ensure mutual exclusion. |
5096 | */ | |
b128c09f BB |
5097 | ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || |
5098 | (dsl_pool_sync_context(spa_get_dsl(spa)) && | |
5099 | spa_config_held(spa, SCL_CONFIG, RW_READER))); | |
34dc7c2f BB |
5100 | |
5101 | if (vd == rvd) { | |
5102 | for (c = 0; c < rvd->vdev_children; c++) | |
5103 | vdev_config_dirty(rvd->vdev_child[c]); | |
5104 | } else { | |
5105 | ASSERT(vd == vd->vdev_top); | |
5106 | ||
428870ff | 5107 | if (!list_link_active(&vd->vdev_config_dirty_node) && |
a1d477c2 | 5108 | vdev_is_concrete(vd)) { |
b128c09f | 5109 | list_insert_head(&spa->spa_config_dirty_list, vd); |
a1d477c2 | 5110 | } |
34dc7c2f BB |
5111 | } |
5112 | } | |
5113 | ||
5114 | void | |
5115 | vdev_config_clean(vdev_t *vd) | |
5116 | { | |
5117 | spa_t *spa = vd->vdev_spa; | |
5118 | ||
b128c09f BB |
5119 | ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) || |
5120 | (dsl_pool_sync_context(spa_get_dsl(spa)) && | |
5121 | spa_config_held(spa, SCL_CONFIG, RW_READER))); | |
34dc7c2f | 5122 | |
b128c09f BB |
5123 | ASSERT(list_link_active(&vd->vdev_config_dirty_node)); |
5124 | list_remove(&spa->spa_config_dirty_list, vd); | |
34dc7c2f BB |
5125 | } |
5126 | ||
b128c09f BB |
5127 | /* |
5128 | * Mark a top-level vdev's state as dirty, so that the next pass of | |
5129 | * spa_sync() can convert this into vdev_config_dirty(). We distinguish | |
5130 | * the state changes from larger config changes because they require | |
5131 | * much less locking, and are often needed for administrative actions. | |
5132 | */ | |
5133 | void | |
5134 | vdev_state_dirty(vdev_t *vd) | |
5135 | { | |
5136 | spa_t *spa = vd->vdev_spa; | |
5137 | ||
572e2857 | 5138 | ASSERT(spa_writeable(spa)); |
b128c09f BB |
5139 | ASSERT(vd == vd->vdev_top); |
5140 | ||
5141 | /* | |
5142 | * The state list is protected by the SCL_STATE lock. The caller | |
5143 | * must either hold SCL_STATE as writer, or must be the sync thread | |
5144 | * (which holds SCL_STATE as reader). There's only one sync thread, | |
5145 | * so this is sufficient to ensure mutual exclusion. | |
5146 | */ | |
5147 | ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || | |
5148 | (dsl_pool_sync_context(spa_get_dsl(spa)) && | |
5149 | spa_config_held(spa, SCL_STATE, RW_READER))); | |
5150 | ||
a1d477c2 MA |
5151 | if (!list_link_active(&vd->vdev_state_dirty_node) && |
5152 | vdev_is_concrete(vd)) | |
b128c09f BB |
5153 | list_insert_head(&spa->spa_state_dirty_list, vd); |
5154 | } | |
5155 | ||
5156 | void | |
5157 | vdev_state_clean(vdev_t *vd) | |
5158 | { | |
5159 | spa_t *spa = vd->vdev_spa; | |
5160 | ||
5161 | ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) || | |
5162 | (dsl_pool_sync_context(spa_get_dsl(spa)) && | |
5163 | spa_config_held(spa, SCL_STATE, RW_READER))); | |
5164 | ||
5165 | ASSERT(list_link_active(&vd->vdev_state_dirty_node)); | |
5166 | list_remove(&spa->spa_state_dirty_list, vd); | |
5167 | } | |
5168 | ||
5169 | /* | |
5170 | * Propagate vdev state up from children to parent. | |
5171 | */ | |
34dc7c2f BB |
5172 | void |
5173 | vdev_propagate_state(vdev_t *vd) | |
5174 | { | |
fb5f0bc8 BB |
5175 | spa_t *spa = vd->vdev_spa; |
5176 | vdev_t *rvd = spa->spa_root_vdev; | |
34dc7c2f BB |
5177 | int degraded = 0, faulted = 0; |
5178 | int corrupted = 0; | |
34dc7c2f BB |
5179 | vdev_t *child; |
5180 | ||
5181 | if (vd->vdev_children > 0) { | |
1c27024e | 5182 | for (int c = 0; c < vd->vdev_children; c++) { |
34dc7c2f | 5183 | child = vd->vdev_child[c]; |
b128c09f | 5184 | |
428870ff | 5185 | /* |
a1d477c2 MA |
5186 | * Don't factor holes or indirect vdevs into the |
5187 | * decision. | |
428870ff | 5188 | */ |
a1d477c2 | 5189 | if (!vdev_is_concrete(child)) |
428870ff BB |
5190 | continue; |
5191 | ||
b128c09f | 5192 | if (!vdev_readable(child) || |
fb5f0bc8 | 5193 | (!vdev_writeable(child) && spa_writeable(spa))) { |
b128c09f BB |
5194 | /* |
5195 | * Root special: if there is a top-level log | |
5196 | * device, treat the root vdev as if it were | |
5197 | * degraded. | |
5198 | */ | |
5199 | if (child->vdev_islog && vd == rvd) | |
5200 | degraded++; | |
5201 | else | |
5202 | faulted++; | |
5203 | } else if (child->vdev_state <= VDEV_STATE_DEGRADED) { | |
34dc7c2f | 5204 | degraded++; |
b128c09f | 5205 | } |
34dc7c2f BB |
5206 | |
5207 | if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA) | |
5208 | corrupted++; | |
5209 | } | |
5210 | ||
5211 | vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded); | |
5212 | ||
5213 | /* | |
b128c09f | 5214 | * Root special: if there is a top-level vdev that cannot be |
34dc7c2f BB |
5215 | * opened due to corrupted metadata, then propagate the root |
5216 | * vdev's aux state as 'corrupt' rather than 'insufficient | |
5217 | * replicas'. | |
5218 | */ | |
5219 | if (corrupted && vd == rvd && | |
5220 | rvd->vdev_state == VDEV_STATE_CANT_OPEN) | |
5221 | vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN, | |
5222 | VDEV_AUX_CORRUPT_DATA); | |
5223 | } | |
5224 | ||
b128c09f | 5225 | if (vd->vdev_parent) |
34dc7c2f BB |
5226 | vdev_propagate_state(vd->vdev_parent); |
5227 | } | |
5228 | ||
5229 | /* | |
5230 | * Set a vdev's state. If this is during an open, we don't update the parent | |
5231 | * state, because we're in the process of opening children depth-first. | |
5232 | * Otherwise, we propagate the change to the parent. | |
5233 | * | |
5234 | * If this routine places a device in a faulted state, an appropriate ereport is | |
5235 | * generated. | |
5236 | */ | |
5237 | void | |
5238 | vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux) | |
5239 | { | |
5240 | uint64_t save_state; | |
b128c09f | 5241 | spa_t *spa = vd->vdev_spa; |
34dc7c2f BB |
5242 | |
5243 | if (state == vd->vdev_state) { | |
976246fa DB |
5244 | /* |
5245 | * Since vdev_offline() code path is already in an offline | |
5246 | * state we can miss a statechange event to OFFLINE. Check | |
5247 | * the previous state to catch this condition. | |
5248 | */ | |
5249 | if (vd->vdev_ops->vdev_op_leaf && | |
5250 | (state == VDEV_STATE_OFFLINE) && | |
5251 | (vd->vdev_prevstate >= VDEV_STATE_FAULTED)) { | |
5252 | /* post an offline state change */ | |
5253 | zfs_post_state_change(spa, vd, vd->vdev_prevstate); | |
5254 | } | |
34dc7c2f BB |
5255 | vd->vdev_stat.vs_aux = aux; |
5256 | return; | |
5257 | } | |
5258 | ||
5259 | save_state = vd->vdev_state; | |
5260 | ||
5261 | vd->vdev_state = state; | |
5262 | vd->vdev_stat.vs_aux = aux; | |
5263 | ||
5264 | /* | |
5265 | * If we are setting the vdev state to anything but an open state, then | |
428870ff BB |
5266 | * always close the underlying device unless the device has requested |
5267 | * a delayed close (i.e. we're about to remove or fault the device). | |
5268 | * Otherwise, we keep accessible but invalid devices open forever. | |
5269 | * We don't call vdev_close() itself, because that implies some extra | |
5270 | * checks (offline, etc) that we don't want here. This is limited to | |
5271 | * leaf devices, because otherwise closing the device will affect other | |
5272 | * children. | |
34dc7c2f | 5273 | */ |
428870ff BB |
5274 | if (!vd->vdev_delayed_close && vdev_is_dead(vd) && |
5275 | vd->vdev_ops->vdev_op_leaf) | |
34dc7c2f BB |
5276 | vd->vdev_ops->vdev_op_close(vd); |
5277 | ||
5278 | if (vd->vdev_removed && | |
5279 | state == VDEV_STATE_CANT_OPEN && | |
5280 | (aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) { | |
5281 | /* | |
5282 | * If the previous state is set to VDEV_STATE_REMOVED, then this | |
5283 | * device was previously marked removed and someone attempted to | |
5284 | * reopen it. If this failed due to a nonexistent device, then | |
5285 | * keep the device in the REMOVED state. We also let this be if | |
5286 | * it is one of our special test online cases, which is only | |
5287 | * attempting to online the device and shouldn't generate an FMA | |
5288 | * fault. | |
5289 | */ | |
5290 | vd->vdev_state = VDEV_STATE_REMOVED; | |
5291 | vd->vdev_stat.vs_aux = VDEV_AUX_NONE; | |
5292 | } else if (state == VDEV_STATE_REMOVED) { | |
34dc7c2f BB |
5293 | vd->vdev_removed = B_TRUE; |
5294 | } else if (state == VDEV_STATE_CANT_OPEN) { | |
5295 | /* | |
572e2857 BB |
5296 | * If we fail to open a vdev during an import or recovery, we |
5297 | * mark it as "not available", which signifies that it was | |
5298 | * never there to begin with. Failure to open such a device | |
5299 | * is not considered an error. | |
34dc7c2f | 5300 | */ |
572e2857 BB |
5301 | if ((spa_load_state(spa) == SPA_LOAD_IMPORT || |
5302 | spa_load_state(spa) == SPA_LOAD_RECOVER) && | |
34dc7c2f BB |
5303 | vd->vdev_ops->vdev_op_leaf) |
5304 | vd->vdev_not_present = 1; | |
5305 | ||
5306 | /* | |
5307 | * Post the appropriate ereport. If the 'prevstate' field is | |
5308 | * set to something other than VDEV_STATE_UNKNOWN, it indicates | |
5309 | * that this is part of a vdev_reopen(). In this case, we don't | |
5310 | * want to post the ereport if the device was already in the | |
5311 | * CANT_OPEN state beforehand. | |
5312 | * | |
5313 | * If the 'checkremove' flag is set, then this is an attempt to | |
5314 | * online the device in response to an insertion event. If we | |
5315 | * hit this case, then we have detected an insertion event for a | |
5316 | * faulted or offline device that wasn't in the removed state. | |
5317 | * In this scenario, we don't post an ereport because we are | |
5318 | * about to replace the device, or attempt an online with | |
5319 | * vdev_forcefault, which will generate the fault for us. | |
5320 | */ | |
5321 | if ((vd->vdev_prevstate != state || vd->vdev_forcefault) && | |
5322 | !vd->vdev_not_present && !vd->vdev_checkremove && | |
b128c09f | 5323 | vd != spa->spa_root_vdev) { |
34dc7c2f BB |
5324 | const char *class; |
5325 | ||
5326 | switch (aux) { | |
5327 | case VDEV_AUX_OPEN_FAILED: | |
5328 | class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED; | |
5329 | break; | |
5330 | case VDEV_AUX_CORRUPT_DATA: | |
5331 | class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA; | |
5332 | break; | |
5333 | case VDEV_AUX_NO_REPLICAS: | |
5334 | class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS; | |
5335 | break; | |
5336 | case VDEV_AUX_BAD_GUID_SUM: | |
5337 | class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM; | |
5338 | break; | |
5339 | case VDEV_AUX_TOO_SMALL: | |
5340 | class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL; | |
5341 | break; | |
5342 | case VDEV_AUX_BAD_LABEL: | |
5343 | class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL; | |
5344 | break; | |
ff61d1a4 | 5345 | case VDEV_AUX_BAD_ASHIFT: |
5346 | class = FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT; | |
5347 | break; | |
34dc7c2f BB |
5348 | default: |
5349 | class = FM_EREPORT_ZFS_DEVICE_UNKNOWN; | |
5350 | } | |
5351 | ||
1144586b | 5352 | (void) zfs_ereport_post(class, spa, vd, NULL, NULL, |
4f072827 | 5353 | save_state); |
34dc7c2f BB |
5354 | } |
5355 | ||
5356 | /* Erase any notion of persistent removed state */ | |
5357 | vd->vdev_removed = B_FALSE; | |
5358 | } else { | |
5359 | vd->vdev_removed = B_FALSE; | |
5360 | } | |
5361 | ||
d02ca379 DB |
5362 | /* |
5363 | * Notify ZED of any significant state-change on a leaf vdev. | |
5364 | * | |
d02ca379 | 5365 | */ |
6078881a TH |
5366 | if (vd->vdev_ops->vdev_op_leaf) { |
5367 | /* preserve original state from a vdev_reopen() */ | |
5368 | if ((vd->vdev_prevstate != VDEV_STATE_UNKNOWN) && | |
5369 | (vd->vdev_prevstate != vd->vdev_state) && | |
5370 | (save_state <= VDEV_STATE_CLOSED)) | |
5371 | save_state = vd->vdev_prevstate; | |
5372 | ||
5373 | /* filter out state change due to initial vdev_open */ | |
5374 | if (save_state > VDEV_STATE_CLOSED) | |
5375 | zfs_post_state_change(spa, vd, save_state); | |
d02ca379 DB |
5376 | } |
5377 | ||
9babb374 BB |
5378 | if (!isopen && vd->vdev_parent) |
5379 | vdev_propagate_state(vd->vdev_parent); | |
34dc7c2f | 5380 | } |
b128c09f | 5381 | |
6cb8e530 PZ |
5382 | boolean_t |
5383 | vdev_children_are_offline(vdev_t *vd) | |
5384 | { | |
5385 | ASSERT(!vd->vdev_ops->vdev_op_leaf); | |
5386 | ||
5387 | for (uint64_t i = 0; i < vd->vdev_children; i++) { | |
5388 | if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE) | |
5389 | return (B_FALSE); | |
5390 | } | |
5391 | ||
5392 | return (B_TRUE); | |
5393 | } | |
5394 | ||
b128c09f BB |
5395 | /* |
5396 | * Check the vdev configuration to ensure that it's capable of supporting | |
e550644f | 5397 | * a root pool. We do not support partial configuration. |
b128c09f BB |
5398 | */ |
5399 | boolean_t | |
5400 | vdev_is_bootable(vdev_t *vd) | |
5401 | { | |
b128c09f | 5402 | if (!vd->vdev_ops->vdev_op_leaf) { |
e550644f | 5403 | const char *vdev_type = vd->vdev_ops->vdev_op_type; |
b128c09f | 5404 | |
cd5b8128 | 5405 | if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0) |
b128c09f | 5406 | return (B_FALSE); |
b128c09f BB |
5407 | } |
5408 | ||
e550644f | 5409 | for (int c = 0; c < vd->vdev_children; c++) { |
b128c09f BB |
5410 | if (!vdev_is_bootable(vd->vdev_child[c])) |
5411 | return (B_FALSE); | |
5412 | } | |
5413 | return (B_TRUE); | |
5414 | } | |
9babb374 | 5415 | |
a1d477c2 MA |
5416 | boolean_t |
5417 | vdev_is_concrete(vdev_t *vd) | |
5418 | { | |
5419 | vdev_ops_t *ops = vd->vdev_ops; | |
5420 | if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops || | |
5421 | ops == &vdev_missing_ops || ops == &vdev_root_ops) { | |
5422 | return (B_FALSE); | |
5423 | } else { | |
5424 | return (B_TRUE); | |
5425 | } | |
5426 | } | |
5427 | ||
572e2857 BB |
5428 | /* |
5429 | * Determine if a log device has valid content. If the vdev was | |
5430 | * removed or faulted in the MOS config then we know that | |
5431 | * the content on the log device has already been written to the pool. | |
5432 | */ | |
5433 | boolean_t | |
5434 | vdev_log_state_valid(vdev_t *vd) | |
5435 | { | |
5436 | if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted && | |
5437 | !vd->vdev_removed) | |
5438 | return (B_TRUE); | |
5439 | ||
1c27024e | 5440 | for (int c = 0; c < vd->vdev_children; c++) |
572e2857 BB |
5441 | if (vdev_log_state_valid(vd->vdev_child[c])) |
5442 | return (B_TRUE); | |
5443 | ||
5444 | return (B_FALSE); | |
5445 | } | |
5446 | ||
9babb374 BB |
5447 | /* |
5448 | * Expand a vdev if possible. | |
5449 | */ | |
5450 | void | |
5451 | vdev_expand(vdev_t *vd, uint64_t txg) | |
5452 | { | |
5453 | ASSERT(vd->vdev_top == vd); | |
5454 | ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); | |
7637ef8d | 5455 | ASSERT(vdev_is_concrete(vd)); |
9babb374 | 5456 | |
a1d477c2 MA |
5457 | vdev_set_deflate_ratio(vd); |
5458 | ||
cc99f275 DB |
5459 | if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count && |
5460 | vdev_is_concrete(vd)) { | |
5461 | vdev_metaslab_group_create(vd); | |
9babb374 BB |
5462 | VERIFY(vdev_metaslab_init(vd, txg) == 0); |
5463 | vdev_config_dirty(vd); | |
5464 | } | |
5465 | } | |
428870ff BB |
5466 | |
5467 | /* | |
5468 | * Split a vdev. | |
5469 | */ | |
5470 | void | |
5471 | vdev_split(vdev_t *vd) | |
5472 | { | |
5473 | vdev_t *cvd, *pvd = vd->vdev_parent; | |
5474 | ||
399bb816 RY |
5475 | VERIFY3U(pvd->vdev_children, >, 1); |
5476 | ||
428870ff BB |
5477 | vdev_remove_child(pvd, vd); |
5478 | vdev_compact_children(pvd); | |
5479 | ||
399bb816 RY |
5480 | ASSERT3P(pvd->vdev_child, !=, NULL); |
5481 | ||
428870ff BB |
5482 | cvd = pvd->vdev_child[0]; |
5483 | if (pvd->vdev_children == 1) { | |
5484 | vdev_remove_parent(cvd); | |
5485 | cvd->vdev_splitting = B_TRUE; | |
5486 | } | |
5487 | vdev_propagate_state(cvd); | |
5488 | } | |
c28b2279 | 5489 | |
cc92e9d0 | 5490 | void |
dd66857d | 5491 | vdev_deadman(vdev_t *vd, const char *tag) |
cc92e9d0 | 5492 | { |
1c27024e | 5493 | for (int c = 0; c < vd->vdev_children; c++) { |
cc92e9d0 GW |
5494 | vdev_t *cvd = vd->vdev_child[c]; |
5495 | ||
8fb1ede1 | 5496 | vdev_deadman(cvd, tag); |
cc92e9d0 GW |
5497 | } |
5498 | ||
5499 | if (vd->vdev_ops->vdev_op_leaf) { | |
5500 | vdev_queue_t *vq = &vd->vdev_queue; | |
5501 | ||
5502 | mutex_enter(&vq->vq_lock); | |
8469b5aa | 5503 | if (vq->vq_active > 0) { |
cc92e9d0 GW |
5504 | spa_t *spa = vd->vdev_spa; |
5505 | zio_t *fio; | |
5506 | uint64_t delta; | |
5507 | ||
8469b5aa AM |
5508 | zfs_dbgmsg("slow vdev: %s has %u active IOs", |
5509 | vd->vdev_path, vq->vq_active); | |
8fb1ede1 | 5510 | |
cc92e9d0 GW |
5511 | /* |
5512 | * Look at the head of all the pending queues, | |
5513 | * if any I/O has been outstanding for longer than | |
8fb1ede1 | 5514 | * the spa_deadman_synctime invoke the deadman logic. |
cc92e9d0 | 5515 | */ |
8469b5aa | 5516 | fio = list_head(&vq->vq_active_list); |
cb682a17 | 5517 | delta = gethrtime() - fio->io_timestamp; |
8fb1ede1 BB |
5518 | if (delta > spa_deadman_synctime(spa)) |
5519 | zio_deadman(fio, tag); | |
cc92e9d0 GW |
5520 | } |
5521 | mutex_exit(&vq->vq_lock); | |
5522 | } | |
5523 | } | |
5524 | ||
80a91e74 | 5525 | void |
3c819a2c | 5526 | vdev_defer_resilver(vdev_t *vd) |
80a91e74 | 5527 | { |
3c819a2c | 5528 | ASSERT(vd->vdev_ops->vdev_op_leaf); |
4021ba4c | 5529 | |
3c819a2c JP |
5530 | vd->vdev_resilver_deferred = B_TRUE; |
5531 | vd->vdev_spa->spa_resilver_deferred = B_TRUE; | |
5532 | } | |
5533 | ||
5534 | /* | |
5535 | * Clears the resilver deferred flag on all leaf devs under vd. Returns | |
5536 | * B_TRUE if we have devices that need to be resilvered and are available to | |
5537 | * accept resilver I/Os. | |
5538 | */ | |
5539 | boolean_t | |
5540 | vdev_clear_resilver_deferred(vdev_t *vd, dmu_tx_t *tx) | |
5541 | { | |
5542 | boolean_t resilver_needed = B_FALSE; | |
5543 | spa_t *spa = vd->vdev_spa; | |
5544 | ||
5545 | for (int c = 0; c < vd->vdev_children; c++) { | |
5546 | vdev_t *cvd = vd->vdev_child[c]; | |
5547 | resilver_needed |= vdev_clear_resilver_deferred(cvd, tx); | |
4021ba4c TC |
5548 | } |
5549 | ||
3c819a2c JP |
5550 | if (vd == spa->spa_root_vdev && |
5551 | spa_feature_is_active(spa, SPA_FEATURE_RESILVER_DEFER)) { | |
5552 | spa_feature_decr(spa, SPA_FEATURE_RESILVER_DEFER, tx); | |
5553 | vdev_config_dirty(vd); | |
5554 | spa->spa_resilver_deferred = B_FALSE; | |
5555 | return (resilver_needed); | |
5556 | } | |
5557 | ||
5558 | if (!vdev_is_concrete(vd) || vd->vdev_aux || | |
5559 | !vd->vdev_ops->vdev_op_leaf) | |
5560 | return (resilver_needed); | |
5561 | ||
5562 | vd->vdev_resilver_deferred = B_FALSE; | |
5563 | ||
5564 | return (!vdev_is_dead(vd) && !vd->vdev_offline && | |
5565 | vdev_resilver_needed(vd, NULL, NULL)); | |
80a91e74 TC |
5566 | } |
5567 | ||
b2255edc BB |
5568 | boolean_t |
5569 | vdev_xlate_is_empty(range_seg64_t *rs) | |
5570 | { | |
5571 | return (rs->rs_start == rs->rs_end); | |
5572 | } | |
5573 | ||
1b939560 | 5574 | /* |
b2255edc BB |
5575 | * Translate a logical range to the first contiguous physical range for the |
5576 | * specified vdev_t. This function is initially called with a leaf vdev and | |
5577 | * will walk each parent vdev until it reaches a top-level vdev. Once the | |
5578 | * top-level is reached the physical range is initialized and the recursive | |
5579 | * function begins to unwind. As it unwinds it calls the parent's vdev | |
5580 | * specific translation function to do the real conversion. | |
1b939560 BB |
5581 | */ |
5582 | void | |
ca577779 | 5583 | vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs, |
b2255edc | 5584 | range_seg64_t *physical_rs, range_seg64_t *remain_rs) |
1b939560 BB |
5585 | { |
5586 | /* | |
5587 | * Walk up the vdev tree | |
5588 | */ | |
5589 | if (vd != vd->vdev_top) { | |
b2255edc BB |
5590 | vdev_xlate(vd->vdev_parent, logical_rs, physical_rs, |
5591 | remain_rs); | |
1b939560 BB |
5592 | } else { |
5593 | /* | |
b2255edc BB |
5594 | * We've reached the top-level vdev, initialize the physical |
5595 | * range to the logical range and set an empty remaining | |
5596 | * range then start to unwind. | |
1b939560 BB |
5597 | */ |
5598 | physical_rs->rs_start = logical_rs->rs_start; | |
5599 | physical_rs->rs_end = logical_rs->rs_end; | |
b2255edc BB |
5600 | |
5601 | remain_rs->rs_start = logical_rs->rs_start; | |
5602 | remain_rs->rs_end = logical_rs->rs_start; | |
5603 | ||
1b939560 BB |
5604 | return; |
5605 | } | |
5606 | ||
5607 | vdev_t *pvd = vd->vdev_parent; | |
5608 | ASSERT3P(pvd, !=, NULL); | |
5609 | ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL); | |
5610 | ||
5611 | /* | |
5612 | * As this recursive function unwinds, translate the logical | |
b2255edc BB |
5613 | * range into its physical and any remaining components by calling |
5614 | * the vdev specific translate function. | |
1b939560 | 5615 | */ |
ca577779 | 5616 | range_seg64_t intermediate = { 0 }; |
b2255edc | 5617 | pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate, remain_rs); |
1b939560 BB |
5618 | |
5619 | physical_rs->rs_start = intermediate.rs_start; | |
5620 | physical_rs->rs_end = intermediate.rs_end; | |
5621 | } | |
5622 | ||
b2255edc BB |
5623 | void |
5624 | vdev_xlate_walk(vdev_t *vd, const range_seg64_t *logical_rs, | |
5625 | vdev_xlate_func_t *func, void *arg) | |
5626 | { | |
5627 | range_seg64_t iter_rs = *logical_rs; | |
5628 | range_seg64_t physical_rs; | |
5629 | range_seg64_t remain_rs; | |
5630 | ||
5631 | while (!vdev_xlate_is_empty(&iter_rs)) { | |
5632 | ||
5633 | vdev_xlate(vd, &iter_rs, &physical_rs, &remain_rs); | |
5634 | ||
5635 | /* | |
5636 | * With raidz and dRAID, it's possible that the logical range | |
5637 | * does not live on this leaf vdev. Only when there is a non- | |
5638 | * zero physical size call the provided function. | |
5639 | */ | |
5640 | if (!vdev_xlate_is_empty(&physical_rs)) | |
5641 | func(arg, &physical_rs); | |
5642 | ||
5643 | iter_rs = remain_rs; | |
5644 | } | |
5645 | } | |
5646 | ||
2a673e76 AJ |
5647 | static char * |
5648 | vdev_name(vdev_t *vd, char *buf, int buflen) | |
5649 | { | |
5650 | if (vd->vdev_path == NULL) { | |
5651 | if (strcmp(vd->vdev_ops->vdev_op_type, "root") == 0) { | |
5652 | strlcpy(buf, vd->vdev_spa->spa_name, buflen); | |
5653 | } else if (!vd->vdev_ops->vdev_op_leaf) { | |
5654 | snprintf(buf, buflen, "%s-%llu", | |
5655 | vd->vdev_ops->vdev_op_type, | |
5656 | (u_longlong_t)vd->vdev_id); | |
5657 | } | |
5658 | } else { | |
5659 | strlcpy(buf, vd->vdev_path, buflen); | |
5660 | } | |
5661 | return (buf); | |
5662 | } | |
5663 | ||
e60e158e JG |
5664 | /* |
5665 | * Look at the vdev tree and determine whether any devices are currently being | |
5666 | * replaced. | |
5667 | */ | |
5668 | boolean_t | |
5669 | vdev_replace_in_progress(vdev_t *vdev) | |
5670 | { | |
5671 | ASSERT(spa_config_held(vdev->vdev_spa, SCL_ALL, RW_READER) != 0); | |
5672 | ||
5673 | if (vdev->vdev_ops == &vdev_replacing_ops) | |
5674 | return (B_TRUE); | |
5675 | ||
5676 | /* | |
5677 | * A 'spare' vdev indicates that we have a replace in progress, unless | |
5678 | * it has exactly two children, and the second, the hot spare, has | |
5679 | * finished being resilvered. | |
5680 | */ | |
5681 | if (vdev->vdev_ops == &vdev_spare_ops && (vdev->vdev_children > 2 || | |
5682 | !vdev_dtl_empty(vdev->vdev_child[1], DTL_MISSING))) | |
5683 | return (B_TRUE); | |
5684 | ||
5685 | for (int i = 0; i < vdev->vdev_children; i++) { | |
5686 | if (vdev_replace_in_progress(vdev->vdev_child[i])) | |
5687 | return (B_TRUE); | |
5688 | } | |
5689 | ||
5690 | return (B_FALSE); | |
5691 | } | |
5692 | ||
2a673e76 AJ |
5693 | /* |
5694 | * Add a (source=src, propname=propval) list to an nvlist. | |
5695 | */ | |
5696 | static void | |
d1807f16 | 5697 | vdev_prop_add_list(nvlist_t *nvl, const char *propname, const char *strval, |
2a673e76 AJ |
5698 | uint64_t intval, zprop_source_t src) |
5699 | { | |
5700 | nvlist_t *propval; | |
5701 | ||
5702 | propval = fnvlist_alloc(); | |
5703 | fnvlist_add_uint64(propval, ZPROP_SOURCE, src); | |
5704 | ||
5705 | if (strval != NULL) | |
5706 | fnvlist_add_string(propval, ZPROP_VALUE, strval); | |
5707 | else | |
5708 | fnvlist_add_uint64(propval, ZPROP_VALUE, intval); | |
5709 | ||
5710 | fnvlist_add_nvlist(nvl, propname, propval); | |
5711 | nvlist_free(propval); | |
5712 | } | |
5713 | ||
5714 | static void | |
5715 | vdev_props_set_sync(void *arg, dmu_tx_t *tx) | |
5716 | { | |
5717 | vdev_t *vd; | |
5718 | nvlist_t *nvp = arg; | |
5719 | spa_t *spa = dmu_tx_pool(tx)->dp_spa; | |
5720 | objset_t *mos = spa->spa_meta_objset; | |
5721 | nvpair_t *elem = NULL; | |
5722 | uint64_t vdev_guid; | |
929173ab | 5723 | uint64_t objid; |
2a673e76 AJ |
5724 | nvlist_t *nvprops; |
5725 | ||
5726 | vdev_guid = fnvlist_lookup_uint64(nvp, ZPOOL_VDEV_PROPS_SET_VDEV); | |
5727 | nvprops = fnvlist_lookup_nvlist(nvp, ZPOOL_VDEV_PROPS_SET_PROPS); | |
5728 | vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE); | |
da9c6c03 MM |
5729 | |
5730 | /* this vdev could get removed while waiting for this sync task */ | |
5731 | if (vd == NULL) | |
5732 | return; | |
2a673e76 | 5733 | |
929173ab YP |
5734 | /* |
5735 | * Set vdev property values in the vdev props mos object. | |
5736 | */ | |
5737 | if (vd->vdev_root_zap != 0) { | |
5738 | objid = vd->vdev_root_zap; | |
5739 | } else if (vd->vdev_top_zap != 0) { | |
5740 | objid = vd->vdev_top_zap; | |
5741 | } else if (vd->vdev_leaf_zap != 0) { | |
5742 | objid = vd->vdev_leaf_zap; | |
5743 | } else { | |
5744 | panic("unexpected vdev type"); | |
5745 | } | |
5746 | ||
2a673e76 AJ |
5747 | mutex_enter(&spa->spa_props_lock); |
5748 | ||
5749 | while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) { | |
929173ab | 5750 | uint64_t intval; |
d1807f16 | 5751 | const char *strval; |
2a673e76 AJ |
5752 | vdev_prop_t prop; |
5753 | const char *propname = nvpair_name(elem); | |
5754 | zprop_type_t proptype; | |
5755 | ||
2a673e76 | 5756 | switch (prop = vdev_name_to_prop(propname)) { |
4ff7a8fa | 5757 | case VDEV_PROP_USERPROP: |
2a673e76 AJ |
5758 | if (vdev_prop_user(propname)) { |
5759 | strval = fnvpair_value_string(elem); | |
5760 | if (strlen(strval) == 0) { | |
5761 | /* remove the property if value == "" */ | |
5762 | (void) zap_remove(mos, objid, propname, | |
5763 | tx); | |
5764 | } else { | |
5765 | VERIFY0(zap_update(mos, objid, propname, | |
5766 | 1, strlen(strval) + 1, strval, tx)); | |
5767 | } | |
5768 | spa_history_log_internal(spa, "vdev set", tx, | |
5769 | "vdev_guid=%llu: %s=%s", | |
5770 | (u_longlong_t)vdev_guid, nvpair_name(elem), | |
5771 | strval); | |
5772 | } | |
5773 | break; | |
5774 | default: | |
5775 | /* normalize the property name */ | |
5776 | propname = vdev_prop_to_name(prop); | |
5777 | proptype = vdev_prop_get_type(prop); | |
5778 | ||
5779 | if (nvpair_type(elem) == DATA_TYPE_STRING) { | |
5780 | ASSERT(proptype == PROP_TYPE_STRING); | |
5781 | strval = fnvpair_value_string(elem); | |
5782 | VERIFY0(zap_update(mos, objid, propname, | |
5783 | 1, strlen(strval) + 1, strval, tx)); | |
5784 | spa_history_log_internal(spa, "vdev set", tx, | |
5785 | "vdev_guid=%llu: %s=%s", | |
5786 | (u_longlong_t)vdev_guid, nvpair_name(elem), | |
5787 | strval); | |
5788 | } else if (nvpair_type(elem) == DATA_TYPE_UINT64) { | |
5789 | intval = fnvpair_value_uint64(elem); | |
5790 | ||
5791 | if (proptype == PROP_TYPE_INDEX) { | |
5792 | const char *unused; | |
5793 | VERIFY0(vdev_prop_index_to_string( | |
5794 | prop, intval, &unused)); | |
5795 | } | |
5796 | VERIFY0(zap_update(mos, objid, propname, | |
5797 | sizeof (uint64_t), 1, &intval, tx)); | |
5798 | spa_history_log_internal(spa, "vdev set", tx, | |
5799 | "vdev_guid=%llu: %s=%lld", | |
5800 | (u_longlong_t)vdev_guid, | |
5801 | nvpair_name(elem), (longlong_t)intval); | |
5802 | } else { | |
5803 | panic("invalid vdev property type %u", | |
5804 | nvpair_type(elem)); | |
5805 | } | |
5806 | } | |
5807 | ||
5808 | } | |
5809 | ||
5810 | mutex_exit(&spa->spa_props_lock); | |
5811 | } | |
5812 | ||
5813 | int | |
5814 | vdev_prop_set(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl) | |
5815 | { | |
5816 | spa_t *spa = vd->vdev_spa; | |
5817 | nvpair_t *elem = NULL; | |
5818 | uint64_t vdev_guid; | |
5819 | nvlist_t *nvprops; | |
70248b82 | 5820 | int error = 0; |
2a673e76 AJ |
5821 | |
5822 | ASSERT(vd != NULL); | |
5823 | ||
929173ab YP |
5824 | /* Check that vdev has a zap we can use */ |
5825 | if (vd->vdev_root_zap == 0 && | |
5826 | vd->vdev_top_zap == 0 && | |
5827 | vd->vdev_leaf_zap == 0) | |
5828 | return (SET_ERROR(EINVAL)); | |
5829 | ||
2a673e76 AJ |
5830 | if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_SET_VDEV, |
5831 | &vdev_guid) != 0) | |
5832 | return (SET_ERROR(EINVAL)); | |
5833 | ||
5834 | if (nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_SET_PROPS, | |
5835 | &nvprops) != 0) | |
5836 | return (SET_ERROR(EINVAL)); | |
5837 | ||
5838 | if ((vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE)) == NULL) | |
5839 | return (SET_ERROR(EINVAL)); | |
5840 | ||
5841 | while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) { | |
d1807f16 | 5842 | const char *propname = nvpair_name(elem); |
2a673e76 AJ |
5843 | vdev_prop_t prop = vdev_name_to_prop(propname); |
5844 | uint64_t intval = 0; | |
d1807f16 | 5845 | const char *strval = NULL; |
2a673e76 | 5846 | |
4ff7a8fa | 5847 | if (prop == VDEV_PROP_USERPROP && !vdev_prop_user(propname)) { |
2a673e76 AJ |
5848 | error = EINVAL; |
5849 | goto end; | |
5850 | } | |
5851 | ||
5852 | if (vdev_prop_readonly(prop)) { | |
5853 | error = EROFS; | |
5854 | goto end; | |
5855 | } | |
5856 | ||
5857 | /* Special Processing */ | |
5858 | switch (prop) { | |
5859 | case VDEV_PROP_PATH: | |
5860 | if (vd->vdev_path == NULL) { | |
5861 | error = EROFS; | |
5862 | break; | |
5863 | } | |
5864 | if (nvpair_value_string(elem, &strval) != 0) { | |
5865 | error = EINVAL; | |
5866 | break; | |
5867 | } | |
5868 | /* New path must start with /dev/ */ | |
5869 | if (strncmp(strval, "/dev/", 5)) { | |
5870 | error = EINVAL; | |
5871 | break; | |
5872 | } | |
5873 | error = spa_vdev_setpath(spa, vdev_guid, strval); | |
5874 | break; | |
5875 | case VDEV_PROP_ALLOCATING: | |
5876 | if (nvpair_value_uint64(elem, &intval) != 0) { | |
5877 | error = EINVAL; | |
5878 | break; | |
5879 | } | |
5880 | if (intval != vd->vdev_noalloc) | |
5881 | break; | |
5882 | if (intval == 0) | |
5883 | error = spa_vdev_noalloc(spa, vdev_guid); | |
5884 | else | |
5885 | error = spa_vdev_alloc(spa, vdev_guid); | |
5886 | break; | |
16f0fdad MZ |
5887 | case VDEV_PROP_FAILFAST: |
5888 | if (nvpair_value_uint64(elem, &intval) != 0) { | |
5889 | error = EINVAL; | |
5890 | break; | |
5891 | } | |
5892 | vd->vdev_failfast = intval & 1; | |
5893 | break; | |
69f024a5 RW |
5894 | case VDEV_PROP_CHECKSUM_N: |
5895 | if (nvpair_value_uint64(elem, &intval) != 0) { | |
5896 | error = EINVAL; | |
5897 | break; | |
5898 | } | |
5899 | vd->vdev_checksum_n = intval; | |
5900 | break; | |
5901 | case VDEV_PROP_CHECKSUM_T: | |
5902 | if (nvpair_value_uint64(elem, &intval) != 0) { | |
5903 | error = EINVAL; | |
5904 | break; | |
5905 | } | |
5906 | vd->vdev_checksum_t = intval; | |
5907 | break; | |
5908 | case VDEV_PROP_IO_N: | |
5909 | if (nvpair_value_uint64(elem, &intval) != 0) { | |
5910 | error = EINVAL; | |
5911 | break; | |
5912 | } | |
5913 | vd->vdev_io_n = intval; | |
5914 | break; | |
5915 | case VDEV_PROP_IO_T: | |
5916 | if (nvpair_value_uint64(elem, &intval) != 0) { | |
5917 | error = EINVAL; | |
5918 | break; | |
5919 | } | |
5920 | vd->vdev_io_t = intval; | |
5921 | break; | |
2a673e76 AJ |
5922 | default: |
5923 | /* Most processing is done in vdev_props_set_sync */ | |
5924 | break; | |
5925 | } | |
5926 | end: | |
5927 | if (error != 0) { | |
5928 | intval = error; | |
5929 | vdev_prop_add_list(outnvl, propname, strval, intval, 0); | |
5930 | return (error); | |
5931 | } | |
5932 | } | |
5933 | ||
5934 | return (dsl_sync_task(spa->spa_name, NULL, vdev_props_set_sync, | |
5935 | innvl, 6, ZFS_SPACE_CHECK_EXTRA_RESERVED)); | |
5936 | } | |
5937 | ||
5938 | int | |
5939 | vdev_prop_get(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl) | |
5940 | { | |
5941 | spa_t *spa = vd->vdev_spa; | |
5942 | objset_t *mos = spa->spa_meta_objset; | |
5943 | int err = 0; | |
5944 | uint64_t objid; | |
5945 | uint64_t vdev_guid; | |
5946 | nvpair_t *elem = NULL; | |
5947 | nvlist_t *nvprops = NULL; | |
5948 | uint64_t intval = 0; | |
5949 | char *strval = NULL; | |
5950 | const char *propname = NULL; | |
5951 | vdev_prop_t prop; | |
5952 | ||
5953 | ASSERT(vd != NULL); | |
5954 | ASSERT(mos != NULL); | |
5955 | ||
5956 | if (nvlist_lookup_uint64(innvl, ZPOOL_VDEV_PROPS_GET_VDEV, | |
5957 | &vdev_guid) != 0) | |
5958 | return (SET_ERROR(EINVAL)); | |
5959 | ||
5960 | nvlist_lookup_nvlist(innvl, ZPOOL_VDEV_PROPS_GET_PROPS, &nvprops); | |
5961 | ||
3e4ed421 RW |
5962 | if (vd->vdev_root_zap != 0) { |
5963 | objid = vd->vdev_root_zap; | |
5964 | } else if (vd->vdev_top_zap != 0) { | |
2a673e76 AJ |
5965 | objid = vd->vdev_top_zap; |
5966 | } else if (vd->vdev_leaf_zap != 0) { | |
5967 | objid = vd->vdev_leaf_zap; | |
5968 | } else { | |
5969 | return (SET_ERROR(EINVAL)); | |
5970 | } | |
5971 | ASSERT(objid != 0); | |
5972 | ||
5973 | mutex_enter(&spa->spa_props_lock); | |
5974 | ||
5975 | if (nvprops != NULL) { | |
5976 | char namebuf[64] = { 0 }; | |
5977 | ||
5978 | while ((elem = nvlist_next_nvpair(nvprops, elem)) != NULL) { | |
5979 | intval = 0; | |
5980 | strval = NULL; | |
5981 | propname = nvpair_name(elem); | |
5982 | prop = vdev_name_to_prop(propname); | |
5983 | zprop_source_t src = ZPROP_SRC_DEFAULT; | |
5984 | uint64_t integer_size, num_integers; | |
5985 | ||
5986 | switch (prop) { | |
5987 | /* Special Read-only Properties */ | |
5988 | case VDEV_PROP_NAME: | |
5989 | strval = vdev_name(vd, namebuf, | |
5990 | sizeof (namebuf)); | |
5991 | if (strval == NULL) | |
5992 | continue; | |
5993 | vdev_prop_add_list(outnvl, propname, strval, 0, | |
5994 | ZPROP_SRC_NONE); | |
5995 | continue; | |
5996 | case VDEV_PROP_CAPACITY: | |
5997 | /* percent used */ | |
5998 | intval = (vd->vdev_stat.vs_dspace == 0) ? 0 : | |
5999 | (vd->vdev_stat.vs_alloc * 100 / | |
6000 | vd->vdev_stat.vs_dspace); | |
6001 | vdev_prop_add_list(outnvl, propname, NULL, | |
6002 | intval, ZPROP_SRC_NONE); | |
6003 | continue; | |
6004 | case VDEV_PROP_STATE: | |
6005 | vdev_prop_add_list(outnvl, propname, NULL, | |
6006 | vd->vdev_state, ZPROP_SRC_NONE); | |
6007 | continue; | |
6008 | case VDEV_PROP_GUID: | |
6009 | vdev_prop_add_list(outnvl, propname, NULL, | |
6010 | vd->vdev_guid, ZPROP_SRC_NONE); | |
6011 | continue; | |
6012 | case VDEV_PROP_ASIZE: | |
6013 | vdev_prop_add_list(outnvl, propname, NULL, | |
6014 | vd->vdev_asize, ZPROP_SRC_NONE); | |
6015 | continue; | |
6016 | case VDEV_PROP_PSIZE: | |
6017 | vdev_prop_add_list(outnvl, propname, NULL, | |
6018 | vd->vdev_psize, ZPROP_SRC_NONE); | |
6019 | continue; | |
6020 | case VDEV_PROP_ASHIFT: | |
6021 | vdev_prop_add_list(outnvl, propname, NULL, | |
6022 | vd->vdev_ashift, ZPROP_SRC_NONE); | |
6023 | continue; | |
6024 | case VDEV_PROP_SIZE: | |
6025 | vdev_prop_add_list(outnvl, propname, NULL, | |
6026 | vd->vdev_stat.vs_dspace, ZPROP_SRC_NONE); | |
6027 | continue; | |
6028 | case VDEV_PROP_FREE: | |
6029 | vdev_prop_add_list(outnvl, propname, NULL, | |
6030 | vd->vdev_stat.vs_dspace - | |
6031 | vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE); | |
6032 | continue; | |
6033 | case VDEV_PROP_ALLOCATED: | |
6034 | vdev_prop_add_list(outnvl, propname, NULL, | |
6035 | vd->vdev_stat.vs_alloc, ZPROP_SRC_NONE); | |
6036 | continue; | |
6037 | case VDEV_PROP_EXPANDSZ: | |
6038 | vdev_prop_add_list(outnvl, propname, NULL, | |
6039 | vd->vdev_stat.vs_esize, ZPROP_SRC_NONE); | |
6040 | continue; | |
6041 | case VDEV_PROP_FRAGMENTATION: | |
6042 | vdev_prop_add_list(outnvl, propname, NULL, | |
6043 | vd->vdev_stat.vs_fragmentation, | |
6044 | ZPROP_SRC_NONE); | |
6045 | continue; | |
6046 | case VDEV_PROP_PARITY: | |
6047 | vdev_prop_add_list(outnvl, propname, NULL, | |
6048 | vdev_get_nparity(vd), ZPROP_SRC_NONE); | |
6049 | continue; | |
6050 | case VDEV_PROP_PATH: | |
6051 | if (vd->vdev_path == NULL) | |
6052 | continue; | |
6053 | vdev_prop_add_list(outnvl, propname, | |
6054 | vd->vdev_path, 0, ZPROP_SRC_NONE); | |
6055 | continue; | |
6056 | case VDEV_PROP_DEVID: | |
6057 | if (vd->vdev_devid == NULL) | |
6058 | continue; | |
6059 | vdev_prop_add_list(outnvl, propname, | |
6060 | vd->vdev_devid, 0, ZPROP_SRC_NONE); | |
6061 | continue; | |
6062 | case VDEV_PROP_PHYS_PATH: | |
6063 | if (vd->vdev_physpath == NULL) | |
6064 | continue; | |
6065 | vdev_prop_add_list(outnvl, propname, | |
6066 | vd->vdev_physpath, 0, ZPROP_SRC_NONE); | |
6067 | continue; | |
6068 | case VDEV_PROP_ENC_PATH: | |
6069 | if (vd->vdev_enc_sysfs_path == NULL) | |
6070 | continue; | |
6071 | vdev_prop_add_list(outnvl, propname, | |
6072 | vd->vdev_enc_sysfs_path, 0, ZPROP_SRC_NONE); | |
6073 | continue; | |
6074 | case VDEV_PROP_FRU: | |
6075 | if (vd->vdev_fru == NULL) | |
6076 | continue; | |
6077 | vdev_prop_add_list(outnvl, propname, | |
6078 | vd->vdev_fru, 0, ZPROP_SRC_NONE); | |
6079 | continue; | |
6080 | case VDEV_PROP_PARENT: | |
6081 | if (vd->vdev_parent != NULL) { | |
6082 | strval = vdev_name(vd->vdev_parent, | |
6083 | namebuf, sizeof (namebuf)); | |
6084 | vdev_prop_add_list(outnvl, propname, | |
6085 | strval, 0, ZPROP_SRC_NONE); | |
6086 | } | |
6087 | continue; | |
6088 | case VDEV_PROP_CHILDREN: | |
6089 | if (vd->vdev_children > 0) | |
6090 | strval = kmem_zalloc(ZAP_MAXVALUELEN, | |
6091 | KM_SLEEP); | |
6092 | for (uint64_t i = 0; i < vd->vdev_children; | |
6093 | i++) { | |
a926aab9 | 6094 | const char *vname; |
2a673e76 AJ |
6095 | |
6096 | vname = vdev_name(vd->vdev_child[i], | |
6097 | namebuf, sizeof (namebuf)); | |
6098 | if (vname == NULL) | |
6099 | vname = "(unknown)"; | |
6100 | if (strlen(strval) > 0) | |
6101 | strlcat(strval, ",", | |
6102 | ZAP_MAXVALUELEN); | |
6103 | strlcat(strval, vname, ZAP_MAXVALUELEN); | |
6104 | } | |
6105 | if (strval != NULL) { | |
6106 | vdev_prop_add_list(outnvl, propname, | |
6107 | strval, 0, ZPROP_SRC_NONE); | |
6108 | kmem_free(strval, ZAP_MAXVALUELEN); | |
6109 | } | |
6110 | continue; | |
6111 | case VDEV_PROP_NUMCHILDREN: | |
6112 | vdev_prop_add_list(outnvl, propname, NULL, | |
6113 | vd->vdev_children, ZPROP_SRC_NONE); | |
6114 | continue; | |
6115 | case VDEV_PROP_READ_ERRORS: | |
6116 | vdev_prop_add_list(outnvl, propname, NULL, | |
6117 | vd->vdev_stat.vs_read_errors, | |
6118 | ZPROP_SRC_NONE); | |
6119 | continue; | |
6120 | case VDEV_PROP_WRITE_ERRORS: | |
6121 | vdev_prop_add_list(outnvl, propname, NULL, | |
6122 | vd->vdev_stat.vs_write_errors, | |
6123 | ZPROP_SRC_NONE); | |
6124 | continue; | |
6125 | case VDEV_PROP_CHECKSUM_ERRORS: | |
6126 | vdev_prop_add_list(outnvl, propname, NULL, | |
6127 | vd->vdev_stat.vs_checksum_errors, | |
6128 | ZPROP_SRC_NONE); | |
6129 | continue; | |
6130 | case VDEV_PROP_INITIALIZE_ERRORS: | |
6131 | vdev_prop_add_list(outnvl, propname, NULL, | |
6132 | vd->vdev_stat.vs_initialize_errors, | |
6133 | ZPROP_SRC_NONE); | |
6134 | continue; | |
6135 | case VDEV_PROP_OPS_NULL: | |
6136 | vdev_prop_add_list(outnvl, propname, NULL, | |
6137 | vd->vdev_stat.vs_ops[ZIO_TYPE_NULL], | |
6138 | ZPROP_SRC_NONE); | |
6139 | continue; | |
6140 | case VDEV_PROP_OPS_READ: | |
6141 | vdev_prop_add_list(outnvl, propname, NULL, | |
6142 | vd->vdev_stat.vs_ops[ZIO_TYPE_READ], | |
6143 | ZPROP_SRC_NONE); | |
6144 | continue; | |
6145 | case VDEV_PROP_OPS_WRITE: | |
6146 | vdev_prop_add_list(outnvl, propname, NULL, | |
6147 | vd->vdev_stat.vs_ops[ZIO_TYPE_WRITE], | |
6148 | ZPROP_SRC_NONE); | |
6149 | continue; | |
6150 | case VDEV_PROP_OPS_FREE: | |
6151 | vdev_prop_add_list(outnvl, propname, NULL, | |
6152 | vd->vdev_stat.vs_ops[ZIO_TYPE_FREE], | |
6153 | ZPROP_SRC_NONE); | |
6154 | continue; | |
6155 | case VDEV_PROP_OPS_CLAIM: | |
6156 | vdev_prop_add_list(outnvl, propname, NULL, | |
6157 | vd->vdev_stat.vs_ops[ZIO_TYPE_CLAIM], | |
6158 | ZPROP_SRC_NONE); | |
6159 | continue; | |
6160 | case VDEV_PROP_OPS_TRIM: | |
6161 | /* | |
6162 | * TRIM ops and bytes are reported to user | |
6163 | * space as ZIO_TYPE_IOCTL. This is done to | |
6164 | * preserve the vdev_stat_t structure layout | |
6165 | * for user space. | |
6166 | */ | |
6167 | vdev_prop_add_list(outnvl, propname, NULL, | |
6168 | vd->vdev_stat.vs_ops[ZIO_TYPE_IOCTL], | |
6169 | ZPROP_SRC_NONE); | |
6170 | continue; | |
6171 | case VDEV_PROP_BYTES_NULL: | |
6172 | vdev_prop_add_list(outnvl, propname, NULL, | |
6173 | vd->vdev_stat.vs_bytes[ZIO_TYPE_NULL], | |
6174 | ZPROP_SRC_NONE); | |
6175 | continue; | |
6176 | case VDEV_PROP_BYTES_READ: | |
6177 | vdev_prop_add_list(outnvl, propname, NULL, | |
6178 | vd->vdev_stat.vs_bytes[ZIO_TYPE_READ], | |
6179 | ZPROP_SRC_NONE); | |
6180 | continue; | |
6181 | case VDEV_PROP_BYTES_WRITE: | |
6182 | vdev_prop_add_list(outnvl, propname, NULL, | |
6183 | vd->vdev_stat.vs_bytes[ZIO_TYPE_WRITE], | |
6184 | ZPROP_SRC_NONE); | |
6185 | continue; | |
6186 | case VDEV_PROP_BYTES_FREE: | |
6187 | vdev_prop_add_list(outnvl, propname, NULL, | |
6188 | vd->vdev_stat.vs_bytes[ZIO_TYPE_FREE], | |
6189 | ZPROP_SRC_NONE); | |
6190 | continue; | |
6191 | case VDEV_PROP_BYTES_CLAIM: | |
6192 | vdev_prop_add_list(outnvl, propname, NULL, | |
6193 | vd->vdev_stat.vs_bytes[ZIO_TYPE_CLAIM], | |
6194 | ZPROP_SRC_NONE); | |
6195 | continue; | |
6196 | case VDEV_PROP_BYTES_TRIM: | |
6197 | /* | |
6198 | * TRIM ops and bytes are reported to user | |
6199 | * space as ZIO_TYPE_IOCTL. This is done to | |
6200 | * preserve the vdev_stat_t structure layout | |
6201 | * for user space. | |
6202 | */ | |
6203 | vdev_prop_add_list(outnvl, propname, NULL, | |
6204 | vd->vdev_stat.vs_bytes[ZIO_TYPE_IOCTL], | |
6205 | ZPROP_SRC_NONE); | |
6206 | continue; | |
6207 | case VDEV_PROP_REMOVING: | |
6208 | vdev_prop_add_list(outnvl, propname, NULL, | |
6209 | vd->vdev_removing, ZPROP_SRC_NONE); | |
6210 | continue; | |
6211 | /* Numeric Properites */ | |
6212 | case VDEV_PROP_ALLOCATING: | |
2a673e76 AJ |
6213 | /* Leaf vdevs cannot have this property */ |
6214 | if (vd->vdev_mg == NULL && | |
6215 | vd->vdev_top != NULL) { | |
6216 | src = ZPROP_SRC_NONE; | |
6217 | intval = ZPROP_BOOLEAN_NA; | |
69f024a5 RW |
6218 | } else { |
6219 | err = vdev_prop_get_int(vd, prop, | |
6220 | &intval); | |
6221 | if (err && err != ENOENT) | |
6222 | break; | |
6223 | ||
6224 | if (intval == | |
6225 | vdev_prop_default_numeric(prop)) | |
6226 | src = ZPROP_SRC_DEFAULT; | |
6227 | else | |
6228 | src = ZPROP_SRC_LOCAL; | |
2a673e76 AJ |
6229 | } |
6230 | ||
69f024a5 | 6231 | vdev_prop_add_list(outnvl, propname, NULL, |
16f0fdad MZ |
6232 | intval, src); |
6233 | break; | |
6234 | case VDEV_PROP_FAILFAST: | |
6235 | src = ZPROP_SRC_LOCAL; | |
6236 | strval = NULL; | |
6237 | ||
6238 | err = zap_lookup(mos, objid, nvpair_name(elem), | |
6239 | sizeof (uint64_t), 1, &intval); | |
6240 | if (err == ENOENT) { | |
6241 | intval = vdev_prop_default_numeric( | |
6242 | prop); | |
6243 | err = 0; | |
6244 | } else if (err) { | |
6245 | break; | |
6246 | } | |
6247 | if (intval == vdev_prop_default_numeric(prop)) | |
6248 | src = ZPROP_SRC_DEFAULT; | |
6249 | ||
2a673e76 AJ |
6250 | vdev_prop_add_list(outnvl, propname, strval, |
6251 | intval, src); | |
6252 | break; | |
69f024a5 RW |
6253 | case VDEV_PROP_CHECKSUM_N: |
6254 | case VDEV_PROP_CHECKSUM_T: | |
6255 | case VDEV_PROP_IO_N: | |
6256 | case VDEV_PROP_IO_T: | |
6257 | err = vdev_prop_get_int(vd, prop, &intval); | |
6258 | if (err && err != ENOENT) | |
6259 | break; | |
6260 | ||
6261 | if (intval == vdev_prop_default_numeric(prop)) | |
6262 | src = ZPROP_SRC_DEFAULT; | |
6263 | else | |
6264 | src = ZPROP_SRC_LOCAL; | |
6265 | ||
6266 | vdev_prop_add_list(outnvl, propname, NULL, | |
6267 | intval, src); | |
6268 | break; | |
2a673e76 AJ |
6269 | /* Text Properties */ |
6270 | case VDEV_PROP_COMMENT: | |
6271 | /* Exists in the ZAP below */ | |
6272 | /* FALLTHRU */ | |
4ff7a8fa | 6273 | case VDEV_PROP_USERPROP: |
2a673e76 AJ |
6274 | /* User Properites */ |
6275 | src = ZPROP_SRC_LOCAL; | |
6276 | ||
6277 | err = zap_length(mos, objid, nvpair_name(elem), | |
6278 | &integer_size, &num_integers); | |
6279 | if (err) | |
6280 | break; | |
6281 | ||
6282 | switch (integer_size) { | |
6283 | case 8: | |
6284 | /* User properties cannot be integers */ | |
6285 | err = EINVAL; | |
6286 | break; | |
6287 | case 1: | |
6288 | /* string property */ | |
6289 | strval = kmem_alloc(num_integers, | |
6290 | KM_SLEEP); | |
6291 | err = zap_lookup(mos, objid, | |
6292 | nvpair_name(elem), 1, | |
6293 | num_integers, strval); | |
6294 | if (err) { | |
6295 | kmem_free(strval, | |
6296 | num_integers); | |
6297 | break; | |
6298 | } | |
6299 | vdev_prop_add_list(outnvl, propname, | |
6300 | strval, 0, src); | |
6301 | kmem_free(strval, num_integers); | |
6302 | break; | |
6303 | } | |
6304 | break; | |
6305 | default: | |
6306 | err = ENOENT; | |
6307 | break; | |
6308 | } | |
6309 | if (err) | |
6310 | break; | |
6311 | } | |
6312 | } else { | |
6313 | /* | |
6314 | * Get all properties from the MOS vdev property object. | |
6315 | */ | |
6316 | zap_cursor_t zc; | |
6317 | zap_attribute_t za; | |
6318 | for (zap_cursor_init(&zc, mos, objid); | |
6319 | (err = zap_cursor_retrieve(&zc, &za)) == 0; | |
6320 | zap_cursor_advance(&zc)) { | |
6321 | intval = 0; | |
6322 | strval = NULL; | |
6323 | zprop_source_t src = ZPROP_SRC_DEFAULT; | |
6324 | propname = za.za_name; | |
2a673e76 AJ |
6325 | |
6326 | switch (za.za_integer_length) { | |
6327 | case 8: | |
6328 | /* We do not allow integer user properties */ | |
6329 | /* This is likely an internal value */ | |
6330 | break; | |
6331 | case 1: | |
6332 | /* string property */ | |
6333 | strval = kmem_alloc(za.za_num_integers, | |
6334 | KM_SLEEP); | |
6335 | err = zap_lookup(mos, objid, za.za_name, 1, | |
6336 | za.za_num_integers, strval); | |
6337 | if (err) { | |
6338 | kmem_free(strval, za.za_num_integers); | |
6339 | break; | |
6340 | } | |
6341 | vdev_prop_add_list(outnvl, propname, strval, 0, | |
6342 | src); | |
6343 | kmem_free(strval, za.za_num_integers); | |
6344 | break; | |
6345 | ||
6346 | default: | |
6347 | break; | |
6348 | } | |
6349 | } | |
6350 | zap_cursor_fini(&zc); | |
6351 | } | |
6352 | ||
6353 | mutex_exit(&spa->spa_props_lock); | |
6354 | if (err && err != ENOENT) { | |
6355 | return (err); | |
6356 | } | |
6357 | ||
6358 | return (0); | |
6359 | } | |
6360 | ||
c28b2279 BB |
6361 | EXPORT_SYMBOL(vdev_fault); |
6362 | EXPORT_SYMBOL(vdev_degrade); | |
6363 | EXPORT_SYMBOL(vdev_online); | |
6364 | EXPORT_SYMBOL(vdev_offline); | |
6365 | EXPORT_SYMBOL(vdev_clear); | |
1b939560 | 6366 | |
fdc2d303 | 6367 | ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_count, UINT, ZMOD_RW, |
e4e94ca3 | 6368 | "Target number of metaslabs per top-level vdev"); |
80d52c39 | 6369 | |
fdc2d303 | 6370 | ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_shift, UINT, ZMOD_RW, |
ff73574c RN |
6371 | "Default lower limit for metaslab size"); |
6372 | ||
6373 | ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, max_ms_shift, UINT, ZMOD_RW, | |
6374 | "Default upper limit for metaslab size"); | |
93e28d66 | 6375 | |
fdc2d303 | 6376 | ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, min_ms_count, UINT, ZMOD_RW, |
d2734cce SD |
6377 | "Minimum number of metaslabs per top-level vdev"); |
6378 | ||
fdc2d303 | 6379 | ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, ms_count_limit, UINT, ZMOD_RW, |
e4e94ca3 DB |
6380 | "Practical upper limit of total metaslabs per top-level vdev"); |
6381 | ||
03fdcb9a | 6382 | ZFS_MODULE_PARAM(zfs, zfs_, slow_io_events_per_second, UINT, ZMOD_RW, |
ad796b8a | 6383 | "Rate limit slow IO (delay) events to this many per second"); |
80d52c39 | 6384 | |
7ada752a | 6385 | /* BEGIN CSTYLED */ |
03fdcb9a MM |
6386 | ZFS_MODULE_PARAM(zfs, zfs_, checksum_events_per_second, UINT, ZMOD_RW, |
6387 | "Rate limit checksum events to this many checksum errors per second " | |
7ada752a AZ |
6388 | "(do not set below ZED threshold)."); |
6389 | /* END CSTYLED */ | |
02638a30 | 6390 | |
03fdcb9a | 6391 | ZFS_MODULE_PARAM(zfs, zfs_, scan_ignore_errors, INT, ZMOD_RW, |
02638a30 | 6392 | "Ignore errors during resilver/scrub"); |
6cb8e530 | 6393 | |
03fdcb9a | 6394 | ZFS_MODULE_PARAM(zfs_vdev, vdev_, validate_skip, INT, ZMOD_RW, |
6cb8e530 | 6395 | "Bypass vdev_validate()"); |
53b1f5ea | 6396 | |
03fdcb9a MM |
6397 | ZFS_MODULE_PARAM(zfs, zfs_, nocacheflush, INT, ZMOD_RW, |
6398 | "Disable cache flushes"); | |
6fe3498c | 6399 | |
fdc2d303 | 6400 | ZFS_MODULE_PARAM(zfs, zfs_, embedded_slog_min_ms, UINT, ZMOD_RW, |
aa755b35 MA |
6401 | "Minimum number of metaslabs required to dedicate one for log blocks"); |
6402 | ||
7ada752a | 6403 | /* BEGIN CSTYLED */ |
6fe3498c | 6404 | ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, min_auto_ashift, |
ab8d9c17 | 6405 | param_set_min_auto_ashift, param_get_uint, ZMOD_RW, |
6fe3498c RM |
6406 | "Minimum ashift used when creating new top-level vdevs"); |
6407 | ||
6408 | ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, max_auto_ashift, | |
ab8d9c17 | 6409 | param_set_max_auto_ashift, param_get_uint, ZMOD_RW, |
6fe3498c RM |
6410 | "Maximum ashift used when optimizing for logical -> physical sector " |
6411 | "size on new top-level vdevs"); | |
4ea3f864 | 6412 | /* END CSTYLED */ |