/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 * Copyright (c) 2023, 2024, Klara Inc.
 */

#include <sys/zfs_context.h>
#include <sys/zfs_chksum.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_file.h>
#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fm/util.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/kstat.h>
#include "zfs_prop.h"
#include <sys/btree.h>
#include <sys/zfeature.h>
#include <sys/qat.h>
#include <sys/zstd/zstd.h>

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *      This lock must be acquired to do any of the following:
 *
 *      - Lookup a spa_t by name
 *      - Add or remove a spa_t from the namespace
 *      - Increase spa_refcount from non-zero
 *      - Check if spa_refcount is zero
 *      - Rename a spa_t
 *      - Add/remove/attach/detach devices
 *      - Held for the duration of create/destroy
 *      - Held at the start and end of import and export
 *
 *      It does not need to handle recursion. A create or destroy may
 *      reference objects (files or zvols) in other pools, but by
 *      definition they must have an existing reference, and will never need
 *      to lookup a spa_t by name.
 *
 * spa_refcount (per-spa zfs_refcount_t protected by mutex)
 *
 *      This reference count keeps track of any active users of the spa_t. The
 *      spa_t cannot be destroyed or freed while this is non-zero. Internally,
 *      the refcount is never really 'zero' - opening a pool implicitly keeps
 *      some references in the DMU. Internally we check against spa_minref, but
 *      present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *      This protects the spa_t from config changes, and must be held in
 *      the following circumstances:
 *
 *      - RW_READER to perform I/O to the spa
 *      - RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *      spa_namespace_lock  ->  spa_refcount
 *
 *      The namespace lock must be acquired to increase the refcount from 0
 *      or to check if it is zero.
 *
 *      spa_refcount  ->  spa_config_lock[]
 *
 *      There must be at least one valid reference on the spa_t to acquire
 *      the config lock.
 *
 *      spa_namespace_lock  ->  spa_config_lock[]
 *
 *      The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *      spa_lookup()            Lookup a spa_t by name.
 *
 *      spa_add()               Create a new spa_t in the namespace.
 *
 *      spa_remove()            Remove a spa_t from the namespace. This also
 *                              frees up any memory associated with the spa_t.
 *
 *      spa_next()              Returns the next spa_t in the system, or the
 *                              first if NULL is passed.
 *
 *      spa_evict_all()         Shutdown and remove all spa_t structures in
 *                              the system.
 *
 *      spa_guid_exists()       Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *      spa_open_ref()          Adds a reference to the given spa_t. Must be
 *                              called with spa_namespace_lock held if the
 *                              refcount is currently zero.
 *
 *      spa_close()             Remove a reference from the spa_t. This will
 *                              not free the spa_t or remove it from the
 *                              namespace. No locking is required.
 *
 *      spa_refcount_zero()     Returns true if the refcount is currently
 *                              zero. Must be called with spa_namespace_lock
 *                              held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer. To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership. For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *      Protects changes to the vdev tree topology, such as vdev
 *      add/remove/attach/detach. Protects the dirty config list
 *      (spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *      Protects changes to pool state and vdev state, such as vdev
 *      online/offline/fault/degrade/clear. Protects the dirty state list
 *      (spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *      Protects changes to metaslab groups and classes.
 *      Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *      Held by bp-level zios (those which have no io_vd upon entry)
 *      to prevent changes to the vdev tree. The bp-level zio implicitly
 *      protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *      Protects changes to metaslab groups and classes.
 *      Held as reader by metaslab_free(). SCL_FREE is distinct from
 *      SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *      blocks in zio_done() while another i/o that holds either
 *      SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *      Held as reader to prevent changes to the vdev tree during trivial
 *      inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the
 *      other locks, and lower than all of them, to ensure that it's safe
 *      to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a) spa_props_lock protects pool properties, spa_config and spa_config_list.
 *      The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b) I/O operations on leaf vdevs. For any zio operation that takes
 *      an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *      or zio_write_phys() -- the caller must ensure that the config cannot
 *      change in the interim, and that the vdev cannot be reopened.
 *      SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *      spa_vdev_enter()        Acquire the namespace lock and the config lock
 *                              for writing.
 *
 *      spa_vdev_exit()         Release the config lock, wait for all I/O
 *                              to complete, sync the updated configs to the
 *                              cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 */
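
/*
 * Illustrative sketch of the ordering rules above (hypothetical caller;
 * the pool name "tank" is an example). A typical reader follows
 * spa_refcount -> spa_config_lock[]: take a hold on the spa_t first,
 * then one config lock as reader around the work:
 *
 *      spa_t *spa;
 *      VERIFY0(spa_open("tank", &spa, FTAG));     (adds a spa_refcount hold)
 *      spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *      ... trivial vdev-tree inquiries ...
 *      spa_config_exit(spa, SCL_VDEV, FTAG);
 *      spa_close(spa, FTAG);
 */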

avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
kcondvar_t spa_namespace_cv;
static const int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

spa_mode_t spa_mode_global = SPA_MODE_UNINIT;

#ifdef ZFS_DEBUG
/*
 * Everything except dprintf, set_error, spa, and indirect_remap is on
 * by default in debug builds.
 */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
	ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption. When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
int zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata cannot be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress. While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked". Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that cannot be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all I/Os fail), and then later recovers. In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks. However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug). In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless. In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do. Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
int zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds. This value has two meanings. First, it is
 * used to determine when the spa_deadman() logic should fire. By default the
 * spa_deadman() will fire if spa_sync() has not completed in 600 seconds.
 * Second, the value determines if an I/O is considered "hung". Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in one of three behaviors controlled by zfs_deadman_failmode.
 */
uint64_t zfs_deadman_synctime_ms = 600000UL;	/* 10 min. */

/*
 * This value controls the maximum amount of time zio_wait() will block for an
 * outstanding IO. By default this is 300 seconds at which point the "hung"
 * behavior will be applied as described for zfs_deadman_synctime_ms.
 */
uint64_t zfs_deadman_ziotime_ms = 300000UL;	/* 5 min. */

/*
 * Check time in milliseconds. This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 60000UL;	/* 1 min. */

/*
 * By default the deadman is enabled.
 */
int zfs_deadman_enabled = B_TRUE;

/*
 * Controls the behavior of the deadman when it detects a "hung" I/O.
 * Valid values are zfs_deadman_failmode=<wait|continue|panic>.
 *
 * wait     - Wait for the "hung" I/O (default)
 * continue - Attempt to recover from a "hung" I/O
 * panic    - Panic the system
 */
const char *zfs_deadman_failmode = "wait";
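
/*
 * For example, on Linux the failmode can be changed at runtime through
 * the module parameter (sysfs path shown for illustration):
 *
 *      echo continue > /sys/module/zfs/parameters/zfs_deadman_failmode
 */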

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that. Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync(). All together,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
uint_t spa_asize_inflation = 24;
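
/*
 * Worked example (illustrative): with the default inflation factor of
 * 24, a single 128K logical write may need to reserve as much as
 * 128K * 24 = 3M of unallocated space in the worst case.
 */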

/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed (bounded by spa_max_slop). This ensures that we
 * don't run the pool completely out of space, due to unaccounted changes (e.g.
 * to the MOS). It also limits the worst-case time to allocate space. If we
 * have less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC. The ZIL metaslabs (spa_embedded_log_class) are
 * also part of this 3.2% of space which can't be consumed by normal writes;
 * the slop space "proper" (spa_get_slop_space()) is decreased by the embedded
 * log space.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space. They will only return ENOSPC if less than half
 * the slop space is free. Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the pool.
 * These are the operations that call dsl_pool_adjustedsize() with the netfree
 * argument set to TRUE.
 *
 * Operations that are almost guaranteed to free up space in the absence of
 * a pool checkpoint can use up to three quarters of the slop space
 * (e.g. zfs destroy).
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space. These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net
 * increase in the amount of space used, it is possible to run the pool
 * completely out of space, causing it to be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * Further, on very large pools, the slop space will be smaller than
 * 3.2%, to avoid reserving much more space than we actually need; bounded
 * by spa_max_slop (128GB).
 *
 * See also the comments in zfs_space_check_t.
 */
uint_t spa_slop_shift = 5;
static const uint64_t spa_min_slop = 128ULL * 1024 * 1024;
static const uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024;
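
/*
 * Worked example (illustrative): with spa_slop_shift = 5, a 1T pool
 * reserves 1T / 2^5 = 32G of slop. A 1G pool would compute only 32M,
 * so it is raised to spa_min_slop (128M); a 4P pool would compute 128T,
 * far above spa_max_slop, so it is capped at 128G.
 */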

/*
 * Number of allocators to use, per spa instance
 */
static int spa_num_allocators = 4;
static int spa_cpus_per_allocator = 4;

/*
 * Spa active allocator.
 * Valid values are zfs_active_allocator=<dynamic|cursor|new-dynamic>.
 */
const char *zfs_active_allocator = "dynamic";

void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);

	spa_import_progress_set_notes_nolog(spa, "%s", buf);
}

/*
 * By default dedup and user data indirects land in the special class.
 */
static int zfs_ddt_data_is_special = B_TRUE;
static int zfs_user_indirect_is_special = B_TRUE;

/*
 * The percentage of special class final space reserved for metadata only.
 * Once the special class is (100 - zfs_special_class_metadata_reserve_pct)
 * percent full, only metadata is allowed into the class.
 */
static uint_t zfs_special_class_metadata_reserve_pct = 25;

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
		scl->scl_count = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
		ASSERT(scl->scl_count == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				/* Back out the locks already taken. */
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (scl->scl_count != 0) {
				mutex_exit(&scl->scl_lock);
				/* Back out the locks already taken. */
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		scl->scl_count++;
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

static void
spa_config_enter_impl(spa_t *spa, int locks, const void *tag, krw_t rw,
    int mmp_flag)
{
	(void) tag;
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer ||
			    (!mmp_flag && scl->scl_write_wanted)) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (scl->scl_count != 0) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		scl->scl_count++;
		mutex_exit(&scl->scl_lock);
	}
	ASSERT3U(wlocks_held, <=, locks);
}

void
spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
	spa_config_enter_impl(spa, locks, tag, rw, 0);
}

/*
 * spa_config_enter_mmp() allows the mmp thread to cut in front of
 * outstanding write lock requests. This is needed since the mmp updates are
 * time sensitive and failure to service them promptly will result in a
 * suspended pool. This pool suspension has been seen in practice when there is
 * a single disk in a pool that is responding slowly and presumably about to
 * fail.
 */

void
spa_config_enter_mmp(spa_t *spa, int locks, const void *tag, krw_t rw)
{
	spa_config_enter_impl(spa, locks, tag, rw, 1);
}

void
spa_config_exit(spa_t *spa, int locks, const void *tag)
{
	(void) tag;
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(scl->scl_count > 0);
		if (--scl->scl_count == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && scl->scl_count != 0) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}
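
/*
 * Illustrative sketches (hypothetical callers). Because these locks
 * support ownership hand-off, SCL_ZIO may be entered by the issuing
 * thread and exited by the interrupt thread that completes the I/O:
 *
 *      spa_config_enter(spa, SCL_ZIO, zio, RW_READER);    (issue context)
 *      ...
 *      spa_config_exit(spa, SCL_ZIO, zio);            (interrupt context)
 *
 * spa_config_held() is typically used in assertions, e.g.:
 *
 *      ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER));
 */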

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

retry:
	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);
	if (spa == NULL)
		return (NULL);

	/*
	 * Avoid racing with import/export, which don't hold the namespace
	 * lock for their entire duration.
	 */
	if ((spa->spa_load_thread != NULL &&
	    spa->spa_load_thread != curthread) ||
	    (spa->spa_export_thread != NULL &&
	    spa->spa_export_thread != curthread)) {
		cv_wait(&spa_namespace_cv, &spa_namespace_lock);
		goto retry;
	}

	return (spa);
}

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/* Disable the deadman if the pool is suspended. */
	if (spa_suspended(spa))
		return;

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    (u_longlong_t)++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev, FTAG);

	spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
	    spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
	    MSEC_TO_TICK(zfs_deadman_checktime_ms));
}

static int
spa_log_sm_sort_by_txg(const void *va, const void *vb)
{
	const spa_log_sm_t *a = va;
	const spa_log_sm_t *b = vb;

	return (TREE_CMP(a->sls_txg, b->sls_txg));
}

/*
 * Create an uninitialized spa_t with the given name. Requires
 * spa_namespace_lock. The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;
	spa->spa_trust_config = B_TRUE;
	spa->spa_hostid = zone_get_hostid(NULL);

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
	spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);
	spa_set_deadman_failmode(spa, zfs_deadman_failmode);
	spa_set_allocator(spa, zfs_active_allocator);

	zfs_refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);
	spa_stats_init(spa);

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot)
		spa->spa_root = spa_strdup(altroot);

	/* Do not allow more allocators than fraction of CPUs. */
	spa->spa_alloc_count = MAX(MIN(spa_num_allocators,
	    boot_ncpus / MAX(spa_cpus_per_allocator, 1)), 1);

	spa->spa_allocs = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (spa_alloc_t), KM_SLEEP);
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mutex_init(&spa->spa_allocs[i].spaa_lock, NULL, MUTEX_DEFAULT,
		    NULL);
		avl_create(&spa->spa_allocs[i].spaa_tree, zio_bookmark_compare,
		    sizeof (zio_t), offsetof(zio_t, io_queue_node.a));
	}
	if (spa->spa_alloc_count > 1) {
		spa->spa_allocs_use = kmem_zalloc(offsetof(spa_allocs_use_t,
		    sau_inuse[spa->spa_alloc_count]), KM_SLEEP);
		mutex_init(&spa->spa_allocs_use->sau_lock, NULL, MUTEX_DEFAULT,
		    NULL);
	}

	avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
	    sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
	avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
	    sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
	list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
	    offsetof(log_summary_entry_t, lse_node));

	/*
	 * Every pool starts with the default cachefile.
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;
	spa->spa_min_alloc = INT_MAX;
	spa->spa_gcd_alloc = INT_MAX;

	/* Reset cached value */
	spa->spa_dedup_dspace = ~0ULL;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	list_create(&spa->spa_leaf_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_leaf_node));

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used. Requires
 * spa_namespace_lock. This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
	ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
	ASSERT0(spa->spa_waiters);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);

	if (spa->spa_root)
		spa_strfree(spa->spa_root);

	while ((dp = list_remove_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		avl_destroy(&spa->spa_allocs[i].spaa_tree);
		mutex_destroy(&spa->spa_allocs[i].spaa_lock);
	}
	kmem_free(spa->spa_allocs, spa->spa_alloc_count *
	    sizeof (spa_alloc_t));
	if (spa->spa_alloc_count > 1) {
		mutex_destroy(&spa->spa_allocs_use->sau_lock);
		kmem_free(spa->spa_allocs_use, offsetof(spa_allocs_use_t,
		    sau_inuse[spa->spa_alloc_count]));
	}

	avl_destroy(&spa->spa_metaslabs_by_flushed);
	avl_destroy(&spa->spa_sm_logs_by_txg);
	list_destroy(&spa->spa_log_summary);
	list_destroy(&spa->spa_config_list);
	list_destroy(&spa->spa_leaf_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	nvlist_free(spa->spa_feat_stats);
	spa_config_set(spa, NULL);

	zfs_refcount_destroy(&spa->spa_refcount);

	spa_stats_destroy(spa);
	spa_config_lock_destroy(spa);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);
	cv_destroy(&spa->spa_activities_cv);
	cv_destroy(&spa->spa_waiters_cv);

	mutex_destroy(&spa->spa_flushed_ms_lock);
	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);
	mutex_destroy(&spa->spa_feat_stats_lock);
	mutex_destroy(&spa->spa_activities_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none. If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}
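
/*
 * Illustrative sketch (hypothetical caller): walking every imported
 * pool under the namespace lock:
 *
 *      mutex_enter(&spa_namespace_lock);
 *      for (spa_t *spa = spa_next(NULL); spa != NULL; spa = spa_next(spa))
 *              zfs_dbgmsg("pool: %s", spa_name(spa));
 *      mutex_exit(&spa_namespace_lock);
 */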

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t. Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, const void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock) ||
	    spa->spa_load_thread == curthread);
	(void) zfs_refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t. Must have at least one reference, or
 * have the namespace lock held or be part of a pool import/export.
 */
void
spa_close(spa_t *spa, const void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock) ||
	    spa->spa_load_thread == curthread ||
	    spa->spa_export_thread == curthread);
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released. Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs. The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, const void *tag)
{
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero. Must be called with
 * spa_namespace_lock held or be the spa export thread. We really
 * compare against spa_minref, which is the number of references
 * acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
	    spa->spa_export_thread == curthread);

	return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static inline int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = (const spa_aux_t *)a;
	const spa_aux_t *sb = (const spa_aux_t *)b;

	return (TREE_CMP(sa->aux_guid, sb->aux_guid));
}

static void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

static void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

static boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

static void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree. When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree. In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive). When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree. These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration. The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}
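
/*
 * Illustrative sketch (hypothetical caller): checking whether a guid is
 * a known hot spare and, if so, whether some pool has it active:
 *
 *      uint64_t pool;
 *      int refcnt;
 *      if (spa_spare_exists(guid, &pool, &refcnt) && pool != 0ULL)
 *              zfs_dbgmsg("spare %llu active in pool %llu (refs %d)",
 *                  (u_longlong_t)guid, (u_longlong_t)pool, refcnt);
 */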
34dc7c2f BB |
1226 | /* |
1227 | * ========================================================================== | |
1228 | * SPA vdev locking | |
1229 | * ========================================================================== | |
1230 | */ | |
1231 | ||
1232 | /* | |
1233 | * Lock the given spa_t for the purpose of adding or removing a vdev. | |
1234 | * Grabs the global spa_namespace_lock plus the spa config lock for writing. | |
1235 | * It returns the next transaction group for the spa_t. | |
1236 | */ | |
1237 | uint64_t | |
1238 | spa_vdev_enter(spa_t *spa) | |
1239 | { | |
428870ff | 1240 | mutex_enter(&spa->spa_vdev_top_lock); |
34dc7c2f | 1241 | mutex_enter(&spa_namespace_lock); |
1b939560 | 1242 | |
89acef99 | 1243 | ASSERT0(spa->spa_export_thread); |
975a1325 | 1244 | |
1b939560 BB |
1245 | vdev_autotrim_stop_all(spa); |
1246 | ||
428870ff BB |
1247 | return (spa_vdev_config_enter(spa)); |
1248 | } | |
1249 | ||
9a49d3f3 BB |
1250 | /* |
1251 | * The same as spa_vdev_enter() above but additionally takes the guid of | |
1252 | * the vdev being detached. When there is a rebuild in process it will be | |
1253 | * suspended while the vdev tree is modified then resumed by spa_vdev_exit(). | |
1254 | * The rebuild is canceled if only a single child remains after the detach. | |
1255 | */ | |
1256 | uint64_t | |
1257 | spa_vdev_detach_enter(spa_t *spa, uint64_t guid) | |
1258 | { | |
1259 | mutex_enter(&spa->spa_vdev_top_lock); | |
1260 | mutex_enter(&spa_namespace_lock); | |
1261 | ||
89acef99 | 1262 | ASSERT0(spa->spa_export_thread); |
975a1325 | 1263 | |
9a49d3f3 BB |
1264 | vdev_autotrim_stop_all(spa); |
1265 | ||
1266 | if (guid != 0) { | |
1267 | vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE); | |
1268 | if (vd) { | |
1269 | vdev_rebuild_stop_wait(vd->vdev_top); | |
1270 | } | |
1271 | } | |
1272 | ||
1273 | return (spa_vdev_config_enter(spa)); | |
1274 | } | |
1275 | ||
428870ff BB |
1276 | /* |
1277 | * Internal implementation for spa_vdev_enter(). Used when a vdev | |
1278 | * operation requires multiple syncs (i.e. removing a device) while | |
1279 | * keeping the spa_namespace_lock held. | |
1280 | */ | |
1281 | uint64_t | |
1282 | spa_vdev_config_enter(spa_t *spa) | |
1283 | { | |
1284 | ASSERT(MUTEX_HELD(&spa_namespace_lock)); | |
34dc7c2f | 1285 | |
b128c09f | 1286 | spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); |
34dc7c2f BB |
1287 | |
1288 | return (spa_last_synced_txg(spa) + 1); | |
1289 | } | |

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error,
    const char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_embedded_log_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		if (vd->vdev_ops->vdev_op_leaf) {
			mutex_enter(&vd->vdev_initialize_lock);
			vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED,
			    NULL);
			mutex_exit(&vd->vdev_initialize_lock);

			mutex_enter(&vd->vdev_trim_lock);
			vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
			mutex_exit(&vd->vdev_trim_lock);
		}

		/*
		 * The vdev may be both a leaf and top-level device.
		 */
		vdev_autotrim_stop_wait(vd);

		spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_STATE_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	vdev_autotrim_restart(spa);
	vdev_rebuild_restart(spa);

	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately, if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;
	vdev_t *vdev_top;

	if (vd == NULL || vd == spa->spa_root_vdev) {
		vdev_top = spa->spa_root_vdev;
	} else {
		vdev_top = vd->vdev_top;
	}

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE, B_FALSE);

	if (vd != NULL) {
		if (vd != spa->spa_root_vdev)
			vdev_state_dirty(vdev_top);

		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(8) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}
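
/*
 * Illustrative sketch (editor's addition): the state-change counterpart to
 * the topology-change pattern above, as used by operations in the style of
 * zpool offline.  example_set_offline() is a hypothetical stand-in.
 */
#if 0
static int
example_vdev_offline(spa_t *spa, uint64_t guid)
{
	vdev_t *vd;
	int error = 0;

	/* Takes SCL_STATE_ALL for writing. */
	spa_vdev_state_enter(spa, SCL_NONE);

	if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
		error = SET_ERROR(ENODEV);
	else
		example_set_offline(vd);

	/* Drops the locks and waits for the state change to sync. */
	return (spa_vdev_state_exit(spa, vd, error));
}
#endif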

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we
		 * can't dirty the vdev config because the SCL_CONFIG lock
		 * is not held.  Thankfully, in this case we don't need to
		 * dirty the config because it will be written out anyway
		 * when we finish creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of
			 * adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	memcpy(new, s, len + 1);

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid;

	if (spa != NULL) {
		do {
			(void) random_get_pseudo_bytes((void *)&guid,
			    sizeof (guid));
		} while (guid == 0 || spa_guid_exists(spa_guid(spa), guid));
	} else {
		do {
			(void) random_get_pseudo_bytes((void *)&guid,
			    sizeof (guid));
		} while (guid == 0 || spa_guid_exists(guid, 0));
	}

	return (guid);
}

void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
	char type[256];
	const char *checksum = NULL;
	const char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		if (!BP_IS_EMBEDDED(bp)) {
			checksum =
			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		}
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SNPRINTF_BLKPTR(kmem_scnprintf, ' ', buf, buflen, bp, type, checksum,
	    compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}
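
/*
 * Illustrative sketch (editor's addition): zfs_strtonum() consumes a leading
 * run of lowercase hex digits and reports where parsing stopped, so a
 * "value:value" style string can be split without a separate tokenizer.
 * The input string is made up for the example.
 */
#if 0
static void
example_parse_hex_pair(void)
{
	char *end;
	uint64_t guid = zfs_strtonum("1a2b3c:400", &end);

	/* guid == 0x1a2b3c and *end == ':', so the second value follows. */
	uint64_t offset = zfs_strtonum(end + 1, NULL);
	ASSERT3U(offset, ==, 0x400);
}
#endif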

void
spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx)
{
	/*
	 * We bump the feature refcount for each special vdev added to the
	 * pool.
	 */
	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES));
	spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx);
}

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

boolean_t
spa_indirect_vdevs_loaded(spa_t *spa)
{
	return (spa->spa_indirect_vdevs_loaded);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strlcpy(buf, spa->spa_root, buflen);
}

uint32_t
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no
	 * root vdev.  We stash the original pool guid in 'spa_config_guid'
	 * to handle this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc.  It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

/*
 * Return the last txg where data can be dirtied.  The final txgs
 * will be used to just clear out any deferred frees that remain.
 */
uint64_t
spa_final_dirty_txg(spa_t *spa)
{
	return (spa->spa_final_txg - TXG_DEFER_SIZE);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/*
 * Return the inflated asize for a logical write in bytes.  This is used by
 * the DMU to calculate the space a logical write will require on disk.
 * If lsize is smaller than the largest physical block size allocatable on
 * this pool, we use that block size instead, since the write will end up
 * using a whole block anyway.
 */
uint64_t
spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
{
	if (lsize == 0)
		return (0);	/* No inflation needed */
	return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation);
}
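
/*
 * Worked example (editor's addition, figures illustrative): on a pool whose
 * largest ashift is 12 (4 KiB sectors), with the default spa_asize_inflation
 * of 24, a 512-byte logical write is charged MAX(512, 4096) * 24 = 96 KiB of
 * worst-case space.
 */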

/*
 * Return the amount of slop space in bytes.  It is typically 1/32 of the
 * pool (3.2%), minus the embedded log space.  On very small pools, it may
 * be slightly larger than this.  On very large pools, it will be capped to
 * the value of spa_max_slop.  The embedded log space is not included in
 * spa_dspace.  By subtracting it, the usable space (per "zfs list") is a
 * constant 97% of the total space, regardless of metaslab size (assuming
 * the default spa_slop_shift=5 and a non-tiny pool).
 *
 * See the comment above spa_slop_shift for more details.
 */
uint64_t
spa_get_slop_space(spa_t *spa)
{
	uint64_t space = 0;
	uint64_t slop = 0;

	/*
	 * Make sure spa_dedup_dspace has been set.
	 */
	if (spa->spa_dedup_dspace == ~0ULL)
		spa_update_dspace(spa);

	/*
	 * spa_get_dspace() includes the space only logically "used" by
	 * deduplicated data, so since it's not useful to reserve more
	 * space with more deduplicated data, we subtract that out here.
	 */
	space =
	    spa_get_dspace(spa) - spa->spa_dedup_dspace - brt_get_dspace(spa);
	slop = MIN(space >> spa_slop_shift, spa_max_slop);

	/*
	 * Subtract the embedded log space, but no more than half the (3.2%)
	 * unusable space.  Note, the "no more than half" is only relevant if
	 * zfs_embedded_slog_min_ms >> spa_slop_shift < 2, which is not true
	 * by default.
	 */
	uint64_t embedded_log =
	    metaslab_class_get_dspace(spa_embedded_log_class(spa));
	slop -= MIN(embedded_log, slop >> 1);

	/*
	 * Slop space should be at least spa_min_slop, but no more than half
	 * the entire pool.
	 */
	slop = MAX(slop, MIN(space >> 1, spa_min_slop));
	return (slop);
}
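
/*
 * Worked example (editor's addition, figures illustrative): on a 1 TiB pool
 * with the default spa_slop_shift=5 and no dedup, BRT, or embedded log
 * space, slop is 1 TiB >> 5 = 32 GiB; on a 100 GiB pool it is 3.2 GiB.
 * Neither the spa_max_slop cap nor the spa_min_slop floor applies in
 * either case.
 */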
1899 | ||
34dc7c2f BB |
1900 | uint64_t |
1901 | spa_get_dspace(spa_t *spa) | |
1902 | { | |
428870ff | 1903 | return (spa->spa_dspace); |
34dc7c2f BB |
1904 | } |
1905 | ||
d2734cce SD |
1906 | uint64_t |
1907 | spa_get_checkpoint_space(spa_t *spa) | |
1908 | { | |
1909 | return (spa->spa_checkpoint_info.sci_dspace); | |
1910 | } | |
1911 | ||
428870ff BB |
1912 | void |
1913 | spa_update_dspace(spa_t *spa) | |
34dc7c2f | 1914 | { |
428870ff | 1915 | spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) + |
67a1b037 | 1916 | ddt_get_dedup_dspace(spa) + brt_get_dspace(spa); |
2a673e76 | 1917 | if (spa->spa_nonallocating_dspace > 0) { |
a1d477c2 | 1918 | /* |
2a673e76 AJ |
1919 | * Subtract the space provided by all non-allocating vdevs that |
1920 | * contribute to dspace. If a file is overwritten, its old | |
1921 | * blocks are freed and new blocks are allocated. If there are | |
1922 | * no snapshots of the file, the available space should remain | |
1923 | * the same. The old blocks could be freed from the | |
1924 | * non-allocating vdev, but the new blocks must be allocated on | |
1925 | * other (allocating) vdevs. By reserving the entire size of | |
1926 | * the non-allocating vdevs (including allocated space), we | |
1927 | * ensure that there will be enough space on the allocating | |
1928 | * vdevs for this file overwrite to succeed. | |
a1d477c2 MA |
1929 | * |
1930 | * Note that the DMU/DSL doesn't actually know or care | |
1931 | * how much space is allocated (it does its own tracking | |
1932 | * of how much space has been logically used). So it | |
1933 | * doesn't matter that the data we are moving may be | |
2a673e76 | 1934 | * allocated twice (on the old device and the new device). |
a1d477c2 | 1935 | */ |
2a673e76 AJ |
1936 | ASSERT3U(spa->spa_dspace, >=, spa->spa_nonallocating_dspace); |
1937 | spa->spa_dspace -= spa->spa_nonallocating_dspace; | |
a1d477c2 | 1938 | } |
34dc7c2f BB |
1939 | } |

/*
 * Return the failure mode that has been set for this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint64_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended != ZIO_SUSPEND_NONE);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

metaslab_class_t *
spa_embedded_log_class(spa_t *spa)
{
	return (spa->spa_embedded_log_class);
}

metaslab_class_t *
spa_special_class(spa_t *spa)
{
	return (spa->spa_special_class);
}

metaslab_class_t *
spa_dedup_class(spa_t *spa)
{
	return (spa->spa_dedup_class);
}

/*
 * Locate an appropriate allocation class
 */
metaslab_class_t *
spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype,
    uint_t level, uint_t special_smallblk)
{
	/*
	 * ZIL allocations determine their class in zio_alloc_zil().
	 */
	ASSERT(objtype != DMU_OT_INTENT_LOG);

	boolean_t has_special_class = spa->spa_special_class->mc_groups != 0;

	if (DMU_OT_IS_DDT(objtype)) {
		if (spa->spa_dedup_class->mc_groups != 0)
			return (spa_dedup_class(spa));
		else if (has_special_class && zfs_ddt_data_is_special)
			return (spa_special_class(spa));
		else
			return (spa_normal_class(spa));
	}

	/* Indirect blocks for user data can land in special if allowed */
	if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) {
		if (has_special_class && zfs_user_indirect_is_special)
			return (spa_special_class(spa));
		else
			return (spa_normal_class(spa));
	}

	if (DMU_OT_IS_METADATA(objtype) || level > 0) {
		if (has_special_class)
			return (spa_special_class(spa));
		else
			return (spa_normal_class(spa));
	}

	/*
	 * Allow small file blocks in special class in some cases (like
	 * for the dRAID vdev feature).  But always leave a reserve of
	 * zfs_special_class_metadata_reserve_pct exclusively for metadata.
	 */
	if (DMU_OT_IS_FILE(objtype) &&
	    has_special_class && size <= special_smallblk) {
		metaslab_class_t *special = spa_special_class(spa);
		uint64_t alloc = metaslab_class_get_alloc(special);
		uint64_t space = metaslab_class_get_space(special);
		uint64_t limit =
		    (space * (100 - zfs_special_class_metadata_reserve_pct))
		    / 100;

		if (alloc < limit)
			return (special);
	}

	return (spa_normal_class(spa));
}
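
/*
 * Illustrative sketch (editor's addition): how block parameters steer class
 * selection when a special vdev is configured.  The 32K small-block cutoff
 * below stands in for a hypothetical per-dataset setting.
 */
#if 0
static void
example_class_routing(spa_t *spa)
{
	/* Level-0 metadata prefers the special class when one exists. */
	metaslab_class_t *mc_meta = spa_preferred_class(spa, 4096,
	    DMU_OT_DNODE, 0, 0);

	/* A 16K file block fits under the cutoff: special, if room. */
	metaslab_class_t *mc_small = spa_preferred_class(spa, 16384,
	    DMU_OT_PLAIN_FILE_CONTENTS, 0, 32768);

	/* A 128K file block exceeds the cutoff: normal class. */
	metaslab_class_t *mc_large = spa_preferred_class(spa, 131072,
	    DMU_OT_PLAIN_FILE_CONTENTS, 0, 32768);

	(void) mc_meta; (void) mc_small; (void) mc_large;
}
#endif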

void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_insert_head(&spa->spa_evicting_os_list, os);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_remove(&spa->spa_evicting_os_list, os);
	cv_broadcast(&spa->spa_evicting_os_cv);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	while (!list_is_empty(&spa->spa_evicting_os_list))
		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
	mutex_exit(&spa->spa_evicting_os_lock);

	dmu_buf_user_evict_wait();
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}

spa_autotrim_t
spa_get_autotrim(spa_t *spa)
{
	return (spa->spa_autotrim);
}

uint64_t
spa_deadman_ziotime(spa_t *spa)
{
	return (spa->spa_deadman_ziotime);
}

uint64_t
spa_get_deadman_failmode(spa_t *spa)
{
	return (spa->spa_deadman_failmode);
}

void
spa_set_deadman_failmode(spa_t *spa, const char *failmode)
{
	if (strcmp(failmode, "wait") == 0)
		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
	else if (strcmp(failmode, "continue") == 0)
		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE;
	else if (strcmp(failmode, "panic") == 0)
		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC;
	else
		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
}

void
spa_set_deadman_ziotime(hrtime_t ns)
{
	spa_t *spa = NULL;

	if (spa_mode_global != SPA_MODE_UNINIT) {
		mutex_enter(&spa_namespace_lock);
		while ((spa = spa_next(spa)) != NULL)
			spa->spa_deadman_ziotime = ns;
		mutex_exit(&spa_namespace_lock);
	}
}

void
spa_set_deadman_synctime(hrtime_t ns)
{
	spa_t *spa = NULL;

	if (spa_mode_global != SPA_MODE_UNINIT) {
		mutex_enter(&spa_namespace_lock);
		while ((spa = spa_next(spa)) != NULL)
			spa->spa_deadman_synctime = ns;
		mutex_exit(&spa_namespace_lock);
	}
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
		if (vd != NULL)
			dsize = (asize >> SPA_MINBLOCKSHIFT) *
			    vd->vdev_deflate_ratio;
	}

	return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}
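
/*
 * Worked example (editor's addition, figures illustrative): with deflation
 * enabled, a DVA with asize 128 KiB on a vdev whose vdev_deflate_ratio is
 * 512 yields dsize = (131072 >> SPA_MINBLOCKSHIFT) * 512 = 131072 bytes,
 * i.e. no shrinkage; a parity-heavy vdev with a smaller ratio reports
 * proportionally less, and bp_get_dsize() sums the result over all DVAs.
 */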

uint64_t
spa_dirty_data(spa_t *spa)
{
	return (spa->spa_dsl_pool->dp_dirty_total);
}

/*
 * ==========================================================================
 * SPA Import Progress Routines
 * ==========================================================================
 */

typedef struct spa_import_progress {
	uint64_t		pool_guid;	/* unique id for updates */
	char			*pool_name;
	spa_load_state_t	spa_load_state;
	char			*spa_load_notes;
	uint64_t		mmp_sec_remaining;	/* MMP activity check */
	uint64_t		spa_load_max_txg;	/* rewind txg */
	procfs_list_node_t	smh_node;
} spa_import_progress_t;

spa_history_list_t *spa_import_progress_list = NULL;

static int
spa_import_progress_show_header(struct seq_file *f)
{
	seq_printf(f, "%-20s %-14s %-14s %-12s %-16s %s\n", "pool_guid",
	    "load_state", "multihost_secs", "max_txg",
	    "pool_name", "notes");
	return (0);
}

static int
spa_import_progress_show(struct seq_file *f, void *data)
{
	spa_import_progress_t *sip = (spa_import_progress_t *)data;

	seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %-16s %s\n",
	    (u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state,
	    (u_longlong_t)sip->mmp_sec_remaining,
	    (u_longlong_t)sip->spa_load_max_txg,
	    (sip->pool_name ? sip->pool_name : "-"),
	    (sip->spa_load_notes ? sip->spa_load_notes : "-"));

	return (0);
}

/* Remove oldest elements from list until there are no more than 'size' left */
static void
spa_import_progress_truncate(spa_history_list_t *shl, unsigned int size)
{
	spa_import_progress_t *sip;
	while (shl->size > size) {
		sip = list_remove_head(&shl->procfs_list.pl_list);
		if (sip->pool_name)
			spa_strfree(sip->pool_name);
		if (sip->spa_load_notes)
			kmem_strfree(sip->spa_load_notes);
		kmem_free(sip, sizeof (spa_import_progress_t));
		shl->size--;
	}

	IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list));
}

static void
spa_import_progress_init(void)
{
	spa_import_progress_list = kmem_zalloc(sizeof (spa_history_list_t),
	    KM_SLEEP);

	spa_import_progress_list->size = 0;

	spa_import_progress_list->procfs_list.pl_private =
	    spa_import_progress_list;

	procfs_list_install("zfs",
	    NULL,
	    "import_progress",
	    0644,
	    &spa_import_progress_list->procfs_list,
	    spa_import_progress_show,
	    spa_import_progress_show_header,
	    NULL,
	    offsetof(spa_import_progress_t, smh_node));
}

static void
spa_import_progress_destroy(void)
{
	spa_history_list_t *shl = spa_import_progress_list;
	procfs_list_uninstall(&shl->procfs_list);
	spa_import_progress_truncate(shl, 0);
	procfs_list_destroy(&shl->procfs_list);
	kmem_free(shl, sizeof (spa_history_list_t));
}

int
spa_import_progress_set_state(uint64_t pool_guid,
    spa_load_state_t load_state)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;
	int error = ENOENT;

	if (shl->size == 0)
		return (0);

	mutex_enter(&shl->procfs_list.pl_lock);
	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
		if (sip->pool_guid == pool_guid) {
			sip->spa_load_state = load_state;
			if (sip->spa_load_notes != NULL) {
				kmem_strfree(sip->spa_load_notes);
				sip->spa_load_notes = NULL;
			}
			error = 0;
			break;
		}
	}
	mutex_exit(&shl->procfs_list.pl_lock);

	return (error);
}

static void
spa_import_progress_set_notes_impl(spa_t *spa, boolean_t log_dbgmsg,
    const char *fmt, va_list adx)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;
	uint64_t pool_guid = spa_guid(spa);

	if (shl->size == 0)
		return;

	char *notes = kmem_vasprintf(fmt, adx);

	mutex_enter(&shl->procfs_list.pl_lock);
	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
		if (sip->pool_guid == pool_guid) {
			if (sip->spa_load_notes != NULL) {
				kmem_strfree(sip->spa_load_notes);
				sip->spa_load_notes = NULL;
			}
			sip->spa_load_notes = notes;
			if (log_dbgmsg)
				zfs_dbgmsg("'%s' %s", sip->pool_name, notes);
			notes = NULL;
			break;
		}
	}
	mutex_exit(&shl->procfs_list.pl_lock);
	if (notes != NULL)
		kmem_strfree(notes);
}

void
spa_import_progress_set_notes(spa_t *spa, const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	spa_import_progress_set_notes_impl(spa, B_TRUE, fmt, adx);
	va_end(adx);
}

void
spa_import_progress_set_notes_nolog(spa_t *spa, const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	spa_import_progress_set_notes_impl(spa, B_FALSE, fmt, adx);
	va_end(adx);
}
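
/*
 * Illustrative sketch (editor's addition): the notes interface is
 * printf-style, so load code can publish human-readable progress into the
 * import_progress procfs file.  The message text is made up.
 */
#if 0
static void
example_report_progress(spa_t *spa, uint64_t done, uint64_t total)
{
	/* Mirrored to zfs_dbgmsg as well as the procfs entry. */
	spa_import_progress_set_notes(spa,
	    "Replaying ZIL: %llu of %llu datasets",
	    (u_longlong_t)done, (u_longlong_t)total);

	/* High-frequency updates can skip the debug log. */
	spa_import_progress_set_notes_nolog(spa,
	    "Replaying ZIL: %llu of %llu datasets",
	    (u_longlong_t)done, (u_longlong_t)total);
}
#endif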

int
spa_import_progress_set_max_txg(uint64_t pool_guid, uint64_t load_max_txg)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;
	int error = ENOENT;

	if (shl->size == 0)
		return (0);

	mutex_enter(&shl->procfs_list.pl_lock);
	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
		if (sip->pool_guid == pool_guid) {
			sip->spa_load_max_txg = load_max_txg;
			error = 0;
			break;
		}
	}
	mutex_exit(&shl->procfs_list.pl_lock);

	return (error);
}

int
spa_import_progress_set_mmp_check(uint64_t pool_guid,
    uint64_t mmp_sec_remaining)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;
	int error = ENOENT;

	if (shl->size == 0)
		return (0);

	mutex_enter(&shl->procfs_list.pl_lock);
	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
		if (sip->pool_guid == pool_guid) {
			sip->mmp_sec_remaining = mmp_sec_remaining;
			error = 0;
			break;
		}
	}
	mutex_exit(&shl->procfs_list.pl_lock);

	return (error);
}

/*
 * A new import is in progress, add an entry.
 */
void
spa_import_progress_add(spa_t *spa)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;
	const char *poolname = NULL;

	sip = kmem_zalloc(sizeof (spa_import_progress_t), KM_SLEEP);
	sip->pool_guid = spa_guid(spa);

	(void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME,
	    &poolname);
	if (poolname == NULL)
		poolname = spa_name(spa);
	sip->pool_name = spa_strdup(poolname);
	sip->spa_load_state = spa_load_state(spa);
	sip->spa_load_notes = NULL;

	mutex_enter(&shl->procfs_list.pl_lock);
	procfs_list_add(&shl->procfs_list, sip);
	shl->size++;
	mutex_exit(&shl->procfs_list.pl_lock);
}

void
spa_import_progress_remove(uint64_t pool_guid)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;

	mutex_enter(&shl->procfs_list.pl_lock);
	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
		if (sip->pool_guid == pool_guid) {
			if (sip->pool_name)
				spa_strfree(sip->pool_name);
			if (sip->spa_load_notes)
				spa_strfree(sip->spa_load_notes);
			list_remove(&shl->procfs_list.pl_list, sip);
			shl->size--;
			kmem_free(sip, sizeof (spa_import_progress_t));
			break;
		}
	}
	mutex_exit(&shl->procfs_list.pl_lock);
}
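
/*
 * Illustrative sketch (editor's addition): the lifecycle of a progress entry
 * around an import attempt.  example_do_load() is a hypothetical stand-in
 * for the actual load path.
 */
#if 0
static int
example_tracked_import(spa_t *spa)
{
	spa_import_progress_add(spa);
	(void) spa_import_progress_set_state(spa_guid(spa),
	    spa_load_state(spa));

	int error = example_do_load(spa);

	/* The entry is dropped once the import succeeds or is abandoned. */
	spa_import_progress_remove(spa_guid(spa));
	return (error);
}
#endif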

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);

	return (TREE_ISIGN(s));
}

void
spa_boot_init(void)
{
	spa_config_load();
}

void
spa_init(spa_mode_t mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifndef _KERNEL
	if (spa_mode_global != SPA_MODE_READ && dprintf_find_string("watch")) {
		struct sigaction sa;

		sa.sa_flags = SA_SIGINFO;
		sigemptyset(&sa.sa_mask);
		sa.sa_sigaction = arc_buf_sigsegv;

		if (sigaction(SIGSEGV, &sa, NULL) == -1) {
			perror("could not enable watchpoints: "
			    "sigaction(SIGSEGV, ...) = ");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif

	fm_init();
	zfs_refcount_init();
	unique_init();
	zfs_btree_init();
	metaslab_stat_init();
	brt_init();
	ddt_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_mirror_stat_init();
	vdev_raidz_math_init();
	vdev_file_init();
	zfs_prop_init();
	chksum_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	vdev_prop_init();
	l2arc_start();
	scan_init();
	qat_init();
	spa_import_progress_init();
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_file_fini();
	vdev_mirror_stat_fini();
	vdev_raidz_math_fini();
	chksum_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	ddt_fini();
	brt_fini();
	metaslab_stat_fini();
	zfs_btree_fini();
	unique_fini();
	zfs_refcount_fini();
	fm_fini();
	scan_fini();
	qat_fini();
	spa_import_progress_destroy();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has a dedicated slog device.  No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_groups != 0);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config);
}

/*
 * Returns true if there is a pending sync task in any of the current
 * syncing txg, the current quiescing txg, or the current open txg.
 */
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
	    !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
}

spa_mode_t
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stat per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
		spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
	else
		spa->spa_scan_pass_scrub_pause = 0;

	if (dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan))
		spa->spa_scan_pass_errorscrub_pause = spa->spa_scan_pass_start;
	else
		spa->spa_scan_pass_errorscrub_pause = 0;

	spa->spa_scan_pass_scrub_spent_paused = 0;
	spa->spa_scan_pass_exam = 0;
	spa->spa_scan_pass_issued = 0;

	/* error scrub stats */
	spa->spa_scan_pass_errorscrub_spent_paused = 0;
}

/*
 * Get scan stats for zpool status reports
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || (scn->scn_phys.scn_func == POOL_SCAN_NONE &&
	    scn->errorscrub_phys.dep_func == POOL_SCAN_NONE))
		return (SET_ERROR(ENOENT));

	memset(ps, 0, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_state = scn->scn_phys.scn_state;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_skipped = scn->scn_phys.scn_skipped;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;

	/* data not stored on disk */
	ps->pss_pass_exam = spa->spa_scan_pass_exam;
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
	ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
	ps->pss_pass_issued = spa->spa_scan_pass_issued;
	ps->pss_issued =
	    scn->scn_issued_before_pass + spa->spa_scan_pass_issued;

	/* error scrub data stored on disk */
	ps->pss_error_scrub_func = scn->errorscrub_phys.dep_func;
	ps->pss_error_scrub_state = scn->errorscrub_phys.dep_state;
	ps->pss_error_scrub_start = scn->errorscrub_phys.dep_start_time;
	ps->pss_error_scrub_end = scn->errorscrub_phys.dep_end_time;
	ps->pss_error_scrub_examined = scn->errorscrub_phys.dep_examined;
	ps->pss_error_scrub_to_be_examined =
	    scn->errorscrub_phys.dep_to_examine;

	/* error scrub data not stored on disk */
	ps->pss_pass_error_scrub_pause = spa->spa_scan_pass_errorscrub_pause;

	return (0);
}
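
/*
 * Illustrative sketch (editor's addition): deriving a percent-complete
 * figure from the returned stats, roughly as zpool status does.
 */
#if 0
static void
example_scan_progress(spa_t *spa)
{
	pool_scan_stat_t ps;

	if (spa_scan_get_stats(spa, &ps) == 0 && ps.pss_to_examine != 0) {
		uint64_t pct = 100 * ps.pss_issued / ps.pss_to_examine;
		zfs_dbgmsg("scan %llu%% issued", (u_longlong_t)pct);
	}
}
#endif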

int
spa_maxblocksize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SPA_MAXBLOCKSIZE);
	else
		return (SPA_OLD_MAXBLOCKSIZE);
}

/*
 * Returns the txg that the last device removal completed.  No indirect
 * mappings have been added since this txg.
 */
uint64_t
spa_get_last_removal_txg(spa_t *spa)
{
	uint64_t vdevid;
	uint64_t ret = -1ULL;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	/*
	 * sr_prev_indirect_vdev is only modified while holding all the
	 * config locks, so it is sufficient to hold SCL_VDEV as reader when
	 * examining it.
	 */
	vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;

	while (vdevid != -1ULL) {
		vdev_t *vd = vdev_lookup_top(spa, vdevid);
		vdev_indirect_births_t *vib = vd->vdev_indirect_births;

		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);

		/*
		 * If the removal did not remap any data, we don't care.
		 */
		if (vdev_indirect_births_count(vib) != 0) {
			ret = vdev_indirect_births_last_entry_txg(vib);
			break;
		}

		vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);

	IMPLY(ret != -1ULL,
	    spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));

	return (ret);
}

int
spa_maxdnodesize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
		return (DNODE_MAX_SIZE);
	else
		return (DNODE_MIN_SIZE);
}

boolean_t
spa_multihost(spa_t *spa)
{
	return (spa->spa_multihost ? B_TRUE : B_FALSE);
}

uint32_t
spa_get_hostid(spa_t *spa)
{
	return (spa->spa_hostid);
}

boolean_t
spa_trust_config(spa_t *spa)
{
	return (spa->spa_trust_config);
}

uint64_t
spa_missing_tvds_allowed(spa_t *spa)
{
	return (spa->spa_missing_tvds_allowed);
}

space_map_t *
spa_syncing_log_sm(spa_t *spa)
{
	return (spa->spa_syncing_log_sm);
}

void
spa_set_missing_tvds(spa_t *spa, uint64_t missing)
{
	spa->spa_missing_tvds = missing;
}

/*
 * Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc).
 */
const char *
spa_state_to_name(spa_t *spa)
{
	ASSERT3P(spa, !=, NULL);

	/*
	 * It is possible for the spa to exist without a root vdev while it
	 * transitions during import/export.
	 */
	vdev_t *rvd = spa->spa_root_vdev;
	if (rvd == NULL) {
		return ("TRANSITIONING");
	}
	vdev_state_t state = rvd->vdev_state;
	vdev_aux_t aux = rvd->vdev_stat.vs_aux;

	if (spa_suspended(spa))
		return ("SUSPENDED");

	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return ("OFFLINE");
	case VDEV_STATE_REMOVED:
		return ("REMOVED");
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return ("FAULTED");
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return ("SPLIT");
		else
			return ("UNAVAIL");
	case VDEV_STATE_FAULTED:
		return ("FAULTED");
	case VDEV_STATE_DEGRADED:
		return ("DEGRADED");
	case VDEV_STATE_HEALTHY:
		return ("ONLINE");
	default:
		break;
	}

	return ("UNKNOWN");
}
2906 | ||
d2734cce SD |
2907 | boolean_t |
2908 | spa_top_vdevs_spacemap_addressable(spa_t *spa) | |
2909 | { | |
2910 | vdev_t *rvd = spa->spa_root_vdev; | |
2911 | for (uint64_t c = 0; c < rvd->vdev_children; c++) { | |
2912 | if (!vdev_is_spacemap_addressable(rvd->vdev_child[c])) | |
2913 | return (B_FALSE); | |
2914 | } | |
2915 | return (B_TRUE); | |
2916 | } | |
2917 | ||
2918 | boolean_t | |
2919 | spa_has_checkpoint(spa_t *spa) | |
2920 | { | |
2921 | return (spa->spa_checkpoint_txg != 0); | |
2922 | } | |
2923 | ||
2924 | boolean_t | |
2925 | spa_importing_readonly_checkpoint(spa_t *spa) | |
2926 | { | |
2927 | return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) && | |
da92d5cb | 2928 | spa->spa_mode == SPA_MODE_READ); |
d2734cce SD |
2929 | } |
2930 | ||
2931 | uint64_t | |
2932 | spa_min_claim_txg(spa_t *spa) | |
2933 | { | |
2934 | uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg; | |
2935 | ||
2936 | if (checkpoint_txg != 0) | |
2937 | return (checkpoint_txg + 1); | |
2938 | ||
2939 | return (spa->spa_first_txg); | |
2940 | } | |
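/*
 * Illustrative sketch, not part of the original source: a claim-time
 * predicate in the spirit of ZIL claiming, which must skip blocks born
 * before spa_min_claim_txg() so state covered by a checkpoint is never
 * modified.  txg_claimable() is a hypothetical helper.
 */
static boolean_t
txg_claimable(spa_t *spa, uint64_t birth_txg)
{
	/* Blocks at or before the checkpoint txg must be left untouched. */
	return (birth_txg >= spa_min_claim_txg(spa));
}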
2941 | ||
2942 | /* | |
2943 | * If there is a checkpoint, async destroys may consume more space from | |
2944 | * the pool instead of freeing it. In an attempt to save the pool from | |
2945 | * getting suspended when it is about to run out of space, we stop | |
2946 | * processing async destroys. | |
2947 | */ | |
2948 | boolean_t | |
2949 | spa_suspend_async_destroy(spa_t *spa) | |
2950 | { | |
2951 | dsl_pool_t *dp = spa_get_dsl(spa); | |
2952 | ||
2953 | uint64_t unreserved = dsl_pool_unreserved_space(dp, | |
2954 | ZFS_SPACE_CHECK_EXTRA_RESERVED); | |
2955 | uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes; | |
2956 | uint64_t avail = (unreserved > used) ? (unreserved - used) : 0; | |
2957 | ||
2958 | if (spa_has_checkpoint(spa) && avail == 0) | |
2959 | return (B_TRUE); | |
2960 | ||
2961 | return (B_FALSE); | |
2962 | } | |
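/*
 * Illustrative sketch, not part of the original source: a sync-time loop
 * would consult spa_suspend_async_destroy() before each unit of destroy
 * work and back off while a checkpoint makes frees consume space.  The
 * do_work callback is a hypothetical stand-in for the real processing.
 */
static void
async_destroy_step(spa_t *spa, void (*do_work)(spa_t *))
{
	if (spa_suspend_async_destroy(spa))
		return;	/* out of unreserved space; try again later */
	do_work(spa);
}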
2963 | ||
93ce2b4c | 2964 | #if defined(_KERNEL) |
8fb1ede1 | 2965 | |
e64e84ec MM |
2966 | int |
2967 | param_set_deadman_failmode_common(const char *val) | |
8fb1ede1 BB |
2968 | { |
2969 | spa_t *spa = NULL; | |
2970 | char *p; | |
2971 | ||
2972 | if (val == NULL) | |
e64e84ec | 2973 | return (SET_ERROR(EINVAL)); |
8fb1ede1 BB |
2974 | |
2975 | if ((p = strchr(val, '\n')) != NULL) | |
2976 | *p = '\0'; | |
2977 | ||
2978 | if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 && | |
2979 | strcmp(val, "panic") != 0) | |
e64e84ec | 2980 | return (SET_ERROR(EINVAL)); |
8fb1ede1 | 2981 | |
da92d5cb | 2982 | if (spa_mode_global != SPA_MODE_UNINIT) { |
d1043e2f TC |
2983 | mutex_enter(&spa_namespace_lock); |
2984 | while ((spa = spa_next(spa)) != NULL) | |
2985 | spa_set_deadman_failmode(spa, val); | |
2986 | mutex_exit(&spa_namespace_lock); | |
2987 | } | |
8fb1ede1 | 2988 | |
e64e84ec | 2989 | return (0); |
8fb1ede1 | 2990 | } |
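/*
 * Illustrative sketch, not part of the original source: a Linux-style
 * module-parameter setter would validate through the common helper and
 * only then store the string.  The exact kernel glue varies by platform
 * and is an assumption here, not the verbatim wrapper.
 */
static int
deadman_failmode_set(const char *val, zfs_kernel_param_t *kp)
{
	int error = param_set_deadman_failmode_common(val);

	if (error == 0)
		error = param_set_charp(val, kp);	/* Linux helper */

	return (error);
}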
03fdcb9a MM |
2991 | #endif |
2992 | ||
c28b2279 BB |
2993 | /* Namespace manipulation */ |
2994 | EXPORT_SYMBOL(spa_lookup); | |
2995 | EXPORT_SYMBOL(spa_add); | |
2996 | EXPORT_SYMBOL(spa_remove); | |
2997 | EXPORT_SYMBOL(spa_next); | |
2998 | ||
2999 | /* Refcount functions */ | |
3000 | EXPORT_SYMBOL(spa_open_ref); | |
3001 | EXPORT_SYMBOL(spa_close); | |
3002 | EXPORT_SYMBOL(spa_refcount_zero); | |
3003 | ||
3004 | /* Pool configuration lock */ | |
3005 | EXPORT_SYMBOL(spa_config_tryenter); | |
3006 | EXPORT_SYMBOL(spa_config_enter); | |
3007 | EXPORT_SYMBOL(spa_config_exit); | |
3008 | EXPORT_SYMBOL(spa_config_held); | |
3009 | ||
3010 | /* Pool vdev add/remove lock */ | |
3011 | EXPORT_SYMBOL(spa_vdev_enter); | |
3012 | EXPORT_SYMBOL(spa_vdev_exit); | |
3013 | ||
3014 | /* Pool vdev state change lock */ | |
3015 | EXPORT_SYMBOL(spa_vdev_state_enter); | |
3016 | EXPORT_SYMBOL(spa_vdev_state_exit); | |
3017 | ||
3018 | /* Accessor functions */ | |
3019 | EXPORT_SYMBOL(spa_shutting_down); | |
3020 | EXPORT_SYMBOL(spa_get_dsl); | |
3021 | EXPORT_SYMBOL(spa_get_rootblkptr); | |
3022 | EXPORT_SYMBOL(spa_set_rootblkptr); | |
3023 | EXPORT_SYMBOL(spa_altroot); | |
3024 | EXPORT_SYMBOL(spa_sync_pass); | |
3025 | EXPORT_SYMBOL(spa_name); | |
3026 | EXPORT_SYMBOL(spa_guid); | |
3027 | EXPORT_SYMBOL(spa_last_synced_txg); | |
3028 | EXPORT_SYMBOL(spa_first_txg); | |
3029 | EXPORT_SYMBOL(spa_syncing_txg); | |
3030 | EXPORT_SYMBOL(spa_version); | |
3031 | EXPORT_SYMBOL(spa_state); | |
3032 | EXPORT_SYMBOL(spa_load_state); | |
3033 | EXPORT_SYMBOL(spa_freeze_txg); | |
c28b2279 BB |
3034 | EXPORT_SYMBOL(spa_get_dspace); |
3035 | EXPORT_SYMBOL(spa_update_dspace); | |
3036 | EXPORT_SYMBOL(spa_deflate); | |
3037 | EXPORT_SYMBOL(spa_normal_class); | |
3038 | EXPORT_SYMBOL(spa_log_class); | |
cc99f275 DB |
3039 | EXPORT_SYMBOL(spa_special_class); |
3040 | EXPORT_SYMBOL(spa_preferred_class); | |
c28b2279 BB |
3041 | EXPORT_SYMBOL(spa_max_replication); |
3042 | EXPORT_SYMBOL(spa_prev_software_version); | |
3043 | EXPORT_SYMBOL(spa_get_failmode); | |
3044 | EXPORT_SYMBOL(spa_suspended); | |
3045 | EXPORT_SYMBOL(spa_bootfs); | |
3046 | EXPORT_SYMBOL(spa_delegation); | |
3047 | EXPORT_SYMBOL(spa_meta_objset); | |
f1512ee6 | 3048 | EXPORT_SYMBOL(spa_maxblocksize); |
50c957f7 | 3049 | EXPORT_SYMBOL(spa_maxdnodesize); |
c28b2279 BB |
3050 | |
3051 | /* Miscellaneous support routines */ | |
c28b2279 BB |
3052 | EXPORT_SYMBOL(spa_guid_exists); |
3053 | EXPORT_SYMBOL(spa_strdup); | |
3054 | EXPORT_SYMBOL(spa_strfree); | |
c28b2279 | 3055 | EXPORT_SYMBOL(spa_generate_guid); |
b0bc7a84 | 3056 | EXPORT_SYMBOL(snprintf_blkptr); |
c28b2279 BB |
3057 | EXPORT_SYMBOL(spa_freeze); |
3058 | EXPORT_SYMBOL(spa_upgrade); | |
3059 | EXPORT_SYMBOL(spa_evict_all); | |
3060 | EXPORT_SYMBOL(spa_lookup_by_guid); | |
3061 | EXPORT_SYMBOL(spa_has_spare); | |
3062 | EXPORT_SYMBOL(dva_get_dsize_sync); | |
3063 | EXPORT_SYMBOL(bp_get_dsize_sync); | |
3064 | EXPORT_SYMBOL(bp_get_dsize); | |
3065 | EXPORT_SYMBOL(spa_has_slogs); | |
3066 | EXPORT_SYMBOL(spa_is_root); | |
3067 | EXPORT_SYMBOL(spa_writeable); | |
3068 | EXPORT_SYMBOL(spa_mode); | |
c28b2279 | 3069 | EXPORT_SYMBOL(spa_namespace_lock); |
6cb8e530 PZ |
3070 | EXPORT_SYMBOL(spa_trust_config); |
3071 | EXPORT_SYMBOL(spa_missing_tvds_allowed); | |
3072 | EXPORT_SYMBOL(spa_set_missing_tvds); | |
f0ed6c74 | 3073 | EXPORT_SYMBOL(spa_state_to_name); |
d2734cce SD |
3074 | EXPORT_SYMBOL(spa_importing_readonly_checkpoint); |
3075 | EXPORT_SYMBOL(spa_min_claim_txg); | |
3076 | EXPORT_SYMBOL(spa_suspend_async_destroy); | |
3077 | EXPORT_SYMBOL(spa_has_checkpoint); | |
3078 | EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable); | |
cc92e9d0 | 3079 | |
03fdcb9a MM |
3080 | ZFS_MODULE_PARAM(zfs, zfs_, flags, UINT, ZMOD_RW, |
3081 | "Set additional debugging flags"); | |
0b39b9f9 | 3082 | |
03fdcb9a MM |
3083 | ZFS_MODULE_PARAM(zfs, zfs_, recover, INT, ZMOD_RW, |
3084 | "Set to attempt to recover from fatal errors"); | |
0b39b9f9 | 3085 | |
03fdcb9a | 3086 | ZFS_MODULE_PARAM(zfs, zfs_, free_leak_on_eio, INT, ZMOD_RW, |
0b39b9f9 PS |
3087 | "Set to ignore IO errors during free and permanently leak the space"); |
3088 | ||
ab8d9c17 | 3089 | ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, U64, ZMOD_RW, |
03fdcb9a MM |
3090 | "Dead I/O check interval in milliseconds"); |
3091 | ||
35aa9dc6 | 3092 | ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW, |
03fdcb9a MM |
3093 | "Enable deadman timer"); |
3094 | ||
fdc2d303 | 3095 | ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, UINT, ZMOD_RW, |
03fdcb9a MM |
3096 | "SPA size estimate multiplication factor"); |
3097 | ||
3098 | ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW, | |
3099 | "Place DDT data into the special class"); | |
3100 | ||
3101 | ZFS_MODULE_PARAM(zfs, zfs_, user_indirect_is_special, INT, ZMOD_RW, | |
3102 | "Place user data indirect blocks into the special class"); | |
3103 | ||
03fdcb9a | 3104 | /* BEGIN CSTYLED */ |
e64e84ec MM |
3105 | ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, failmode, |
3106 | param_set_deadman_failmode, param_get_charp, ZMOD_RW, | |
3107 | "Failmode for deadman timer"); | |
3108 | ||
2a3aa5a1 | 3109 | ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, synctime_ms, |
ab8d9c17 | 3110 | param_set_deadman_synctime, spl_param_get_u64, ZMOD_RW, |
2a3aa5a1 MM |
3111 | "Pool sync expiration time in milliseconds"); |
3112 | ||
3113 | ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms, | |
ab8d9c17 | 3114 | param_set_deadman_ziotime, spl_param_get_u64, ZMOD_RW, |
2a3aa5a1 MM |
3115 | "IO expiration time in milliseconds"); |
3116 | ||
fdc2d303 | 3117 | ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, UINT, ZMOD_RW, |
1f02ecc5 D |
3118 | "Small file blocks in special vdevs depends on this much " |
3119 | "free space available"); | |
02730c33 | 3120 | /* END CSTYLED */ |
2a3aa5a1 | 3121 | |
7e3df9db | 3122 | ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift, |
fdc2d303 | 3123 | param_get_uint, ZMOD_RW, "Reserved free space in pool"); |
3bd4df38 EN |
3124 | |
3125 | ZFS_MODULE_PARAM(zfs, spa_, num_allocators, INT, ZMOD_RW, | |
645b8330 AM |
3126 | "Number of allocators per spa"); |
3127 | ||
3128 | ZFS_MODULE_PARAM(zfs, spa_, cpus_per_allocator, INT, ZMOD_RW, | |
3129 | "Minimum number of CPUs per allocators"); |