/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 * Copyright (c) 2023, Klara Inc.
 */

#include <sys/zfs_context.h>
#include <sys/zfs_chksum.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_file.h>
#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fm/util.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/kstat.h>
#include "zfs_prop.h"
#include <sys/btree.h>
#include <sys/zfeature.h>
#include <sys/qat.h>
#include <sys/zstd/zstd.h>

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from non-zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa zfs_refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.
 *	The spa_t cannot be destroyed or freed while this is non-zero.
 *	Internally, the refcount is never really 'zero' - opening a pool
 *	implicitly keeps some references in the DMU.  Internally we check
 *	against spa_minref, but present the image of a zero/non-zero value
 *	to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and
 *	spa_config_list.  The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 */

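/*
 * Illustrative usage sketch: a typical reader of the vdev tree takes a
 * single config lock as RW_READER around the traversal, e.g.:
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	... trivial, non-modifying inquiries on spa->spa_root_vdev ...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 */
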
static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static const int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

spa_mode_t spa_mode_global = SPA_MODE_UNINIT;

#ifdef ZFS_DEBUG
/*
 * Everything except dprintf, set_error, spa, and indirect_remap is on
 * by default in debug builds.
 */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
	ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
int zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
int zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds.  This value has two meanings.  First it is
 * used to determine when the spa_deadman() logic should fire.  By default the
 * spa_deadman() will fire if spa_sync() has not completed in 600 seconds.
 * Secondly, the value determines if an I/O is considered "hung".  Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in one of three behaviors controlled by zfs_deadman_failmode.
 */
uint64_t zfs_deadman_synctime_ms = 600000UL;	/* 10 min. */

/*
 * This value controls the maximum amount of time zio_wait() will block for an
 * outstanding IO.  By default this is 300 seconds at which point the "hung"
 * behavior will be applied as described for zfs_deadman_synctime_ms.
 */
uint64_t zfs_deadman_ziotime_ms = 300000UL;	/* 5 min. */

/*
 * Check time in milliseconds.  This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 60000UL;	/* 1 min. */

/*
 * By default the deadman is enabled.
 */
int zfs_deadman_enabled = B_TRUE;

/*
 * Controls the behavior of the deadman when it detects a "hung" I/O.
 * Valid values are zfs_deadman_failmode=<wait|continue|panic>.
 *
 * wait     - Wait for the "hung" I/O (default)
 * continue - Attempt to recover from a "hung" I/O
 * panic    - Panic the system
 */
const char *zfs_deadman_failmode = "wait";

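/*
 * Illustrative sketch: on Linux builds these are exposed as module
 * parameters, so an administrator could, for example:
 *
 *	echo continue > /sys/module/zfs/parameters/zfs_deadman_failmode
 *	echo 120000 > /sys/module/zfs/parameters/zfs_deadman_ziotime_ms
 */
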
/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
uint_t spa_asize_inflation = 24;

/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed (bounded by spa_max_slop).  This ensures that we
 * don't run the pool completely out of space, due to unaccounted changes (e.g.
 * to the MOS).  It also limits the worst-case time to allocate space.  If we
 * have less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.  The ZIL metaslabs (spa_embedded_log_class) are
 * also part of this 3.2% of space which can't be consumed by normal writes;
 * the slop space "proper" (spa_get_slop_space()) is decreased by the embedded
 * log space.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space.  They will only return ENOSPC if less than half
 * the slop space is free.  Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the pool.
 * These are the operations that call dsl_pool_adjustedsize() with the netfree
 * argument set to TRUE.
 *
 * Operations that are almost guaranteed to free up space in the absence of
 * a pool checkpoint can use up to three quarters of the slop space
 * (e.g. zfs destroy).
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space.  These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE).  If these operations result in a net
 * increase in the amount of space used, it is possible to run the pool
 * completely out of space, causing it to be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * Further, on very large pools, the slop space will be smaller than
 * 3.2%, to avoid reserving much more space than we actually need; bounded
 * by spa_max_slop (128GB).
 *
 * See also the comments in zfs_space_check_t.
 */
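/*
 * Illustrative sketch (simplified; spa_get_slop_space() is the
 * authoritative calculation, which also subtracts the embedded log
 * space and applies the half-pool-size bound on spa_min_slop):
 *
 *	slop = MIN(MAX(poolsize >> spa_slop_shift, spa_min_slop),
 *	    spa_max_slop);
 *
 * e.g. a 1TB pool reserves 1TB/32 = 32GB of slop, while a 100TB pool
 * is capped at spa_max_slop = 128GB.
 */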
uint_t spa_slop_shift = 5;
static const uint64_t spa_min_slop = 128ULL * 1024 * 1024;
static const uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024;

/*
 * Number of allocators to use, per spa instance
 */
static int spa_num_allocators = 4;

/*
 * Spa active allocator.
 * Valid values are zfs_active_allocator=<dynamic|cursor|new-dynamic>.
 */
const char *zfs_active_allocator = "dynamic";

void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);

	spa_import_progress_set_notes_nolog(spa, "%s", buf);
}

/*
 * By default dedup and user data indirects land in the special class
 */
static int zfs_ddt_data_is_special = B_TRUE;
static int zfs_user_indirect_is_special = B_TRUE;

/*
 * The percentage of special class final space reserved for metadata only.
 * Once we allocate 100 - zfs_special_class_metadata_reserve_pct percent of
 * the class, we only let metadata into it.
 */
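/*
 * e.g. with the default of 25, user data stops being admitted once the
 * class is 75% full; the final 25% is held back for metadata.
 */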
static uint_t zfs_special_class_metadata_reserve_pct = 25;

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
		scl->scl_count = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
		ASSERT(scl->scl_count == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (scl->scl_count != 0) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		scl->scl_count++;
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

static void
spa_config_enter_impl(spa_t *spa, int locks, const void *tag, krw_t rw,
    int mmp_flag)
{
	(void) tag;
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer ||
			    (!mmp_flag && scl->scl_write_wanted)) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (scl->scl_count != 0) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		scl->scl_count++;
		mutex_exit(&scl->scl_lock);
	}
	ASSERT3U(wlocks_held, <=, locks);
}

void
spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
	spa_config_enter_impl(spa, locks, tag, rw, 0);
}

/*
 * spa_config_enter_mmp() allows the mmp thread to cut in front of
 * outstanding write lock requests.  This is needed since the mmp updates
 * are time sensitive and failure to service them promptly will result in a
 * suspended pool.  This pool suspension has been seen in practice when there
 * is a single disk in a pool that is responding slowly and presumably about
 * to fail.
 */

void
spa_config_enter_mmp(spa_t *spa, int locks, const void *tag, krw_t rw)
{
	spa_config_enter_impl(spa, locks, tag, rw, 1);
}

void
spa_config_exit(spa_t *spa, int locks, const void *tag)
{
	(void) tag;
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(scl->scl_count > 0);
		if (--scl->scl_count == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && scl->scl_count != 0) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be
 * held.  Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
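	/* e.g. "tank/fs@snap" reduces to the pool name "tank" */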
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/* Disable the deadman if the pool is suspended. */
	if (spa_suspended(spa))
		return;

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    (u_longlong_t)++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev, FTAG);

	spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
	    spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
	    MSEC_TO_TICK(zfs_deadman_checktime_ms));
}

static int
spa_log_sm_sort_by_txg(const void *va, const void *vb)
{
	const spa_log_sm_t *a = va;
	const spa_log_sm_t *b = vb;

	return (TREE_CMP(a->sls_txg, b->sls_txg));
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;
	spa->spa_trust_config = B_TRUE;
	spa->spa_hostid = zone_get_hostid(NULL);

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
	spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);
	spa_set_deadman_failmode(spa, zfs_deadman_failmode);
	spa_set_allocator(spa, zfs_active_allocator);

	zfs_refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);
	spa_stats_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot)
		spa->spa_root = spa_strdup(altroot);

	/* Do not allow more allocators than CPUs. */
	spa->spa_alloc_count = MIN(MAX(spa_num_allocators, 1), boot_ncpus);

	spa->spa_allocs = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (spa_alloc_t), KM_SLEEP);
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mutex_init(&spa->spa_allocs[i].spaa_lock, NULL, MUTEX_DEFAULT,
		    NULL);
		avl_create(&spa->spa_allocs[i].spaa_tree, zio_bookmark_compare,
		    sizeof (zio_t), offsetof(zio_t, io_queue_node.a));
	}

	avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
	    sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
	avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
	    sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
	list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
	    offsetof(log_summary_entry_t, lse_node));

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;
	spa->spa_min_alloc = INT_MAX;
	spa->spa_gcd_alloc = INT_MAX;

	/* Reset cached value */
	spa->spa_dedup_dspace = ~0ULL;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	list_create(&spa->spa_leaf_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_leaf_node));

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed
 * and deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
	ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
	ASSERT0(spa->spa_waiters);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root)
		spa_strfree(spa->spa_root);

	while ((dp = list_remove_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		avl_destroy(&spa->spa_allocs[i].spaa_tree);
		mutex_destroy(&spa->spa_allocs[i].spaa_lock);
	}
	kmem_free(spa->spa_allocs, spa->spa_alloc_count *
	    sizeof (spa_alloc_t));

	avl_destroy(&spa->spa_metaslabs_by_flushed);
	avl_destroy(&spa->spa_sm_logs_by_txg);
	list_destroy(&spa->spa_log_summary);
	list_destroy(&spa->spa_config_list);
	list_destroy(&spa->spa_leaf_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	nvlist_free(spa->spa_feat_stats);
	spa_config_set(spa, NULL);

	zfs_refcount_destroy(&spa->spa_refcount);

	spa_stats_destroy(spa);
	spa_config_lock_destroy(spa);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);
	cv_destroy(&spa->spa_activities_cv);
	cv_destroy(&spa->spa_waiters_cv);

	mutex_destroy(&spa->spa_flushed_ms_lock);
	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);
	mutex_destroy(&spa->spa_feat_stats_lock);
	mutex_destroy(&spa->spa_activities_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, const void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference,
 * or have the namespace lock held.
 */
void
spa_close(spa_t *spa, const void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}
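
/*
 * Illustrative usage sketch (hypothetical caller; error handling
 * elided): look up a pool and hold it across an operation:
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_t *spa = spa_lookup("tank");
 *	spa_open_ref(spa, FTAG);
 *	mutex_exit(&spa_namespace_lock);
 *	... use spa ...
 *	spa_close(spa, FTAG);
 */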

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released.  Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs.  The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, const void *tag)
{
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is
 * the number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static inline int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = (const spa_aux_t *)a;
	const spa_aux_t *sb = (const spa_aux_t *)b;

	return (TREE_CMP(sa->aux_guid, sb->aux_guid));
}

static void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

static void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

static boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

static void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement
 *	  if the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a
 * replacement spare, then we bump the reference count in the AVL tree.  In
 * addition, we set the 'vdev_isspare' member to indicate that the device is
 * a spare (active or inactive).  When a spare is made active (used to
 * replace a device in the pool), we also keep track of which pool it has
 * been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need
 * to be completely consistent with respect to other vdev configuration
 * changes.
 */
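/*
 * Illustrative sketch (hypothetical caller): checking whether a device
 * is already an active spare in another pool before reusing it:
 *
 *	uint64_t pool;
 *	if (spa_spare_exists(vd->vdev_guid, &pool, NULL) &&
 *	    pool != 0ULL && pool != spa_guid(spa))
 *		... the device is busy as another pool's spare ...
 */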

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);

	vdev_autotrim_stop_all(spa);

	return (spa_vdev_config_enter(spa));
}
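
/*
 * Illustrative usage sketch (hypothetical caller; the pattern the vdev
 * add/remove paths follow):
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	... modify the vdev tree ...
 *	return (spa_vdev_exit(spa, vd, txg, error));
 */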

/*
 * The same as spa_vdev_enter() above but additionally takes the guid of
 * the vdev being detached.  When there is a rebuild in process it will be
 * suspended while the vdev tree is modified then resumed by spa_vdev_exit().
 * The rebuild is canceled if only a single child remains after the detach.
 */
uint64_t
spa_vdev_detach_enter(spa_t *spa, uint64_t guid)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);

	vdev_autotrim_stop_all(spa);

	if (guid != 0) {
		vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
		if (vd) {
			vdev_rebuild_stop_wait(vd->vdev_top);
		}
	}

	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error,
    const char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_embedded_log_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		if (vd->vdev_ops->vdev_op_leaf) {
			mutex_enter(&vd->vdev_initialize_lock);
			vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED,
			    NULL);
			mutex_exit(&vd->vdev_initialize_lock);

			mutex_enter(&vd->vdev_trim_lock);
			vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
			mutex_exit(&vd->vdev_trim_lock);
		}

		/*
		 * The vdev may be both a leaf and top-level device.
		 */
		vdev_autotrim_stop_wait(vd);

		spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_STATE_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	vdev_autotrim_restart(spa);
	vdev_rebuild_restart(spa);

	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;
	vdev_t *vdev_top;

	if (vd == NULL || vd == spa->spa_root_vdev) {
		vdev_top = spa->spa_root_vdev;
	} else {
		vdev_top = vd->vdev_top;
	}

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE, B_FALSE);

	if (vd != NULL) {
		if (vd != spa->spa_root_vdev)
			vdev_state_dirty(vdev_top);

		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(8) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we
		 * can't dirty the vdev config because lock SCL_CONFIG is not
		 * held.  Thankfully, in this case we don't need to dirty the
		 * config because it will be written out anyway when we
		 * finish creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of
			 * adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	memcpy(new, s, len + 1);

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid;

	if (spa != NULL) {
		do {
			(void) random_get_pseudo_bytes((void *)&guid,
			    sizeof (guid));
		} while (guid == 0 || spa_guid_exists(spa_guid(spa), guid));
	} else {
		do {
			(void) random_get_pseudo_bytes((void *)&guid,
			    sizeof (guid));
		} while (guid == 0 || spa_guid_exists(guid, 0));
	}

	return (guid);
}
1552
1553void
b0bc7a84 1554snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
428870ff 1555{
9ae529ec 1556 char type[256];
a926aab9
AZ
1557 const char *checksum = NULL;
1558 const char *compress = NULL;
34dc7c2f 1559
428870ff 1560 if (bp != NULL) {
9ae529ec
CS
1561 if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
1562 dmu_object_byteswap_t bswap =
1563 DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
1564 (void) snprintf(type, sizeof (type), "bswap %s %s",
1565 DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
1566 "metadata" : "data",
1567 dmu_ot_byteswap[bswap].ob_name);
1568 } else {
1569 (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
1570 sizeof (type));
1571 }
9b67f605
MA
1572 if (!BP_IS_EMBEDDED(bp)) {
1573 checksum =
1574 zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
1575 }
428870ff 1576 compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
34dc7c2f
BB
1577 }
1578
97143b9d 1579 SNPRINTF_BLKPTR(kmem_scnprintf, ' ', buf, buflen, bp, type, checksum,
5c27ec10 1580 compress);
34dc7c2f
BB
1581}
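/*
 * Illustrative usage sketch (not part of the original source): callers
 * typically format into a stack buffer sized for the worst case, using
 * BP_SPRINTF_LEN from sys/spa.h as is done elsewhere in ZFS:
 *
 *	char blkbuf[BP_SPRINTF_LEN];
 *	snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
 *	zfs_dbgmsg("examining bp %s", blkbuf);
 *
 * Passing bp == NULL is allowed and yields a placeholder string.
 */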

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}
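/*
 * Illustrative example (not part of the original source): parsing stops at
 * the first character that is not a lowercase hex digit, and *nptr is left
 * pointing at it, mirroring the strtoull() convention:
 *
 *	char *end;
 *	uint64_t v = zfs_strtonum("1a2f,rest", &end);
 *
 * Here v is 0x1a2f and end points at the ','.  There is no overflow
 * detection, hence the "numbers that don't overflow" caveat above.
 */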

void
spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx)
{
	/*
	 * We bump the feature refcount for each special vdev added to the
	 * pool.
	 */
	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES));
	spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx);
}

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

boolean_t
spa_indirect_vdevs_loaded(spa_t *spa)
{
	return (spa->spa_indirect_vdevs_loaded);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strlcpy(buf, spa->spa_root, buflen);
}

uint32_t
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_config_guid' to
	 * handle this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc.  It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

/*
 * Return the last txg where data can be dirtied.  The final txgs
 * will be used to just clear out any deferred frees that remain.
 */
uint64_t
spa_final_dirty_txg(spa_t *spa)
{
	return (spa->spa_final_txg - TXG_DEFER_SIZE);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/*
 * Return the inflated asize for a logical write in bytes.  This is used by
 * the DMU to calculate the space a logical write will require on disk.
 * If lsize is smaller than the largest physical block size allocatable on
 * this pool, we use that size instead, since the write will end up using
 * the whole block anyway.
 */
uint64_t
spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
{
	if (lsize == 0)
		return (0);	/* No inflation needed */
	return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation);
}
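/*
 * Illustrative worked example (not part of the original source): on a pool
 * whose largest allocatable block requires ashift = 12 (4 KiB) and with the
 * default spa_asize_inflation = 24, a 512-byte logical write is charged
 *
 *	MAX(512, 1 << 12) * 24 = 4096 * 24 = 98304 bytes (96 KiB)
 *
 * of worst-case on-disk space when reserving for the transaction.
 */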

/*
 * Return the amount of slop space in bytes.  It is typically 1/32 of the pool
 * (3.2%), minus the embedded log space.  On very small pools, it may be
 * slightly larger than this.  On very large pools, it will be capped to
 * the value of spa_max_slop.  The embedded log space is not included in
 * spa_dspace.  By subtracting it, the usable space (per "zfs list") is a
 * constant 97% of the total space, regardless of metaslab size (assuming the
 * default spa_slop_shift=5 and a non-tiny pool).
 *
 * See the comment above spa_slop_shift for more details.
 */
uint64_t
spa_get_slop_space(spa_t *spa)
{
	uint64_t space = 0;
	uint64_t slop = 0;

	/*
	 * Make sure spa_dedup_dspace has been set.
	 */
	if (spa->spa_dedup_dspace == ~0ULL)
		spa_update_dspace(spa);

	/*
	 * spa_get_dspace() includes the space only logically "used" by
	 * deduplicated (and, via the BRT, block-cloned) data.  Since it is
	 * not useful to reserve more slop as more data is deduplicated or
	 * cloned, we subtract that space out here.
	 */
	space =
	    spa_get_dspace(spa) - spa->spa_dedup_dspace - brt_get_dspace(spa);
	slop = MIN(space >> spa_slop_shift, spa_max_slop);

	/*
	 * Subtract the embedded log space, but no more than half the (3.2%)
	 * unusable space.  Note, the "no more than half" is only relevant if
	 * zfs_embedded_slog_min_ms >> spa_slop_shift < 2, which is not true by
	 * default.
	 */
	uint64_t embedded_log =
	    metaslab_class_get_dspace(spa_embedded_log_class(spa));
	slop -= MIN(embedded_log, slop >> 1);

	/*
	 * Slop space should be at least spa_min_slop, but no more than half
	 * the entire pool.
	 */
	slop = MAX(slop, MIN(space >> 1, spa_min_slop));
	return (slop);
}
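/*
 * Illustrative worked example (not part of the original source), assuming
 * the defaults spa_slop_shift = 5, spa_min_slop = 128 MiB and
 * spa_max_slop = 128 GiB, on a 1 TiB pool with no dedup, cloned or
 * embedded-log space:
 *
 *	space = 1 TiB
 *	slop  = MIN(1 TiB >> 5, 128 GiB) = 32 GiB
 *	slop  = MAX(32 GiB, MIN(512 GiB, 128 MiB)) = 32 GiB
 *
 * so roughly 1/32 of the pool is held back.  Only a pool larger than
 * 4 TiB would hit the spa_max_slop cap.
 */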

uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

uint64_t
spa_get_checkpoint_space(spa_t *spa)
{
	return (spa->spa_checkpoint_info.sci_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa) + brt_get_dspace(spa);
	if (spa->spa_nonallocating_dspace > 0) {
		/*
		 * Subtract the space provided by all non-allocating vdevs that
		 * contribute to dspace.  If a file is overwritten, its old
		 * blocks are freed and new blocks are allocated.  If there are
		 * no snapshots of the file, the available space should remain
		 * the same.  The old blocks could be freed from the
		 * non-allocating vdev, but the new blocks must be allocated on
		 * other (allocating) vdevs.  By reserving the entire size of
		 * the non-allocating vdevs (including allocated space), we
		 * ensure that there will be enough space on the allocating
		 * vdevs for this file overwrite to succeed.
		 *
		 * Note that the DMU/DSL doesn't actually know or care
		 * how much space is allocated (it does its own tracking
		 * of how much space has been logically used).  So it
		 * doesn't matter that the data we are moving may be
		 * allocated twice (on the old device and the new device).
		 */
		ASSERT3U(spa->spa_dspace, >=, spa->spa_nonallocating_dspace);
		spa->spa_dspace -= spa->spa_nonallocating_dspace;
	}
}

/*
 * Return the failure mode that has been set for this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint64_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended != ZIO_SUSPEND_NONE);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

metaslab_class_t *
spa_embedded_log_class(spa_t *spa)
{
	return (spa->spa_embedded_log_class);
}

metaslab_class_t *
spa_special_class(spa_t *spa)
{
	return (spa->spa_special_class);
}

metaslab_class_t *
spa_dedup_class(spa_t *spa)
{
	return (spa->spa_dedup_class);
}

/*
 * Locate an appropriate allocation class.
 */
metaslab_class_t *
spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype,
    uint_t level, uint_t special_smallblk)
{
	/*
	 * ZIL allocations determine their class in zio_alloc_zil().
	 */
	ASSERT(objtype != DMU_OT_INTENT_LOG);

	boolean_t has_special_class = spa->spa_special_class->mc_groups != 0;

	if (DMU_OT_IS_DDT(objtype)) {
		if (spa->spa_dedup_class->mc_groups != 0)
			return (spa_dedup_class(spa));
		else if (has_special_class && zfs_ddt_data_is_special)
			return (spa_special_class(spa));
		else
			return (spa_normal_class(spa));
	}

	/* Indirect blocks for user data can land in special if allowed */
	if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) {
		if (has_special_class && zfs_user_indirect_is_special)
			return (spa_special_class(spa));
		else
			return (spa_normal_class(spa));
	}

	if (DMU_OT_IS_METADATA(objtype) || level > 0) {
		if (has_special_class)
			return (spa_special_class(spa));
		else
			return (spa_normal_class(spa));
	}

	/*
	 * Allow small file blocks in special class in some cases (like
	 * for the dRAID vdev feature).  But always leave a reserve of
	 * zfs_special_class_metadata_reserve_pct exclusively for metadata.
	 */
	if (DMU_OT_IS_FILE(objtype) &&
	    has_special_class && size <= special_smallblk) {
		metaslab_class_t *special = spa_special_class(spa);
		uint64_t alloc = metaslab_class_get_alloc(special);
		uint64_t space = metaslab_class_get_space(special);
		uint64_t limit =
		    (space * (100 - zfs_special_class_metadata_reserve_pct))
		    / 100;

		if (alloc < limit)
			return (special);
	}

	return (spa_normal_class(spa));
}
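/*
 * Illustrative worked example (not part of the original source) of the
 * small-file-block reserve check above: with a 100 GiB special class and the
 * default zfs_special_class_metadata_reserve_pct = 25, the limit is
 *
 *	100 GiB * (100 - 25) / 100 = 75 GiB.
 *
 * A 16 KiB file block (with special_smallblk set to 32 KiB) lands in the
 * special class only while its allocated space is below 75 GiB; past that
 * point it falls back to the normal class, preserving the last 25%
 * exclusively for metadata.
 */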

void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_insert_head(&spa->spa_evicting_os_list, os);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_remove(&spa->spa_evicting_os_list, os);
	cv_broadcast(&spa->spa_evicting_os_cv);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	while (!list_is_empty(&spa->spa_evicting_os_list))
		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
	mutex_exit(&spa->spa_evicting_os_lock);

	dmu_buf_user_evict_wait();
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}

spa_autotrim_t
spa_get_autotrim(spa_t *spa)
{
	return (spa->spa_autotrim);
}

uint64_t
spa_deadman_ziotime(spa_t *spa)
{
	return (spa->spa_deadman_ziotime);
}

uint64_t
spa_get_deadman_failmode(spa_t *spa)
{
	return (spa->spa_deadman_failmode);
}

void
spa_set_deadman_failmode(spa_t *spa, const char *failmode)
{
	if (strcmp(failmode, "wait") == 0)
		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
	else if (strcmp(failmode, "continue") == 0)
		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE;
	else if (strcmp(failmode, "panic") == 0)
		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC;
	else
		spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
}
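/*
 * Illustrative note (not part of the original source): unrecognized strings
 * silently fall back to "wait", so a call such as
 *
 *	spa_set_deadman_failmode(spa, "retry");
 *
 * leaves spa_get_deadman_failmode() returning ZIO_FAILURE_MODE_WAIT even
 * though the argument is bogus.  Input validation instead happens in
 * param_set_deadman_failmode_common() (later in this file) before this
 * setter is invoked from the module-parameter path.
 */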

void
spa_set_deadman_ziotime(hrtime_t ns)
{
	spa_t *spa = NULL;

	if (spa_mode_global != SPA_MODE_UNINIT) {
		mutex_enter(&spa_namespace_lock);
		while ((spa = spa_next(spa)) != NULL)
			spa->spa_deadman_ziotime = ns;
		mutex_exit(&spa_namespace_lock);
	}
}

void
spa_set_deadman_synctime(hrtime_t ns)
{
	spa_t *spa = NULL;

	if (spa_mode_global != SPA_MODE_UNINIT) {
		mutex_enter(&spa_namespace_lock);
		while ((spa = spa_next(spa)) != NULL)
			spa->spa_deadman_synctime = ns;
		mutex_exit(&spa_namespace_lock);
	}
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
		if (vd != NULL)
			dsize = (asize >> SPA_MINBLOCKSHIFT) *
			    vd->vdev_deflate_ratio;
	}

	return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}
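/*
 * Illustrative usage sketch (not part of the original source): the _sync
 * variants assert that the caller already holds a config lock, while
 * bp_get_dsize() takes SCL_VDEV as reader around the same walk, making it
 * safe from contexts that hold no locks:
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	uint64_t d1 = bp_get_dsize_sync(spa, bp);
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 *	uint64_t d2 = bp_get_dsize(spa, bp);	(locks internally)
 *
 * Both return the deflated size summed over every DVA in the bp.
 */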

uint64_t
spa_dirty_data(spa_t *spa)
{
	return (spa->spa_dsl_pool->dp_dirty_total);
}

/*
 * ==========================================================================
 * SPA Import Progress Routines
 * ==========================================================================
 */

typedef struct spa_import_progress {
	uint64_t		pool_guid;	/* unique id for updates */
	char			*pool_name;
	spa_load_state_t	spa_load_state;
	char			*spa_load_notes;
	uint64_t		mmp_sec_remaining;	/* MMP activity check */
	uint64_t		spa_load_max_txg;	/* rewind txg */
	procfs_list_node_t	smh_node;
} spa_import_progress_t;

spa_history_list_t *spa_import_progress_list = NULL;

static int
spa_import_progress_show_header(struct seq_file *f)
{
	seq_printf(f, "%-20s %-14s %-14s %-12s %-16s %s\n", "pool_guid",
	    "load_state", "multihost_secs", "max_txg",
	    "pool_name", "notes");
	return (0);
}

static int
spa_import_progress_show(struct seq_file *f, void *data)
{
	spa_import_progress_t *sip = (spa_import_progress_t *)data;

	seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %-16s %s\n",
	    (u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state,
	    (u_longlong_t)sip->mmp_sec_remaining,
	    (u_longlong_t)sip->spa_load_max_txg,
	    (sip->pool_name ? sip->pool_name : "-"),
	    (sip->spa_load_notes ? sip->spa_load_notes : "-"));

	return (0);
}

/* Remove oldest elements from list until there are no more than 'size' left */
static void
spa_import_progress_truncate(spa_history_list_t *shl, unsigned int size)
{
	spa_import_progress_t *sip;
	while (shl->size > size) {
		sip = list_remove_head(&shl->procfs_list.pl_list);
		if (sip->pool_name)
			spa_strfree(sip->pool_name);
		if (sip->spa_load_notes)
			kmem_strfree(sip->spa_load_notes);
		kmem_free(sip, sizeof (spa_import_progress_t));
		shl->size--;
	}

	IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list));
}

static void
spa_import_progress_init(void)
{
	spa_import_progress_list = kmem_zalloc(sizeof (spa_history_list_t),
	    KM_SLEEP);

	spa_import_progress_list->size = 0;

	spa_import_progress_list->procfs_list.pl_private =
	    spa_import_progress_list;

	procfs_list_install("zfs",
	    NULL,
	    "import_progress",
	    0644,
	    &spa_import_progress_list->procfs_list,
	    spa_import_progress_show,
	    spa_import_progress_show_header,
	    NULL,
	    offsetof(spa_import_progress_t, smh_node));
}

static void
spa_import_progress_destroy(void)
{
	spa_history_list_t *shl = spa_import_progress_list;
	procfs_list_uninstall(&shl->procfs_list);
	spa_import_progress_truncate(shl, 0);
	procfs_list_destroy(&shl->procfs_list);
	kmem_free(shl, sizeof (spa_history_list_t));
}

int
spa_import_progress_set_state(uint64_t pool_guid,
    spa_load_state_t load_state)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;
	int error = ENOENT;

	if (shl->size == 0)
		return (0);

	mutex_enter(&shl->procfs_list.pl_lock);
	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
		if (sip->pool_guid == pool_guid) {
			sip->spa_load_state = load_state;
			if (sip->spa_load_notes != NULL) {
				kmem_strfree(sip->spa_load_notes);
				sip->spa_load_notes = NULL;
			}
			error = 0;
			break;
		}
	}
	mutex_exit(&shl->procfs_list.pl_lock);

	return (error);
}

static void
spa_import_progress_set_notes_impl(spa_t *spa, boolean_t log_dbgmsg,
    const char *fmt, va_list adx)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;
	uint64_t pool_guid = spa_guid(spa);

	if (shl->size == 0)
		return;

	char *notes = kmem_vasprintf(fmt, adx);

	mutex_enter(&shl->procfs_list.pl_lock);
	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
		if (sip->pool_guid == pool_guid) {
			if (sip->spa_load_notes != NULL) {
				kmem_strfree(sip->spa_load_notes);
				sip->spa_load_notes = NULL;
			}
			sip->spa_load_notes = notes;
			if (log_dbgmsg)
				zfs_dbgmsg("'%s' %s", sip->pool_name, notes);
			notes = NULL;
			break;
		}
	}
	mutex_exit(&shl->procfs_list.pl_lock);
	if (notes != NULL)
		kmem_strfree(notes);
}

void
spa_import_progress_set_notes(spa_t *spa, const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	spa_import_progress_set_notes_impl(spa, B_TRUE, fmt, adx);
	va_end(adx);
}

void
spa_import_progress_set_notes_nolog(spa_t *spa, const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	spa_import_progress_set_notes_impl(spa, B_FALSE, fmt, adx);
	va_end(adx);
}

int
spa_import_progress_set_max_txg(uint64_t pool_guid, uint64_t load_max_txg)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;
	int error = ENOENT;

	if (shl->size == 0)
		return (0);

	mutex_enter(&shl->procfs_list.pl_lock);
	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
		if (sip->pool_guid == pool_guid) {
			sip->spa_load_max_txg = load_max_txg;
			error = 0;
			break;
		}
	}
	mutex_exit(&shl->procfs_list.pl_lock);

	return (error);
}

int
spa_import_progress_set_mmp_check(uint64_t pool_guid,
    uint64_t mmp_sec_remaining)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;
	int error = ENOENT;

	if (shl->size == 0)
		return (0);

	mutex_enter(&shl->procfs_list.pl_lock);
	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
		if (sip->pool_guid == pool_guid) {
			sip->mmp_sec_remaining = mmp_sec_remaining;
			error = 0;
			break;
		}
	}
	mutex_exit(&shl->procfs_list.pl_lock);

	return (error);
}

/*
 * A new import is in progress, add an entry.
 */
void
spa_import_progress_add(spa_t *spa)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;
	const char *poolname = NULL;

	sip = kmem_zalloc(sizeof (spa_import_progress_t), KM_SLEEP);
	sip->pool_guid = spa_guid(spa);

	(void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME,
	    &poolname);
	if (poolname == NULL)
		poolname = spa_name(spa);
	sip->pool_name = spa_strdup(poolname);
	sip->spa_load_state = spa_load_state(spa);
	sip->spa_load_notes = NULL;

	mutex_enter(&shl->procfs_list.pl_lock);
	procfs_list_add(&shl->procfs_list, sip);
	shl->size++;
	mutex_exit(&shl->procfs_list.pl_lock);
}

void
spa_import_progress_remove(uint64_t pool_guid)
{
	spa_history_list_t *shl = spa_import_progress_list;
	spa_import_progress_t *sip;

	mutex_enter(&shl->procfs_list.pl_lock);
	for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
	    sip = list_prev(&shl->procfs_list.pl_list, sip)) {
		if (sip->pool_guid == pool_guid) {
			if (sip->pool_name)
				spa_strfree(sip->pool_name);
			if (sip->spa_load_notes)
				spa_strfree(sip->spa_load_notes);
			list_remove(&shl->procfs_list.pl_list, sip);
			shl->size--;
			kmem_free(sip, sizeof (spa_import_progress_t));
			break;
		}
	}
	mutex_exit(&shl->procfs_list.pl_lock);
}
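/*
 * Illustrative lifecycle sketch (not part of the original source): the
 * import code creates an entry before it starts loading a pool, updates it
 * as the load advances, and removes it once the import finishes or fails,
 * so (on Linux) /proc/spl/kstat/zfs/import_progress only shows in-flight
 * imports:
 *
 *	spa_import_progress_add(spa);
 *	spa_import_progress_set_notes(spa, "opening vdevs");
 *	(void) spa_import_progress_set_state(spa_guid(spa), SPA_LOAD_IMPORT);
 *	spa_import_progress_remove(spa_guid(spa));
 *
 * Note that setting a new load state clears any previous notes string.
 */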

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);

	return (TREE_ISIGN(s));
}

void
spa_boot_init(void)
{
	spa_config_load();
}

void
spa_init(spa_mode_t mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifndef _KERNEL
	if (spa_mode_global != SPA_MODE_READ && dprintf_find_string("watch")) {
		struct sigaction sa;

		sa.sa_flags = SA_SIGINFO;
		sigemptyset(&sa.sa_mask);
		sa.sa_sigaction = arc_buf_sigsegv;

		if (sigaction(SIGSEGV, &sa, NULL) == -1) {
			perror("could not enable watchpoints: "
			    "sigaction(SIGSEGV, ...) = ");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif

	fm_init();
	zfs_refcount_init();
	unique_init();
	zfs_btree_init();
	metaslab_stat_init();
	brt_init();
	ddt_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_mirror_stat_init();
	vdev_raidz_math_init();
	vdev_file_init();
	zfs_prop_init();
	chksum_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	vdev_prop_init();
	l2arc_start();
	scan_init();
	qat_init();
	spa_import_progress_init();
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_file_fini();
	vdev_mirror_stat_fini();
	vdev_raidz_math_fini();
	chksum_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	ddt_fini();
	brt_fini();
	metaslab_stat_fini();
	zfs_btree_fini();
	unique_fini();
	zfs_refcount_fini();
	fm_fini();
	scan_fini();
	qat_fini();
	spa_import_progress_destroy();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has a dedicated slog device.  No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_groups != 0);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config);
}

/*
 * Returns true if there is a pending sync task in any of the current
 * syncing txg, the current quiescing txg, or the current open txg.
 */
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
	    !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
}

spa_mode_t
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stat per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
		spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
	else
		spa->spa_scan_pass_scrub_pause = 0;

	if (dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan))
		spa->spa_scan_pass_errorscrub_pause = spa->spa_scan_pass_start;
	else
		spa->spa_scan_pass_errorscrub_pause = 0;

	spa->spa_scan_pass_scrub_spent_paused = 0;
	spa->spa_scan_pass_exam = 0;
	spa->spa_scan_pass_issued = 0;

	/* error scrub stats */
	spa->spa_scan_pass_errorscrub_spent_paused = 0;
}

/*
 * Get scan stats for zpool status reports.
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || (scn->scn_phys.scn_func == POOL_SCAN_NONE &&
	    scn->errorscrub_phys.dep_func == POOL_SCAN_NONE))
		return (SET_ERROR(ENOENT));

	memset(ps, 0, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_state = scn->scn_phys.scn_state;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_skipped = scn->scn_phys.scn_skipped;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;

	/* data not stored on disk */
	ps->pss_pass_exam = spa->spa_scan_pass_exam;
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
	ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
	ps->pss_pass_issued = spa->spa_scan_pass_issued;
	ps->pss_issued =
	    scn->scn_issued_before_pass + spa->spa_scan_pass_issued;

	/* error scrub data stored on disk */
	ps->pss_error_scrub_func = scn->errorscrub_phys.dep_func;
	ps->pss_error_scrub_state = scn->errorscrub_phys.dep_state;
	ps->pss_error_scrub_start = scn->errorscrub_phys.dep_start_time;
	ps->pss_error_scrub_end = scn->errorscrub_phys.dep_end_time;
	ps->pss_error_scrub_examined = scn->errorscrub_phys.dep_examined;
	ps->pss_error_scrub_to_be_examined =
	    scn->errorscrub_phys.dep_to_examine;

	/* error scrub data not stored on disk */
	ps->pss_pass_error_scrub_pause = spa->spa_scan_pass_errorscrub_pause;

	return (0);
}

int
spa_maxblocksize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SPA_MAXBLOCKSIZE);
	else
		return (SPA_OLD_MAXBLOCKSIZE);
}


/*
 * Returns the txg in which the last device removal completed.  No indirect
 * mappings have been added since this txg.
 */
uint64_t
spa_get_last_removal_txg(spa_t *spa)
{
	uint64_t vdevid;
	uint64_t ret = -1ULL;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	/*
	 * sr_prev_indirect_vdev is only modified while holding all the
	 * config locks, so it is sufficient to hold SCL_VDEV as reader when
	 * examining it.
	 */
	vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;

	while (vdevid != -1ULL) {
		vdev_t *vd = vdev_lookup_top(spa, vdevid);
		vdev_indirect_births_t *vib = vd->vdev_indirect_births;

		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);

		/*
		 * If the removal did not remap any data, we don't care.
		 */
		if (vdev_indirect_births_count(vib) != 0) {
			ret = vdev_indirect_births_last_entry_txg(vib);
			break;
		}

		vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);

	IMPLY(ret != -1ULL,
	    spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));

	return (ret);
}

int
spa_maxdnodesize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
		return (DNODE_MAX_SIZE);
	else
		return (DNODE_MIN_SIZE);
}

boolean_t
spa_multihost(spa_t *spa)
{
	return (spa->spa_multihost ? B_TRUE : B_FALSE);
}

uint32_t
spa_get_hostid(spa_t *spa)
{
	return (spa->spa_hostid);
}

boolean_t
spa_trust_config(spa_t *spa)
{
	return (spa->spa_trust_config);
}

uint64_t
spa_missing_tvds_allowed(spa_t *spa)
{
	return (spa->spa_missing_tvds_allowed);
}

space_map_t *
spa_syncing_log_sm(spa_t *spa)
{
	return (spa->spa_syncing_log_sm);
}

void
spa_set_missing_tvds(spa_t *spa, uint64_t missing)
{
	spa->spa_missing_tvds = missing;
}

/*
 * Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc).
 */
const char *
spa_state_to_name(spa_t *spa)
{
	ASSERT3P(spa, !=, NULL);

	/*
	 * It is possible for the spa to exist without a root vdev while it
	 * transitions during import/export.
	 */
	vdev_t *rvd = spa->spa_root_vdev;
	if (rvd == NULL) {
		return ("TRANSITIONING");
	}
	vdev_state_t state = rvd->vdev_state;
	vdev_aux_t aux = rvd->vdev_stat.vs_aux;

	if (spa_suspended(spa))
		return ("SUSPENDED");

	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return ("OFFLINE");
	case VDEV_STATE_REMOVED:
		return ("REMOVED");
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return ("FAULTED");
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return ("SPLIT");
		else
			return ("UNAVAIL");
	case VDEV_STATE_FAULTED:
		return ("FAULTED");
	case VDEV_STATE_DEGRADED:
		return ("DEGRADED");
	case VDEV_STATE_HEALTHY:
		return ("ONLINE");
	default:
		break;
	}

	return ("UNKNOWN");
}
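/*
 * Illustrative usage sketch (not part of the original source):
 *
 *	zfs_dbgmsg("pool '%s' is %s", spa_name(spa), spa_state_to_name(spa));
 *
 * Note that suspension takes priority over the vdev state: a pool whose
 * root vdev is still HEALTHY reports "SUSPENDED" while I/O is suspended.
 */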

boolean_t
spa_top_vdevs_spacemap_addressable(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
		if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
			return (B_FALSE);
	}
	return (B_TRUE);
}

boolean_t
spa_has_checkpoint(spa_t *spa)
{
	return (spa->spa_checkpoint_txg != 0);
}

boolean_t
spa_importing_readonly_checkpoint(spa_t *spa)
{
	return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
	    spa->spa_mode == SPA_MODE_READ);
}

uint64_t
spa_min_claim_txg(spa_t *spa)
{
	uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;

	if (checkpoint_txg != 0)
		return (checkpoint_txg + 1);

	return (spa->spa_first_txg);
}

/*
 * If there is a checkpoint, async destroys may consume more space from
 * the pool instead of freeing it.  In an attempt to save the pool from
 * getting suspended when it is about to run out of space, we stop
 * processing async destroys.
 */
boolean_t
spa_suspend_async_destroy(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);

	uint64_t unreserved = dsl_pool_unreserved_space(dp,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED);
	uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
	uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;

	if (spa_has_checkpoint(spa) && avail == 0)
		return (B_TRUE);

	return (B_FALSE);
}
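/*
 * Illustrative worked example (not part of the original source): if the root
 * dsl_dir reports dd_used_bytes equal to the unreserved space returned by
 * dsl_pool_unreserved_space(dp, ZFS_SPACE_CHECK_EXTRA_RESERVED), then
 * avail = 0 and, with a checkpoint active, async destroys are paused until
 * either the checkpoint is discarded or space is freed by other means.
 * Without a checkpoint the same pool keeps processing destroys, since they
 * actually free space in that case.
 */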

#if defined(_KERNEL)

int
param_set_deadman_failmode_common(const char *val)
{
	spa_t *spa = NULL;
	char *p;

	if (val == NULL)
		return (SET_ERROR(EINVAL));

	if ((p = strchr(val, '\n')) != NULL)
		*p = '\0';

	if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 &&
	    strcmp(val, "panic") != 0)
		return (SET_ERROR(EINVAL));

	if (spa_mode_global != SPA_MODE_UNINIT) {
		mutex_enter(&spa_namespace_lock);
		while ((spa = spa_next(spa)) != NULL)
			spa_set_deadman_failmode(spa, val);
		mutex_exit(&spa_namespace_lock);
	}

	return (0);
}
#endif

/* Namespace manipulation */
EXPORT_SYMBOL(spa_lookup);
EXPORT_SYMBOL(spa_add);
EXPORT_SYMBOL(spa_remove);
EXPORT_SYMBOL(spa_next);

/* Refcount functions */
EXPORT_SYMBOL(spa_open_ref);
EXPORT_SYMBOL(spa_close);
EXPORT_SYMBOL(spa_refcount_zero);

/* Pool configuration lock */
EXPORT_SYMBOL(spa_config_tryenter);
EXPORT_SYMBOL(spa_config_enter);
EXPORT_SYMBOL(spa_config_exit);
EXPORT_SYMBOL(spa_config_held);

/* Pool vdev add/remove lock */
EXPORT_SYMBOL(spa_vdev_enter);
EXPORT_SYMBOL(spa_vdev_exit);

/* Pool vdev state change lock */
EXPORT_SYMBOL(spa_vdev_state_enter);
EXPORT_SYMBOL(spa_vdev_state_exit);

/* Accessor functions */
EXPORT_SYMBOL(spa_shutting_down);
EXPORT_SYMBOL(spa_get_dsl);
EXPORT_SYMBOL(spa_get_rootblkptr);
EXPORT_SYMBOL(spa_set_rootblkptr);
EXPORT_SYMBOL(spa_altroot);
EXPORT_SYMBOL(spa_sync_pass);
EXPORT_SYMBOL(spa_name);
EXPORT_SYMBOL(spa_guid);
EXPORT_SYMBOL(spa_last_synced_txg);
EXPORT_SYMBOL(spa_first_txg);
EXPORT_SYMBOL(spa_syncing_txg);
EXPORT_SYMBOL(spa_version);
EXPORT_SYMBOL(spa_state);
EXPORT_SYMBOL(spa_load_state);
EXPORT_SYMBOL(spa_freeze_txg);
EXPORT_SYMBOL(spa_get_dspace);
EXPORT_SYMBOL(spa_update_dspace);
EXPORT_SYMBOL(spa_deflate);
EXPORT_SYMBOL(spa_normal_class);
EXPORT_SYMBOL(spa_log_class);
EXPORT_SYMBOL(spa_special_class);
EXPORT_SYMBOL(spa_preferred_class);
EXPORT_SYMBOL(spa_max_replication);
EXPORT_SYMBOL(spa_prev_software_version);
EXPORT_SYMBOL(spa_get_failmode);
EXPORT_SYMBOL(spa_suspended);
EXPORT_SYMBOL(spa_bootfs);
EXPORT_SYMBOL(spa_delegation);
EXPORT_SYMBOL(spa_meta_objset);
EXPORT_SYMBOL(spa_maxblocksize);
EXPORT_SYMBOL(spa_maxdnodesize);

/* Miscellaneous support routines */
EXPORT_SYMBOL(spa_guid_exists);
EXPORT_SYMBOL(spa_strdup);
EXPORT_SYMBOL(spa_strfree);
EXPORT_SYMBOL(spa_generate_guid);
EXPORT_SYMBOL(snprintf_blkptr);
EXPORT_SYMBOL(spa_freeze);
EXPORT_SYMBOL(spa_upgrade);
EXPORT_SYMBOL(spa_evict_all);
EXPORT_SYMBOL(spa_lookup_by_guid);
EXPORT_SYMBOL(spa_has_spare);
EXPORT_SYMBOL(dva_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize);
EXPORT_SYMBOL(spa_has_slogs);
EXPORT_SYMBOL(spa_is_root);
EXPORT_SYMBOL(spa_writeable);
EXPORT_SYMBOL(spa_mode);
EXPORT_SYMBOL(spa_namespace_lock);
EXPORT_SYMBOL(spa_trust_config);
EXPORT_SYMBOL(spa_missing_tvds_allowed);
EXPORT_SYMBOL(spa_set_missing_tvds);
EXPORT_SYMBOL(spa_state_to_name);
EXPORT_SYMBOL(spa_importing_readonly_checkpoint);
EXPORT_SYMBOL(spa_min_claim_txg);
EXPORT_SYMBOL(spa_suspend_async_destroy);
EXPORT_SYMBOL(spa_has_checkpoint);
EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable);

ZFS_MODULE_PARAM(zfs, zfs_, flags, UINT, ZMOD_RW,
	"Set additional debugging flags");

ZFS_MODULE_PARAM(zfs, zfs_, recover, INT, ZMOD_RW,
	"Set to attempt to recover from fatal errors");

ZFS_MODULE_PARAM(zfs, zfs_, free_leak_on_eio, INT, ZMOD_RW,
	"Set to ignore IO errors during free and permanently leak the space");

ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, U64, ZMOD_RW,
	"Dead I/O check interval in milliseconds");

ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW,
	"Enable deadman timer");

ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, UINT, ZMOD_RW,
	"SPA size estimate multiplication factor");

ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW,
	"Place DDT data into the special class");

ZFS_MODULE_PARAM(zfs, zfs_, user_indirect_is_special, INT, ZMOD_RW,
	"Place user data indirect blocks into the special class");

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, failmode,
	param_set_deadman_failmode, param_get_charp, ZMOD_RW,
	"Failmode for deadman timer");

ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, synctime_ms,
	param_set_deadman_synctime, spl_param_get_u64, ZMOD_RW,
	"Pool sync expiration time in milliseconds");

ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms,
	param_set_deadman_ziotime, spl_param_get_u64, ZMOD_RW,
	"IO expiration time in milliseconds");

ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, UINT, ZMOD_RW,
	"Small file blocks in special vdevs depend on this much "
	"free space available");
/* END CSTYLED */

ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift,
	param_get_uint, ZMOD_RW, "Reserved free space in pool");

ZFS_MODULE_PARAM(zfs, spa_, num_allocators, INT, ZMOD_RW,
	"Number of allocators per spa, capped by ncpus");