Commit | Line | Data |
---|---|---|
34dc7c2f BB |
1 | /* |
2 | * CDDL HEADER START | |
3 | * | |
4 | * The contents of this file are subject to the terms of the | |
5 | * Common Development and Distribution License (the "License"). | |
6 | * You may not use this file except in compliance with the License. | |
7 | * | |
8 | * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE | |
9 | * or http://www.opensolaris.org/os/licensing. | |
10 | * See the License for the specific language governing permissions | |
11 | * and limitations under the License. | |
12 | * | |
13 | * When distributing Covered Code, include this CDDL HEADER in each | |
14 | * file and include the License file at usr/src/OPENSOLARIS.LICENSE. | |
15 | * If applicable, add the following below this CDDL HEADER, with the | |
16 | * fields enclosed by brackets "[]" replaced with your own identifying | |
17 | * information: Portions Copyright [yyyy] [name of copyright owner] | |
18 | * | |
19 | * CDDL HEADER END | |
20 | */ | |
21 | /* | |
428870ff | 22 | * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. |
64fc7762 | 23 | * Copyright (c) 2011, 2017 by Delphix. All rights reserved. |
adfe9d93 | 24 | * Copyright 2015 Nexenta Systems, Inc. All rights reserved. |
0c66c32d | 25 | * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. |
3c67d83a | 26 | * Copyright 2013 Saso Kiselkov. All rights reserved. |
0ea05c64 | 27 | * Copyright (c) 2017 Datto Inc. |
34dc7c2f BB |
28 | */ |
29 | ||
34dc7c2f BB |
30 | #include <sys/zfs_context.h> |
31 | #include <sys/spa_impl.h> | |
32 | #include <sys/zio.h> | |
33 | #include <sys/zio_checksum.h> | |
34 | #include <sys/zio_compress.h> | |
35 | #include <sys/dmu.h> | |
36 | #include <sys/dmu_tx.h> | |
37 | #include <sys/zap.h> | |
38 | #include <sys/zil.h> | |
39 | #include <sys/vdev_impl.h> | |
bc25c932 | 40 | #include <sys/vdev_file.h> |
ab9f4b0b | 41 | #include <sys/vdev_raidz.h> |
34dc7c2f BB |
42 | #include <sys/metaslab.h> |
43 | #include <sys/uberblock_impl.h> | |
44 | #include <sys/txg.h> | |
45 | #include <sys/avl.h> | |
46 | #include <sys/unique.h> | |
47 | #include <sys/dsl_pool.h> | |
48 | #include <sys/dsl_dir.h> | |
49 | #include <sys/dsl_prop.h> | |
26685276 | 50 | #include <sys/fm/util.h> |
428870ff | 51 | #include <sys/dsl_scan.h> |
34dc7c2f BB |
52 | #include <sys/fs/zfs.h> |
53 | #include <sys/metaslab_impl.h> | |
b128c09f | 54 | #include <sys/arc.h> |
428870ff | 55 | #include <sys/ddt.h> |
1421c891 | 56 | #include <sys/kstat.h> |
34dc7c2f | 57 | #include "zfs_prop.h" |
3c67d83a | 58 | #include <sys/zfeature.h> |
cf637391 | 59 | #include "qat.h" |
34dc7c2f BB |
60 | |
61 | /* | |
62 | * SPA locking | |
63 | * | |
64 | * There are four basic locks for managing spa_t structures: | |
65 | * | |
66 | * spa_namespace_lock (global mutex) | |
67 | * | |
68 | * This lock must be acquired to do any of the following: | |
69 | * | |
70 | * - Lookup a spa_t by name | |
71 | * - Add or remove a spa_t from the namespace | |
72 | * - Increase spa_refcount from zero | |
73 | * - Check if spa_refcount is zero | |
74 | * - Rename a spa_t | |
75 | * - add/remove/attach/detach devices | |
76 | * - Held for the duration of create/destroy/import/export | |
77 | * | |
78 | * It does not need to handle recursion. A create or destroy may | |
79 | * reference objects (files or zvols) in other pools, but by | |
80 | * definition they must have an existing reference, and will never need | |
81 | * to lookup a spa_t by name. | |
82 | * | |
83 | * spa_refcount (per-spa refcount_t protected by mutex) | |
84 | * | |
85 | * This reference count keeps track of any active users of the spa_t. The | |
86 | * spa_t cannot be destroyed or freed while this is non-zero. Internally, | |
87 | * the refcount is never really 'zero' - opening a pool implicitly keeps | |
b128c09f | 88 | * some references in the DMU. Internally we check against spa_minref, but |
34dc7c2f BB |
89 | * present the image of a zero/non-zero value to consumers. |
90 | * | |
b128c09f | 91 | * spa_config_lock[] (per-spa array of rwlocks) |
34dc7c2f BB |
92 | * |
93 | * This protects the spa_t from config changes, and must be held in | |
94 | * the following circumstances: | |
95 | * | |
96 | * - RW_READER to perform I/O to the spa | |
97 | * - RW_WRITER to change the vdev config | |
98 | * | |
34dc7c2f BB |
99 | * The locking order is fairly straightforward: |
100 | * | |
101 | * spa_namespace_lock -> spa_refcount | |
102 | * | |
103 | * The namespace lock must be acquired to increase the refcount from 0 | |
104 | * or to check if it is zero. | |
105 | * | |
b128c09f | 106 | * spa_refcount -> spa_config_lock[] |
34dc7c2f BB |
107 | * |
108 | * There must be at least one valid reference on the spa_t to acquire | |
109 | * the config lock. | |
110 | * | |
b128c09f | 111 | * spa_namespace_lock -> spa_config_lock[] |
34dc7c2f BB |
112 | * |
113 | * The namespace lock must always be taken before the config lock. | |
114 | * | |
115 | * | |
b128c09f | 116 | * The spa_namespace_lock can be acquired directly and is globally visible. |
34dc7c2f | 117 | * |
b128c09f BB |
118 | * The namespace is manipulated using the following functions, all of which |
119 | * require the spa_namespace_lock to be held. | |
34dc7c2f BB |
120 | * |
121 | * spa_lookup() Lookup a spa_t by name. | |
122 | * | |
123 | * spa_add() Create a new spa_t in the namespace. | |
124 | * | |
125 | * spa_remove() Remove a spa_t from the namespace. This also | |
126 | * frees up any memory associated with the spa_t. | |
127 | * | |
128 | * spa_next() Returns the next spa_t in the system, or the | |
129 | * first if NULL is passed. | |
130 | * | |
131 | * spa_evict_all() Shutdown and remove all spa_t structures in | |
132 | * the system. | |
133 | * | |
134 | * spa_guid_exists() Determine whether a pool/device guid exists. | |
135 | * | |
136 | * The spa_refcount is manipulated using the following functions: | |
137 | * | |
138 | * spa_open_ref() Adds a reference to the given spa_t. Must be | |
139 | * called with spa_namespace_lock held if the | |
140 | * refcount is currently zero. | |
141 | * | |
142 | * spa_close() Remove a reference from the spa_t. This will | |
143 | * not free the spa_t or remove it from the | |
144 | * namespace. No locking is required. | |
145 | * | |
146 | * spa_refcount_zero() Returns true if the refcount is currently | |
147 | * zero. Must be called with spa_namespace_lock | |
148 | * held. | |
149 | * | |
b128c09f BB |
150 | * The spa_config_lock[] is an array of rwlocks, ordered as follows: |
151 | * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV. | |
152 | * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}(). | |
153 | * | |
154 | * To read the configuration, it suffices to hold one of these locks as reader. | |
155 | * To modify the configuration, you must hold all locks as writer. To modify | |
156 | * vdev state without altering the vdev tree's topology (e.g. online/offline), | |
157 | * you must hold SCL_STATE and SCL_ZIO as writer. | |
158 | * | |
159 | * We use these distinct config locks to avoid recursive lock entry. | |
160 | * For example, spa_sync() (which holds SCL_CONFIG as reader) induces | |
161 | * block allocations (SCL_ALLOC), which may require reading space maps | |
162 | * from disk (dmu_read() -> zio_read() -> SCL_ZIO). | |
163 | * | |
164 | * The spa config locks cannot be normal rwlocks because we need the | |
165 | * ability to hand off ownership. For example, SCL_ZIO is acquired | |
166 | * by the issuing thread and later released by an interrupt thread. | |
167 | * They do, however, obey the usual write-wanted semantics to prevent | |
168 | * writer (i.e. system administrator) starvation. | |
169 | * | |
170 | * The lock acquisition rules are as follows: | |
171 | * | |
172 | * SCL_CONFIG | |
173 | * Protects changes to the vdev tree topology, such as vdev | |
174 | * add/remove/attach/detach. Protects the dirty config list | |
175 | * (spa_config_dirty_list) and the set of spares and l2arc devices. | |
176 | * | |
177 | * SCL_STATE | |
178 | * Protects changes to pool state and vdev state, such as vdev | |
179 | * online/offline/fault/degrade/clear. Protects the dirty state list | |
180 | * (spa_state_dirty_list) and global pool state (spa_state). | |
181 | * | |
182 | * SCL_ALLOC | |
183 | * Protects changes to metaslab groups and classes. | |
184 | * Held as reader by metaslab_alloc() and metaslab_claim(). | |
185 | * | |
186 | * SCL_ZIO | |
187 | * Held by bp-level zios (those which have no io_vd upon entry) | |
188 | * to prevent changes to the vdev tree. The bp-level zio implicitly | |
189 | * protects all of its vdev child zios, which do not hold SCL_ZIO. | |
190 | * | |
191 | * SCL_FREE | |
192 | * Protects changes to metaslab groups and classes. | |
193 | * Held as reader by metaslab_free(). SCL_FREE is distinct from | |
194 | * SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free | |
195 | * blocks in zio_done() while another i/o that holds either | |
196 | * SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete. | |
197 | * | |
198 | * SCL_VDEV | |
199 | * Held as reader to prevent changes to the vdev tree during trivial | |
428870ff | 200 | * inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the |
b128c09f BB |
201 | * other locks, and lower than all of them, to ensure that it's safe |
202 | * to acquire regardless of caller context. | |
203 | * | |
204 | * In addition, the following rules apply: | |
205 | * | |
206 | * (a) spa_props_lock protects pool properties, spa_config and spa_config_list. | |
207 | * The lock ordering is SCL_CONFIG > spa_props_lock. | |
208 | * | |
209 | * (b) I/O operations on leaf vdevs. For any zio operation that takes | |
210 | * an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(), | |
211 | * or zio_write_phys() -- the caller must ensure that the config | |
212 | * cannot change in the interim, and that the vdev cannot be reopened. | |
213 | * SCL_STATE as reader suffices for both. | |
34dc7c2f BB |
214 | * |
215 | * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit(). | |
216 | * | |
217 | * spa_vdev_enter() Acquire the namespace lock and the config lock | |
218 | * for writing. | |
219 | * | |
220 | * spa_vdev_exit() Release the config lock, wait for all I/O | |
221 | * to complete, sync the updated configs to the | |
222 | * cache, and release the namespace lock. | |
223 | * | |
b128c09f BB |
224 | * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit(). |
225 | * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual | |
226 | * locking is, always, based on spa_namespace_lock and spa_config_lock[]. | |
227 | * | |
9ae529ec | 228 | * spa_rename() is also implemented within this file since it requires |
b128c09f | 229 | * manipulation of the namespace. |
34dc7c2f BB |
230 | */ |
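As a minimal sketch of the reader-side convention described above (the helper below is hypothetical and not part of this file), a trivial inquiry that only needs a stable view of the vdev tree takes SCL_VDEV as reader for its duration, and can assert that fact with spa_config_held():

	/*
	 * Illustrative sketch only: hold SCL_VDEV as reader across a trivial
	 * vdev-tree inquiry, per the locking rules documented above.  The
	 * function name is hypothetical.
	 */
	static uint64_t
	example_root_vdev_children(spa_t *spa)
	{
		uint64_t children;

		spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
		ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER));
		children = spa->spa_root_vdev->vdev_children;
		spa_config_exit(spa, SCL_VDEV, FTAG);

		return (children);
	}

A configuration change (e.g. vdev add/remove), by contrast, takes all of the locks as writer, which is what spa_vdev_enter() does via SCL_ALL.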
231 | ||
232 | static avl_tree_t spa_namespace_avl; | |
233 | kmutex_t spa_namespace_lock; | |
234 | static kcondvar_t spa_namespace_cv; | |
34dc7c2f BB |
235 | int spa_max_replication_override = SPA_DVAS_PER_BP; |
236 | ||
237 | static kmutex_t spa_spare_lock; | |
238 | static avl_tree_t spa_spare_avl; | |
239 | static kmutex_t spa_l2cache_lock; | |
240 | static avl_tree_t spa_l2cache_avl; | |
241 | ||
242 | kmem_cache_t *spa_buffer_pool; | |
fb5f0bc8 | 243 | int spa_mode_global; |
34dc7c2f | 244 | |
0b39b9f9 | 245 | #ifdef ZFS_DEBUG |
a1d477c2 MA |
246 | /* |
247 | * Everything except dprintf, set_error, spa, and indirect_remap is on | |
248 | * by default in debug builds. | |
249 | */ | |
250 | int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR | | |
964c2d69 | 251 | ZFS_DEBUG_INDIRECT_REMAP); |
0b39b9f9 PS |
252 | #else |
253 | int zfs_flags = 0; | |
254 | #endif | |
255 | ||
256 | /* | |
257 | * zfs_recover can be set to nonzero to attempt to recover from | |
258 | * otherwise-fatal errors, typically caused by on-disk corruption. When | |
259 | * set, calls to zfs_panic_recover() will turn into warning messages. | |
260 | * This should only be used as a last resort, as it typically results | |
261 | * in leaked space, or worse. | |
262 | */ | |
263 | int zfs_recover = B_FALSE; | |
264 | ||
265 | /* | |
266 | * If destroy encounters an EIO while reading metadata (e.g. indirect | |
267 | * blocks), space referenced by the missing metadata can not be freed. | |
268 | * Normally this causes the background destroy to become "stalled", as | |
269 | * it is unable to make forward progress. While in this stalled state, | |
270 | * all remaining space to free from the error-encountering filesystem is | |
271 | * "temporarily leaked". Set this flag to cause it to ignore the EIO, | |
272 | * permanently leak the space from indirect blocks that can not be read, | |
273 | * and continue to free everything else that it can. | |
274 | * | |
275 | * The default, "stalling" behavior is useful if the storage partially | |
276 | * fails (i.e. some but not all i/os fail), and then later recovers. In | |
277 | * this case, we will be able to continue pool operations while it is | |
278 | * partially failed, and when it recovers, we can continue to free the | |
279 | * space, with no leaks. However, note that this case is actually | |
280 | * fairly rare. | |
281 | * | |
282 | * Typically pools either (a) fail completely (but perhaps temporarily, | |
283 | * e.g. a top-level vdev going offline), or (b) have localized, | |
284 | * permanent errors (e.g. disk returns the wrong data due to bit flip or | |
285 | * firmware bug). In case (a), this setting does not matter because the | |
286 | * pool will be suspended and the sync thread will not be able to make | |
287 | * forward progress regardless. In case (b), because the error is | |
288 | * permanent, the best we can do is leak the minimum amount of space, | |
289 | * which is what setting this flag will do. Therefore, it is reasonable | |
290 | * for this flag to normally be set, but we chose the more conservative | |
291 | * approach of not setting it, so that there is no possibility of | |
292 | * leaking space in the "partial temporary" failure case. | |
293 | */ | |
294 | int zfs_free_leak_on_eio = B_FALSE; | |
295 | ||
cc92e9d0 | 296 | /* |
e8b96c60 MA |
297 | * Expiration time in milliseconds. This value has two meanings. First it is |
298 | * used to determine when the spa_deadman() logic should fire. By default the | |
8fb1ede1 | 299 | * spa_deadman() will fire if spa_sync() has not completed in 600 seconds. |
e8b96c60 MA |
300 | * Secondly, the value determines if an I/O is considered "hung". Any I/O that |
301 | * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting | |
8fb1ede1 | 302 | * in one of three behaviors controlled by zfs_deadman_failmode. |
cc92e9d0 | 303 | */ |
8fb1ede1 BB |
304 | unsigned long zfs_deadman_synctime_ms = 600000ULL; |
305 | ||
306 | /* | |
307 | * This value controls the maximum amount of time zio_wait() will block for an | |
308 | * outstanding I/O. By default this is 300 seconds, at which point the "hung" | |
309 | * behavior will be applied as described for zfs_deadman_synctime_ms. | |
310 | */ | |
311 | unsigned long zfs_deadman_ziotime_ms = 300000ULL; | |
cc92e9d0 | 312 | |
b81a3ddc TC |
313 | /* |
314 | * Check time in milliseconds. This defines the frequency at which we check | |
315 | * for hung I/O. | |
316 | */ | |
8fb1ede1 | 317 | unsigned long zfs_deadman_checktime_ms = 60000ULL; |
b81a3ddc | 318 | |
cc92e9d0 GW |
319 | /* |
320 | * By default the deadman is enabled. | |
321 | */ | |
322 | int zfs_deadman_enabled = 1; | |
323 | ||
8fb1ede1 BB |
324 | /* |
325 | * Controls the behavior of the deadman when it detects a "hung" I/O. | |
326 | * Valid values are zfs_deadman_failmode=<wait|continue|panic>. | |
327 | * | |
328 | * wait - Wait for the "hung" I/O (default) | |
329 | * continue - Attempt to recover from a "hung" I/O | |
330 | * panic - Panic the system | |
331 | */ | |
332 | char *zfs_deadman_failmode = "wait"; | |
333 | ||
e8b96c60 MA |
334 | /* |
335 | * The worst case is single-sector max-parity RAID-Z blocks, in which | |
336 | * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1) | |
337 | * times the size; so just assume that. Add to this the fact that | |
338 | * we can have up to 3 DVAs per bp, and one more factor of 2 because | |
339 | * the block may be dittoed with up to 3 DVAs by ddt_sync(). All together, | |
340 | * the worst case is: | |
341 | * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24 | |
342 | */ | |
343 | int spa_asize_inflation = 24; | |
344 | ||
3d45fdd6 MA |
345 | /* |
346 | * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in | |
347 | * the pool to be consumed. This ensures that we don't run the pool | |
348 | * completely out of space, due to unaccounted changes (e.g. to the MOS). | |
349 | * It also limits the worst-case time to allocate space. If we have | |
350 | * less than this amount of free space, most ZPL operations (e.g. write, | |
351 | * create) will return ENOSPC. | |
352 | * | |
353 | * Certain operations (e.g. file removal, most administrative actions) can | |
354 | * use half the slop space. They will only return ENOSPC if less than half | |
355 | * the slop space is free. Typically, once the pool has less than the slop | |
356 | * space free, the user will use these operations to free up space in the pool. | |
357 | * These are the operations that call dsl_pool_adjustedsize() with the netfree | |
358 | * argument set to TRUE. | |
359 | * | |
360 | * A very restricted set of operations are always permitted, regardless of | |
361 | * the amount of free space. These are the operations that call | |
362 | * dsl_sync_task(ZFS_SPACE_CHECK_NONE), e.g. "zfs destroy". If these | |
363 | * operations result in a net increase in the amount of space used, | |
364 | * it is possible to run the pool completely out of space, causing it to | |
365 | * be permanently read-only. | |
366 | * | |
d7958b4c MA |
367 | * Note that on very small pools, the slop space will be larger than |
368 | * 3.2%, in an effort to have it be at least spa_min_slop (128MB), | |
369 | * but we never allow it to be more than half the pool size. | |
370 | * | |
3d45fdd6 MA |
371 | * See also the comments in zfs_space_check_t. |
372 | */ | |
373 | int spa_slop_shift = 5; | |
d7958b4c | 374 | uint64_t spa_min_slop = 128 * 1024 * 1024; |
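To make the bounds above concrete, here is a hedged sketch of the slop arithmetic the comment describes (the helper is illustrative only, not the in-tree calculation): the slop is 1/(2^spa_slop_shift) of the pool size, raised to at least spa_min_slop and never more than half the pool. For example, a 1 GB pool yields MAX(32 MB, 128 MB) = 128 MB of slop.

	/*
	 * Illustrative sketch of the slop bounds described above; the helper
	 * name is hypothetical and this is not the in-tree implementation.
	 */
	static uint64_t
	example_slop_space(uint64_t poolsize)
	{
		uint64_t slop = poolsize >> spa_slop_shift;	/* ~3.2% by default */

		slop = MAX(slop, spa_min_slop);			/* at least 128MB */
		return (MIN(slop, poolsize >> 1));		/* at most half the pool */
	}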
3d45fdd6 | 375 | |
34dc7c2f BB |
376 | /* |
377 | * ========================================================================== | |
378 | * SPA config locking | |
379 | * ========================================================================== | |
380 | */ | |
381 | static void | |
b128c09f BB |
382 | spa_config_lock_init(spa_t *spa) |
383 | { | |
1c27024e | 384 | for (int i = 0; i < SCL_LOCKS; i++) { |
b128c09f BB |
385 | spa_config_lock_t *scl = &spa->spa_config_lock[i]; |
386 | mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL); | |
387 | cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL); | |
13fe0198 | 388 | refcount_create_untracked(&scl->scl_count); |
b128c09f BB |
389 | scl->scl_writer = NULL; |
390 | scl->scl_write_wanted = 0; | |
391 | } | |
34dc7c2f BB |
392 | } |
393 | ||
394 | static void | |
b128c09f BB |
395 | spa_config_lock_destroy(spa_t *spa) |
396 | { | |
1c27024e | 397 | for (int i = 0; i < SCL_LOCKS; i++) { |
b128c09f BB |
398 | spa_config_lock_t *scl = &spa->spa_config_lock[i]; |
399 | mutex_destroy(&scl->scl_lock); | |
400 | cv_destroy(&scl->scl_cv); | |
401 | refcount_destroy(&scl->scl_count); | |
402 | ASSERT(scl->scl_writer == NULL); | |
403 | ASSERT(scl->scl_write_wanted == 0); | |
404 | } | |
405 | } | |
406 | ||
407 | int | |
408 | spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw) | |
34dc7c2f | 409 | { |
1c27024e | 410 | for (int i = 0; i < SCL_LOCKS; i++) { |
b128c09f BB |
411 | spa_config_lock_t *scl = &spa->spa_config_lock[i]; |
412 | if (!(locks & (1 << i))) | |
413 | continue; | |
414 | mutex_enter(&scl->scl_lock); | |
415 | if (rw == RW_READER) { | |
416 | if (scl->scl_writer || scl->scl_write_wanted) { | |
417 | mutex_exit(&scl->scl_lock); | |
adfe9d93 SK |
418 | spa_config_exit(spa, locks & ((1 << i) - 1), |
419 | tag); | |
b128c09f BB |
420 | return (0); |
421 | } | |
422 | } else { | |
423 | ASSERT(scl->scl_writer != curthread); | |
424 | if (!refcount_is_zero(&scl->scl_count)) { | |
425 | mutex_exit(&scl->scl_lock); | |
adfe9d93 SK |
426 | spa_config_exit(spa, locks & ((1 << i) - 1), |
427 | tag); | |
b128c09f BB |
428 | return (0); |
429 | } | |
430 | scl->scl_writer = curthread; | |
431 | } | |
432 | (void) refcount_add(&scl->scl_count, tag); | |
433 | mutex_exit(&scl->scl_lock); | |
434 | } | |
435 | return (1); | |
34dc7c2f BB |
436 | } |
437 | ||
438 | void | |
b128c09f | 439 | spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw) |
34dc7c2f | 440 | { |
45d1cae3 BB |
441 | int wlocks_held = 0; |
442 | ||
13fe0198 MA |
443 | ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY); |
444 | ||
1c27024e | 445 | for (int i = 0; i < SCL_LOCKS; i++) { |
b128c09f | 446 | spa_config_lock_t *scl = &spa->spa_config_lock[i]; |
45d1cae3 BB |
447 | if (scl->scl_writer == curthread) |
448 | wlocks_held |= (1 << i); | |
b128c09f BB |
449 | if (!(locks & (1 << i))) |
450 | continue; | |
451 | mutex_enter(&scl->scl_lock); | |
452 | if (rw == RW_READER) { | |
453 | while (scl->scl_writer || scl->scl_write_wanted) { | |
454 | cv_wait(&scl->scl_cv, &scl->scl_lock); | |
455 | } | |
456 | } else { | |
457 | ASSERT(scl->scl_writer != curthread); | |
458 | while (!refcount_is_zero(&scl->scl_count)) { | |
459 | scl->scl_write_wanted++; | |
460 | cv_wait(&scl->scl_cv, &scl->scl_lock); | |
461 | scl->scl_write_wanted--; | |
462 | } | |
463 | scl->scl_writer = curthread; | |
464 | } | |
465 | (void) refcount_add(&scl->scl_count, tag); | |
466 | mutex_exit(&scl->scl_lock); | |
34dc7c2f | 467 | } |
a1d477c2 | 468 | ASSERT3U(wlocks_held, <=, locks); |
34dc7c2f BB |
469 | } |
470 | ||
471 | void | |
b128c09f | 472 | spa_config_exit(spa_t *spa, int locks, void *tag) |
34dc7c2f | 473 | { |
1c27024e | 474 | for (int i = SCL_LOCKS - 1; i >= 0; i--) { |
b128c09f BB |
475 | spa_config_lock_t *scl = &spa->spa_config_lock[i]; |
476 | if (!(locks & (1 << i))) | |
477 | continue; | |
478 | mutex_enter(&scl->scl_lock); | |
479 | ASSERT(!refcount_is_zero(&scl->scl_count)); | |
480 | if (refcount_remove(&scl->scl_count, tag) == 0) { | |
481 | ASSERT(scl->scl_writer == NULL || | |
482 | scl->scl_writer == curthread); | |
483 | scl->scl_writer = NULL; /* OK in either case */ | |
484 | cv_broadcast(&scl->scl_cv); | |
485 | } | |
486 | mutex_exit(&scl->scl_lock); | |
34dc7c2f | 487 | } |
34dc7c2f BB |
488 | } |
489 | ||
b128c09f BB |
490 | int |
491 | spa_config_held(spa_t *spa, int locks, krw_t rw) | |
34dc7c2f | 492 | { |
1c27024e | 493 | int locks_held = 0; |
34dc7c2f | 494 | |
1c27024e | 495 | for (int i = 0; i < SCL_LOCKS; i++) { |
b128c09f BB |
496 | spa_config_lock_t *scl = &spa->spa_config_lock[i]; |
497 | if (!(locks & (1 << i))) | |
498 | continue; | |
499 | if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) || | |
500 | (rw == RW_WRITER && scl->scl_writer == curthread)) | |
501 | locks_held |= 1 << i; | |
502 | } | |
503 | ||
504 | return (locks_held); | |
34dc7c2f BB |
505 | } |
506 | ||
507 | /* | |
508 | * ========================================================================== | |
509 | * SPA namespace functions | |
510 | * ========================================================================== | |
511 | */ | |
512 | ||
513 | /* | |
514 | * Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held. | |
515 | * Returns NULL if no matching spa_t is found. | |
516 | */ | |
517 | spa_t * | |
518 | spa_lookup(const char *name) | |
519 | { | |
b128c09f BB |
520 | static spa_t search; /* spa_t is large; don't allocate on stack */ |
521 | spa_t *spa; | |
34dc7c2f | 522 | avl_index_t where; |
34dc7c2f BB |
523 | char *cp; |
524 | ||
525 | ASSERT(MUTEX_HELD(&spa_namespace_lock)); | |
526 | ||
13fe0198 MA |
527 | (void) strlcpy(search.spa_name, name, sizeof (search.spa_name)); |
528 | ||
34dc7c2f BB |
529 | /* |
530 | * If it's a full dataset name, figure out the pool name and | |
531 | * just use that. | |
532 | */ | |
da536844 | 533 | cp = strpbrk(search.spa_name, "/@#"); |
13fe0198 | 534 | if (cp != NULL) |
34dc7c2f | 535 | *cp = '\0'; |
34dc7c2f | 536 | |
34dc7c2f BB |
537 | spa = avl_find(&spa_namespace_avl, &search, &where); |
538 | ||
34dc7c2f BB |
539 | return (spa); |
540 | } | |
541 | ||
cc92e9d0 GW |
542 | /* |
543 | * Fires when spa_sync has not completed within zfs_deadman_synctime_ms. | |
544 | * If the zfs_deadman_enabled flag is set then it inspects all vdev queues | |
545 | * looking for potentially hung I/Os. | |
546 | */ | |
547 | void | |
548 | spa_deadman(void *arg) | |
549 | { | |
550 | spa_t *spa = arg; | |
551 | ||
b81a3ddc TC |
552 | /* Disable the deadman if the pool is suspended. */ |
553 | if (spa_suspended(spa)) | |
554 | return; | |
555 | ||
cc92e9d0 GW |
556 | zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu", |
557 | (gethrtime() - spa->spa_sync_starttime) / NANOSEC, | |
558 | ++spa->spa_deadman_calls); | |
559 | if (zfs_deadman_enabled) | |
8fb1ede1 | 560 | vdev_deadman(spa->spa_root_vdev, FTAG); |
cc92e9d0 | 561 | |
57ddcda1 | 562 | spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq, |
f764edf0 | 563 | spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() + |
b81a3ddc | 564 | MSEC_TO_TICK(zfs_deadman_checktime_ms)); |
cc92e9d0 GW |
565 | } |
566 | ||
34dc7c2f BB |
567 | /* |
568 | * Create an uninitialized spa_t with the given name. Requires | |
569 | * spa_namespace_lock. The caller must ensure that the spa_t doesn't already | |
570 | * exist by calling spa_lookup() first. | |
571 | */ | |
572 | spa_t * | |
428870ff | 573 | spa_add(const char *name, nvlist_t *config, const char *altroot) |
34dc7c2f BB |
574 | { |
575 | spa_t *spa; | |
b128c09f | 576 | spa_config_dirent_t *dp; |
34dc7c2f BB |
577 | |
578 | ASSERT(MUTEX_HELD(&spa_namespace_lock)); | |
579 | ||
79c76d5b | 580 | spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP); |
34dc7c2f | 581 | |
34dc7c2f | 582 | mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL); |
34dc7c2f | 583 | mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL); |
428870ff | 584 | mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL); |
0c66c32d | 585 | mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL); |
34dc7c2f | 586 | mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL); |
428870ff | 587 | mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL); |
34dc7c2f | 588 | mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL); |
3c67d83a | 589 | mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL); |
428870ff BB |
590 | mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL); |
591 | mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL); | |
592 | mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL); | |
4eb30c68 | 593 | mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL); |
3dfb57a3 | 594 | mutex_init(&spa->spa_alloc_lock, NULL, MUTEX_DEFAULT, NULL); |
34dc7c2f BB |
595 | |
596 | cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL); | |
0c66c32d | 597 | cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL); |
428870ff | 598 | cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL); |
34dc7c2f | 599 | cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL); |
b128c09f | 600 | cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL); |
34dc7c2f | 601 | |
1c27024e | 602 | for (int t = 0; t < TXG_SIZE; t++) |
428870ff BB |
603 | bplist_create(&spa->spa_free_bplist[t]); |
604 | ||
b128c09f | 605 | (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name)); |
34dc7c2f BB |
606 | spa->spa_state = POOL_STATE_UNINITIALIZED; |
607 | spa->spa_freeze_txg = UINT64_MAX; | |
608 | spa->spa_final_txg = UINT64_MAX; | |
428870ff BB |
609 | spa->spa_load_max_txg = UINT64_MAX; |
610 | spa->spa_proc = &p0; | |
611 | spa->spa_proc_state = SPA_PROC_NONE; | |
34dc7c2f | 612 | |
e8b96c60 | 613 | spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms); |
8fb1ede1 BB |
614 | spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms); |
615 | spa_set_deadman_failmode(spa, zfs_deadman_failmode); | |
cc92e9d0 | 616 | |
34dc7c2f | 617 | refcount_create(&spa->spa_refcount); |
b128c09f | 618 | spa_config_lock_init(spa); |
1421c891 | 619 | spa_stats_init(spa); |
34dc7c2f BB |
620 | |
621 | avl_add(&spa_namespace_avl, spa); | |
622 | ||
34dc7c2f BB |
623 | /* |
624 | * Set the alternate root, if there is one. | |
625 | */ | |
0336f3d0 | 626 | if (altroot) |
34dc7c2f | 627 | spa->spa_root = spa_strdup(altroot); |
34dc7c2f | 628 | |
64fc7762 | 629 | avl_create(&spa->spa_alloc_tree, zio_bookmark_compare, |
3dfb57a3 DB |
630 | sizeof (zio_t), offsetof(zio_t, io_alloc_node)); |
631 | ||
b128c09f BB |
632 | /* |
633 | * Every pool starts with the default cachefile | |
634 | */ | |
635 | list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t), | |
636 | offsetof(spa_config_dirent_t, scd_link)); | |
637 | ||
79c76d5b | 638 | dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP); |
428870ff | 639 | dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path); |
b128c09f BB |
640 | list_insert_head(&spa->spa_config_list, dp); |
641 | ||
572e2857 | 642 | VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME, |
79c76d5b | 643 | KM_SLEEP) == 0); |
572e2857 | 644 | |
9ae529ec CS |
645 | if (config != NULL) { |
646 | nvlist_t *features; | |
647 | ||
648 | if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ, | |
649 | &features) == 0) { | |
650 | VERIFY(nvlist_dup(features, &spa->spa_label_features, | |
651 | 0) == 0); | |
652 | } | |
653 | ||
428870ff | 654 | VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0); |
9ae529ec CS |
655 | } |
656 | ||
657 | if (spa->spa_label_features == NULL) { | |
658 | VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME, | |
79c76d5b | 659 | KM_SLEEP) == 0); |
9ae529ec | 660 | } |
428870ff | 661 | |
c3520e7f MA |
662 | spa->spa_min_ashift = INT_MAX; |
663 | spa->spa_max_ashift = 0; | |
664 | ||
e8a20144 GN |
665 | /* Reset cached value */ |
666 | spa->spa_dedup_dspace = ~0ULL; | |
667 | ||
b0bc7a84 MG |
668 | /* |
669 | * As a pool is being created, treat all features as disabled by | |
670 | * setting SPA_FEATURE_DISABLED for all entries in the feature | |
671 | * refcount cache. | |
672 | */ | |
1c27024e | 673 | for (int i = 0; i < SPA_FEATURES; i++) { |
b0bc7a84 MG |
674 | spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED; |
675 | } | |
676 | ||
34dc7c2f BB |
677 | return (spa); |
678 | } | |
679 | ||
680 | /* | |
681 | * Removes a spa_t from the namespace, freeing up any memory used. Requires | |
682 | * spa_namespace_lock. This is called only after the spa_t has been closed and | |
683 | * deactivated. | |
684 | */ | |
685 | void | |
686 | spa_remove(spa_t *spa) | |
687 | { | |
b128c09f BB |
688 | spa_config_dirent_t *dp; |
689 | ||
34dc7c2f BB |
690 | ASSERT(MUTEX_HELD(&spa_namespace_lock)); |
691 | ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); | |
0c66c32d | 692 | ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0); |
34dc7c2f | 693 | |
428870ff BB |
694 | nvlist_free(spa->spa_config_splitting); |
695 | ||
34dc7c2f BB |
696 | avl_remove(&spa_namespace_avl, spa); |
697 | cv_broadcast(&spa_namespace_cv); | |
698 | ||
0336f3d0 | 699 | if (spa->spa_root) |
34dc7c2f | 700 | spa_strfree(spa->spa_root); |
34dc7c2f | 701 | |
b128c09f BB |
702 | while ((dp = list_head(&spa->spa_config_list)) != NULL) { |
703 | list_remove(&spa->spa_config_list, dp); | |
704 | if (dp->scd_path != NULL) | |
705 | spa_strfree(dp->scd_path); | |
706 | kmem_free(dp, sizeof (spa_config_dirent_t)); | |
707 | } | |
34dc7c2f | 708 | |
3dfb57a3 | 709 | avl_destroy(&spa->spa_alloc_tree); |
b128c09f | 710 | list_destroy(&spa->spa_config_list); |
34dc7c2f | 711 | |
9ae529ec | 712 | nvlist_free(spa->spa_label_features); |
572e2857 | 713 | nvlist_free(spa->spa_load_info); |
417104bd | 714 | nvlist_free(spa->spa_feat_stats); |
34dc7c2f BB |
715 | spa_config_set(spa, NULL); |
716 | ||
717 | refcount_destroy(&spa->spa_refcount); | |
718 | ||
1421c891 | 719 | spa_stats_destroy(spa); |
b128c09f | 720 | spa_config_lock_destroy(spa); |
34dc7c2f | 721 | |
1c27024e | 722 | for (int t = 0; t < TXG_SIZE; t++) |
428870ff BB |
723 | bplist_destroy(&spa->spa_free_bplist[t]); |
724 | ||
3c67d83a TH |
725 | zio_checksum_templates_free(spa); |
726 | ||
34dc7c2f | 727 | cv_destroy(&spa->spa_async_cv); |
0c66c32d | 728 | cv_destroy(&spa->spa_evicting_os_cv); |
428870ff | 729 | cv_destroy(&spa->spa_proc_cv); |
34dc7c2f | 730 | cv_destroy(&spa->spa_scrub_io_cv); |
b128c09f | 731 | cv_destroy(&spa->spa_suspend_cv); |
34dc7c2f | 732 | |
3dfb57a3 | 733 | mutex_destroy(&spa->spa_alloc_lock); |
34dc7c2f | 734 | mutex_destroy(&spa->spa_async_lock); |
34dc7c2f | 735 | mutex_destroy(&spa->spa_errlist_lock); |
428870ff | 736 | mutex_destroy(&spa->spa_errlog_lock); |
0c66c32d | 737 | mutex_destroy(&spa->spa_evicting_os_lock); |
34dc7c2f | 738 | mutex_destroy(&spa->spa_history_lock); |
428870ff | 739 | mutex_destroy(&spa->spa_proc_lock); |
34dc7c2f | 740 | mutex_destroy(&spa->spa_props_lock); |
3c67d83a | 741 | mutex_destroy(&spa->spa_cksum_tmpls_lock); |
428870ff | 742 | mutex_destroy(&spa->spa_scrub_lock); |
b128c09f | 743 | mutex_destroy(&spa->spa_suspend_lock); |
428870ff | 744 | mutex_destroy(&spa->spa_vdev_top_lock); |
4eb30c68 | 745 | mutex_destroy(&spa->spa_feat_stats_lock); |
34dc7c2f BB |
746 | |
747 | kmem_free(spa, sizeof (spa_t)); | |
748 | } | |
749 | ||
750 | /* | |
751 | * Given a pool, return the next pool in the namespace, or NULL if there is | |
752 | * none. If 'prev' is NULL, return the first pool. | |
753 | */ | |
754 | spa_t * | |
755 | spa_next(spa_t *prev) | |
756 | { | |
757 | ASSERT(MUTEX_HELD(&spa_namespace_lock)); | |
758 | ||
759 | if (prev) | |
760 | return (AVL_NEXT(&spa_namespace_avl, prev)); | |
761 | else | |
762 | return (avl_first(&spa_namespace_avl)); | |
763 | } | |
764 | ||
765 | /* | |
766 | * ========================================================================== | |
767 | * SPA refcount functions | |
768 | * ========================================================================== | |
769 | */ | |
770 | ||
771 | /* | |
772 | * Add a reference to the given spa_t. Must have at least one reference, or | |
773 | * have the namespace lock held. | |
774 | */ | |
775 | void | |
776 | spa_open_ref(spa_t *spa, void *tag) | |
777 | { | |
b128c09f | 778 | ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref || |
34dc7c2f | 779 | MUTEX_HELD(&spa_namespace_lock)); |
34dc7c2f BB |
780 | (void) refcount_add(&spa->spa_refcount, tag); |
781 | } | |
782 | ||
783 | /* | |
784 | * Remove a reference to the given spa_t. Must have at least one reference, or | |
785 | * have the namespace lock held. | |
786 | */ | |
787 | void | |
788 | spa_close(spa_t *spa, void *tag) | |
789 | { | |
b128c09f | 790 | ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref || |
34dc7c2f | 791 | MUTEX_HELD(&spa_namespace_lock)); |
34dc7c2f BB |
792 | (void) refcount_remove(&spa->spa_refcount, tag); |
793 | } | |
794 | ||
0c66c32d JG |
795 | /* |
796 | * Remove a reference to the given spa_t held by a dsl dir that is | |
797 | * being asynchronously released. Async releases occur from a taskq | |
798 | * performing eviction of dsl datasets and dirs. The namespace lock | |
799 | * isn't held and the hold by the object being evicted may contribute to | |
800 | * spa_minref (e.g. dataset or directory released during pool export), | |
801 | * so the asserts in spa_close() do not apply. | |
802 | */ | |
803 | void | |
804 | spa_async_close(spa_t *spa, void *tag) | |
805 | { | |
806 | (void) refcount_remove(&spa->spa_refcount, tag); | |
807 | } | |
808 | ||
34dc7c2f BB |
809 | /* |
810 | * Check to see if the spa refcount is zero. Must be called with | |
b128c09f | 811 | * spa_namespace_lock held. We really compare against spa_minref, which is the |
34dc7c2f BB |
812 | * number of references acquired when opening a pool. |
813 | */ | |
814 | boolean_t | |
815 | spa_refcount_zero(spa_t *spa) | |
816 | { | |
817 | ASSERT(MUTEX_HELD(&spa_namespace_lock)); | |
818 | ||
b128c09f | 819 | return (refcount_count(&spa->spa_refcount) == spa->spa_minref); |
34dc7c2f BB |
820 | } |
821 | ||
822 | /* | |
823 | * ========================================================================== | |
824 | * SPA spare and l2cache tracking | |
825 | * ========================================================================== | |
826 | */ | |
827 | ||
828 | /* | |
829 | * Hot spares and cache devices are tracked using the same code below, | |
830 | * for 'auxiliary' devices. | |
831 | */ | |
832 | ||
833 | typedef struct spa_aux { | |
834 | uint64_t aux_guid; | |
835 | uint64_t aux_pool; | |
836 | avl_node_t aux_avl; | |
837 | int aux_count; | |
838 | } spa_aux_t; | |
839 | ||
ee36c709 | 840 | static inline int |
34dc7c2f BB |
841 | spa_aux_compare(const void *a, const void *b) |
842 | { | |
ee36c709 GN |
843 | const spa_aux_t *sa = (const spa_aux_t *)a; |
844 | const spa_aux_t *sb = (const spa_aux_t *)b; | |
34dc7c2f | 845 | |
ee36c709 | 846 | return (AVL_CMP(sa->aux_guid, sb->aux_guid)); |
34dc7c2f BB |
847 | } |
848 | ||
849 | void | |
850 | spa_aux_add(vdev_t *vd, avl_tree_t *avl) | |
851 | { | |
852 | avl_index_t where; | |
853 | spa_aux_t search; | |
854 | spa_aux_t *aux; | |
855 | ||
856 | search.aux_guid = vd->vdev_guid; | |
857 | if ((aux = avl_find(avl, &search, &where)) != NULL) { | |
858 | aux->aux_count++; | |
859 | } else { | |
79c76d5b | 860 | aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP); |
34dc7c2f BB |
861 | aux->aux_guid = vd->vdev_guid; |
862 | aux->aux_count = 1; | |
863 | avl_insert(avl, aux, where); | |
864 | } | |
865 | } | |
866 | ||
867 | void | |
868 | spa_aux_remove(vdev_t *vd, avl_tree_t *avl) | |
869 | { | |
870 | spa_aux_t search; | |
871 | spa_aux_t *aux; | |
872 | avl_index_t where; | |
873 | ||
874 | search.aux_guid = vd->vdev_guid; | |
875 | aux = avl_find(avl, &search, &where); | |
876 | ||
877 | ASSERT(aux != NULL); | |
878 | ||
879 | if (--aux->aux_count == 0) { | |
880 | avl_remove(avl, aux); | |
881 | kmem_free(aux, sizeof (spa_aux_t)); | |
882 | } else if (aux->aux_pool == spa_guid(vd->vdev_spa)) { | |
883 | aux->aux_pool = 0ULL; | |
884 | } | |
885 | } | |
886 | ||
887 | boolean_t | |
b128c09f | 888 | spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl) |
34dc7c2f BB |
889 | { |
890 | spa_aux_t search, *found; | |
34dc7c2f BB |
891 | |
892 | search.aux_guid = guid; | |
b128c09f | 893 | found = avl_find(avl, &search, NULL); |
34dc7c2f BB |
894 | |
895 | if (pool) { | |
896 | if (found) | |
897 | *pool = found->aux_pool; | |
898 | else | |
899 | *pool = 0ULL; | |
900 | } | |
901 | ||
b128c09f BB |
902 | if (refcnt) { |
903 | if (found) | |
904 | *refcnt = found->aux_count; | |
905 | else | |
906 | *refcnt = 0; | |
907 | } | |
908 | ||
34dc7c2f BB |
909 | return (found != NULL); |
910 | } | |
911 | ||
912 | void | |
913 | spa_aux_activate(vdev_t *vd, avl_tree_t *avl) | |
914 | { | |
915 | spa_aux_t search, *found; | |
916 | avl_index_t where; | |
917 | ||
918 | search.aux_guid = vd->vdev_guid; | |
919 | found = avl_find(avl, &search, &where); | |
920 | ASSERT(found != NULL); | |
921 | ASSERT(found->aux_pool == 0ULL); | |
922 | ||
923 | found->aux_pool = spa_guid(vd->vdev_spa); | |
924 | } | |
925 | ||
926 | /* | |
927 | * Spares are tracked globally due to the following constraints: | |
928 | * | |
929 | * - A spare may be part of multiple pools. | |
930 | * - A spare may be added to a pool even if it's actively in use within | |
931 | * another pool. | |
932 | * - A spare in use in any pool can only be the source of a replacement if | |
933 | * the target is a spare in the same pool. | |
934 | * | |
935 | * We keep track of all spares on the system through the use of a reference | |
936 | * counted AVL tree. When a vdev is added as a spare, or used as a replacement | |
937 | * spare, then we bump the reference count in the AVL tree. In addition, we set | |
938 | * the 'vdev_isspare' member to indicate that the device is a spare (active or | |
939 | * inactive). When a spare is made active (used to replace a device in the | |
940 | * pool), we also keep track of which pool it's been made a part of. | |
941 | * | |
942 | * The 'spa_spare_lock' protects the AVL tree. These functions are normally | |
943 | * called under the spa_namespace lock as part of vdev reconfiguration. The | |
944 | * separate spare lock exists for the status query path, which does not need to | |
945 | * be completely consistent with respect to other vdev configuration changes. | |
946 | */ | |
947 | ||
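As a brief illustration of the status-query path mentioned above (the helper is hypothetical), a caller can ask whether a guid is a known spare, and whether it is currently active in some pool, without taking spa_namespace_lock; spa_spare_exists() takes spa_spare_lock internally:

	/*
	 * Illustrative sketch only: query the global spare tree without the
	 * namespace lock.  A non-zero pool guid means the spare is active.
	 */
	static boolean_t
	example_spare_is_active(uint64_t guid)
	{
		uint64_t pool = 0;
		int refcnt = 0;

		if (!spa_spare_exists(guid, &pool, &refcnt))
			return (B_FALSE);

		return (pool != 0ULL);
	}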
948 | static int | |
949 | spa_spare_compare(const void *a, const void *b) | |
950 | { | |
951 | return (spa_aux_compare(a, b)); | |
952 | } | |
953 | ||
954 | void | |
955 | spa_spare_add(vdev_t *vd) | |
956 | { | |
957 | mutex_enter(&spa_spare_lock); | |
958 | ASSERT(!vd->vdev_isspare); | |
959 | spa_aux_add(vd, &spa_spare_avl); | |
960 | vd->vdev_isspare = B_TRUE; | |
961 | mutex_exit(&spa_spare_lock); | |
962 | } | |
963 | ||
964 | void | |
965 | spa_spare_remove(vdev_t *vd) | |
966 | { | |
967 | mutex_enter(&spa_spare_lock); | |
968 | ASSERT(vd->vdev_isspare); | |
969 | spa_aux_remove(vd, &spa_spare_avl); | |
970 | vd->vdev_isspare = B_FALSE; | |
971 | mutex_exit(&spa_spare_lock); | |
972 | } | |
973 | ||
974 | boolean_t | |
b128c09f | 975 | spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt) |
34dc7c2f BB |
976 | { |
977 | boolean_t found; | |
978 | ||
979 | mutex_enter(&spa_spare_lock); | |
b128c09f | 980 | found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl); |
34dc7c2f BB |
981 | mutex_exit(&spa_spare_lock); |
982 | ||
983 | return (found); | |
984 | } | |
985 | ||
986 | void | |
987 | spa_spare_activate(vdev_t *vd) | |
988 | { | |
989 | mutex_enter(&spa_spare_lock); | |
990 | ASSERT(vd->vdev_isspare); | |
991 | spa_aux_activate(vd, &spa_spare_avl); | |
992 | mutex_exit(&spa_spare_lock); | |
993 | } | |
994 | ||
995 | /* | |
996 | * Level 2 ARC devices are tracked globally for the same reasons as spares. | |
997 | * Cache devices currently only support one pool per cache device, and so | |
998 | * for these devices the aux reference count is currently unused beyond 1. | |
999 | */ | |
1000 | ||
1001 | static int | |
1002 | spa_l2cache_compare(const void *a, const void *b) | |
1003 | { | |
1004 | return (spa_aux_compare(a, b)); | |
1005 | } | |
1006 | ||
1007 | void | |
1008 | spa_l2cache_add(vdev_t *vd) | |
1009 | { | |
1010 | mutex_enter(&spa_l2cache_lock); | |
1011 | ASSERT(!vd->vdev_isl2cache); | |
1012 | spa_aux_add(vd, &spa_l2cache_avl); | |
1013 | vd->vdev_isl2cache = B_TRUE; | |
1014 | mutex_exit(&spa_l2cache_lock); | |
1015 | } | |
1016 | ||
1017 | void | |
1018 | spa_l2cache_remove(vdev_t *vd) | |
1019 | { | |
1020 | mutex_enter(&spa_l2cache_lock); | |
1021 | ASSERT(vd->vdev_isl2cache); | |
1022 | spa_aux_remove(vd, &spa_l2cache_avl); | |
1023 | vd->vdev_isl2cache = B_FALSE; | |
1024 | mutex_exit(&spa_l2cache_lock); | |
1025 | } | |
1026 | ||
1027 | boolean_t | |
1028 | spa_l2cache_exists(uint64_t guid, uint64_t *pool) | |
1029 | { | |
1030 | boolean_t found; | |
1031 | ||
1032 | mutex_enter(&spa_l2cache_lock); | |
b128c09f | 1033 | found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl); |
34dc7c2f BB |
1034 | mutex_exit(&spa_l2cache_lock); |
1035 | ||
1036 | return (found); | |
1037 | } | |
1038 | ||
1039 | void | |
1040 | spa_l2cache_activate(vdev_t *vd) | |
1041 | { | |
1042 | mutex_enter(&spa_l2cache_lock); | |
1043 | ASSERT(vd->vdev_isl2cache); | |
1044 | spa_aux_activate(vd, &spa_l2cache_avl); | |
1045 | mutex_exit(&spa_l2cache_lock); | |
1046 | } | |
1047 | ||
34dc7c2f BB |
1048 | /* |
1049 | * ========================================================================== | |
1050 | * SPA vdev locking | |
1051 | * ========================================================================== | |
1052 | */ | |
1053 | ||
1054 | /* | |
1055 | * Lock the given spa_t for the purpose of adding or removing a vdev. | |
1056 | * Grabs the global spa_namespace_lock plus the spa config lock for writing. | |
1057 | * It returns the next transaction group for the spa_t. | |
1058 | */ | |
1059 | uint64_t | |
1060 | spa_vdev_enter(spa_t *spa) | |
1061 | { | |
428870ff | 1062 | mutex_enter(&spa->spa_vdev_top_lock); |
34dc7c2f | 1063 | mutex_enter(&spa_namespace_lock); |
428870ff BB |
1064 | return (spa_vdev_config_enter(spa)); |
1065 | } | |
1066 | ||
1067 | /* | |
1068 | * Internal implementation for spa_vdev_enter(). Used when a vdev | |
1069 | * operation requires multiple syncs (i.e. removing a device) while | |
1070 | * keeping the spa_namespace_lock held. | |
1071 | */ | |
1072 | uint64_t | |
1073 | spa_vdev_config_enter(spa_t *spa) | |
1074 | { | |
1075 | ASSERT(MUTEX_HELD(&spa_namespace_lock)); | |
34dc7c2f | 1076 | |
b128c09f | 1077 | spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); |
34dc7c2f BB |
1078 | |
1079 | return (spa_last_synced_txg(spa) + 1); | |
1080 | } | |
1081 | ||
1082 | /* | |
428870ff BB |
1083 | * Used in combination with spa_vdev_config_enter() to allow the syncing |
1084 | * of multiple transactions without releasing the spa_namespace_lock. | |
34dc7c2f | 1085 | */ |
428870ff BB |
1086 | void |
1087 | spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag) | |
34dc7c2f | 1088 | { |
1c27024e DB |
1089 | ASSERT(MUTEX_HELD(&spa_namespace_lock)); |
1090 | ||
34dc7c2f BB |
1091 | int config_changed = B_FALSE; |
1092 | ||
1093 | ASSERT(txg > spa_last_synced_txg(spa)); | |
1094 | ||
b128c09f BB |
1095 | spa->spa_pending_vdev = NULL; |
1096 | ||
34dc7c2f BB |
1097 | /* |
1098 | * Reassess the DTLs. | |
1099 | */ | |
1100 | vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE); | |
1101 | ||
b128c09f | 1102 | if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) { |
34dc7c2f | 1103 | config_changed = B_TRUE; |
428870ff | 1104 | spa->spa_config_generation++; |
34dc7c2f BB |
1105 | } |
1106 | ||
428870ff BB |
1107 | /* |
1108 | * Verify the metaslab classes. | |
1109 | */ | |
1110 | ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0); | |
1111 | ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0); | |
1112 | ||
b128c09f | 1113 | spa_config_exit(spa, SCL_ALL, spa); |
34dc7c2f | 1114 | |
428870ff BB |
1115 | /* |
1116 | * Panic the system if the specified tag requires it. This | |
1117 | * is useful for ensuring that configurations are updated | |
1118 | * transactionally. | |
1119 | */ | |
1120 | if (zio_injection_enabled) | |
1121 | zio_handle_panic_injection(spa, tag, 0); | |
1122 | ||
34dc7c2f BB |
1123 | /* |
1124 | * Note: this txg_wait_synced() is important because it ensures | |
1125 | * that there won't be more than one config change per txg. | |
1126 | * This allows us to use the txg as the generation number. | |
1127 | */ | |
1128 | if (error == 0) | |
1129 | txg_wait_synced(spa->spa_dsl_pool, txg); | |
1130 | ||
1131 | if (vd != NULL) { | |
93cf2076 | 1132 | ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL); |
fb5f0bc8 | 1133 | spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); |
34dc7c2f | 1134 | vdev_free(vd); |
fb5f0bc8 | 1135 | spa_config_exit(spa, SCL_ALL, spa); |
34dc7c2f BB |
1136 | } |
1137 | ||
1138 | /* | |
1139 | * If the config changed, update the config cache. | |
1140 | */ | |
1141 | if (config_changed) | |
a1d477c2 | 1142 | spa_write_cachefile(spa, B_FALSE, B_TRUE); |
428870ff | 1143 | } |
34dc7c2f | 1144 | |
428870ff BB |
1145 | /* |
1146 | * Unlock the spa_t after adding or removing a vdev. Besides undoing the | |
1147 | * locking of spa_vdev_enter(), we also want to make sure the transactions have | |
1148 | * synced to disk, and then update the global configuration cache with the new | |
1149 | * information. | |
1150 | */ | |
1151 | int | |
1152 | spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error) | |
1153 | { | |
1154 | spa_vdev_config_exit(spa, vd, txg, error, FTAG); | |
34dc7c2f | 1155 | mutex_exit(&spa_namespace_lock); |
428870ff | 1156 | mutex_exit(&spa->spa_vdev_top_lock); |
34dc7c2f BB |
1157 | |
1158 | return (error); | |
1159 | } | |
1160 | ||
b128c09f BB |
1161 | /* |
1162 | * Lock the given spa_t for the purpose of changing vdev state. | |
1163 | */ | |
1164 | void | |
428870ff | 1165 | spa_vdev_state_enter(spa_t *spa, int oplocks) |
b128c09f | 1166 | { |
428870ff BB |
1167 | int locks = SCL_STATE_ALL | oplocks; |
1168 | ||
1169 | /* | |
1170 | * Root pools may need to read from the underlying devfs filesystem | |
1171 | * when opening up a vdev. Unfortunately if we're holding the | |
1172 | * SCL_ZIO lock it will result in a deadlock when we try to issue | |
1173 | * the read from the root filesystem. Instead we "prefetch" | |
1174 | * the associated vnodes that we need prior to opening the | |
1175 | * underlying devices and cache them so that we can prevent | |
1176 | * any I/O when we are doing the actual open. | |
1177 | */ | |
1178 | if (spa_is_root(spa)) { | |
1179 | int low = locks & ~(SCL_ZIO - 1); | |
1180 | int high = locks & ~low; | |
1181 | ||
1182 | spa_config_enter(spa, high, spa, RW_WRITER); | |
1183 | vdev_hold(spa->spa_root_vdev); | |
1184 | spa_config_enter(spa, low, spa, RW_WRITER); | |
1185 | } else { | |
1186 | spa_config_enter(spa, locks, spa, RW_WRITER); | |
1187 | } | |
1188 | spa->spa_vdev_locks = locks; | |
b128c09f BB |
1189 | } |
1190 | ||
1191 | int | |
1192 | spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error) | |
1193 | { | |
428870ff | 1194 | boolean_t config_changed = B_FALSE; |
4a283c7f TH |
1195 | vdev_t *vdev_top; |
1196 | ||
1197 | if (vd == NULL || vd == spa->spa_root_vdev) { | |
1198 | vdev_top = spa->spa_root_vdev; | |
1199 | } else { | |
1200 | vdev_top = vd->vdev_top; | |
1201 | } | |
428870ff BB |
1202 | |
1203 | if (vd != NULL || error == 0) | |
4a283c7f | 1204 | vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE); |
428870ff BB |
1205 | |
1206 | if (vd != NULL) { | |
4a283c7f TH |
1207 | if (vd != spa->spa_root_vdev) |
1208 | vdev_state_dirty(vdev_top); | |
1209 | ||
428870ff BB |
1210 | config_changed = B_TRUE; |
1211 | spa->spa_config_generation++; | |
1212 | } | |
b128c09f | 1213 | |
428870ff BB |
1214 | if (spa_is_root(spa)) |
1215 | vdev_rele(spa->spa_root_vdev); | |
1216 | ||
1217 | ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL); | |
1218 | spa_config_exit(spa, spa->spa_vdev_locks, spa); | |
b128c09f | 1219 | |
fb5f0bc8 BB |
1220 | /* |
1221 | * If anything changed, wait for it to sync. This ensures that, | |
1222 | * from the system administrator's perspective, zpool(1M) commands | |
1223 | * are synchronous. This is important for things like zpool offline: | |
1224 | * when the command completes, you expect no further I/O from ZFS. | |
1225 | */ | |
1226 | if (vd != NULL) | |
1227 | txg_wait_synced(spa->spa_dsl_pool, 0); | |
1228 | ||
428870ff BB |
1229 | /* |
1230 | * If the config changed, update the config cache. | |
1231 | */ | |
1232 | if (config_changed) { | |
1233 | mutex_enter(&spa_namespace_lock); | |
a1d477c2 | 1234 | spa_write_cachefile(spa, B_FALSE, B_TRUE); |
428870ff BB |
1235 | mutex_exit(&spa_namespace_lock); |
1236 | } | |
1237 | ||
b128c09f BB |
1238 | return (error); |
1239 | } | |
1240 | ||
34dc7c2f BB |
1241 | /* |
1242 | * ========================================================================== | |
1243 | * Miscellaneous functions | |
1244 | * ========================================================================== | |
1245 | */ | |
1246 | ||
9ae529ec | 1247 | void |
b0bc7a84 | 1248 | spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx) |
9ae529ec | 1249 | { |
fa86b5db MA |
1250 | if (!nvlist_exists(spa->spa_label_features, feature)) { |
1251 | fnvlist_add_boolean(spa->spa_label_features, feature); | |
b0bc7a84 MG |
1252 | /* |
1253 | * When we are creating the pool (tx_txg==TXG_INITIAL), we can't | |
1254 | * dirty the vdev config because lock SCL_CONFIG is not held. | |
1255 | * Thankfully, in this case we don't need to dirty the config | |
1256 | * because it will be written out anyway when we finish | |
1257 | * creating the pool. | |
1258 | */ | |
1259 | if (tx->tx_txg != TXG_INITIAL) | |
1260 | vdev_config_dirty(spa->spa_root_vdev); | |
fa86b5db | 1261 | } |
9ae529ec CS |
1262 | } |
1263 | ||
1264 | void | |
1265 | spa_deactivate_mos_feature(spa_t *spa, const char *feature) | |
1266 | { | |
fa86b5db MA |
1267 | if (nvlist_remove_all(spa->spa_label_features, feature) == 0) |
1268 | vdev_config_dirty(spa->spa_root_vdev); | |
9ae529ec CS |
1269 | } |
1270 | ||
34dc7c2f BB |
1271 | /* |
1272 | * Rename a spa_t. | |
1273 | */ | |
1274 | int | |
1275 | spa_rename(const char *name, const char *newname) | |
1276 | { | |
1277 | spa_t *spa; | |
1278 | int err; | |
1279 | ||
1280 | /* | |
1281 | * Lookup the spa_t and grab the config lock for writing. We need to | |
1282 | * actually open the pool so that we can sync out the necessary labels. | |
1283 | * It's OK to call spa_open() with the namespace lock held because we | |
1284 | * allow recursive calls for other reasons. | |
1285 | */ | |
1286 | mutex_enter(&spa_namespace_lock); | |
1287 | if ((err = spa_open(name, &spa, FTAG)) != 0) { | |
1288 | mutex_exit(&spa_namespace_lock); | |
1289 | return (err); | |
1290 | } | |
1291 | ||
b128c09f | 1292 | spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); |
34dc7c2f BB |
1293 | |
1294 | avl_remove(&spa_namespace_avl, spa); | |
b128c09f | 1295 | (void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name)); |
34dc7c2f BB |
1296 | avl_add(&spa_namespace_avl, spa); |
1297 | ||
1298 | /* | |
1299 | * Sync all labels to disk with the new names by marking the root vdev | |
1300 | * dirty and waiting for it to sync. It will pick up the new pool name | |
1301 | * during the sync. | |
1302 | */ | |
1303 | vdev_config_dirty(spa->spa_root_vdev); | |
1304 | ||
b128c09f | 1305 | spa_config_exit(spa, SCL_ALL, FTAG); |
34dc7c2f BB |
1306 | |
1307 | txg_wait_synced(spa->spa_dsl_pool, 0); | |
1308 | ||
1309 | /* | |
1310 | * Sync the updated config cache. | |
1311 | */ | |
a1d477c2 | 1312 | spa_write_cachefile(spa, B_FALSE, B_TRUE); |
34dc7c2f BB |
1313 | |
1314 | spa_close(spa, FTAG); | |
1315 | ||
1316 | mutex_exit(&spa_namespace_lock); | |
1317 | ||
1318 | return (0); | |
1319 | } | |
1320 | ||
34dc7c2f | 1321 | /* |
572e2857 BB |
1322 | * Return the spa_t associated with the given pool_guid, if it exists. If |
1323 | * device_guid is non-zero, determine whether the pool exists *and* contains | |
1324 | * a device with the specified device_guid. | |
34dc7c2f | 1325 | */ |
572e2857 BB |
1326 | spa_t * |
1327 | spa_by_guid(uint64_t pool_guid, uint64_t device_guid) | |
34dc7c2f BB |
1328 | { |
1329 | spa_t *spa; | |
1330 | avl_tree_t *t = &spa_namespace_avl; | |
1331 | ||
1332 | ASSERT(MUTEX_HELD(&spa_namespace_lock)); | |
1333 | ||
1334 | for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) { | |
1335 | if (spa->spa_state == POOL_STATE_UNINITIALIZED) | |
1336 | continue; | |
1337 | if (spa->spa_root_vdev == NULL) | |
1338 | continue; | |
1339 | if (spa_guid(spa) == pool_guid) { | |
1340 | if (device_guid == 0) | |
1341 | break; | |
1342 | ||
1343 | if (vdev_lookup_by_guid(spa->spa_root_vdev, | |
1344 | device_guid) != NULL) | |
1345 | break; | |
1346 | ||
1347 | /* | |
1348 | * Check any devices we may be in the process of adding. | |
1349 | */ | |
1350 | if (spa->spa_pending_vdev) { | |
1351 | if (vdev_lookup_by_guid(spa->spa_pending_vdev, | |
1352 | device_guid) != NULL) | |
1353 | break; | |
1354 | } | |
1355 | } | |
1356 | } | |
1357 | ||
572e2857 BB |
1358 | return (spa); |
1359 | } | |
1360 | ||
1361 | /* | |
1362 | * Determine whether a pool with the given pool_guid exists. | |
1363 | */ | |
1364 | boolean_t | |
1365 | spa_guid_exists(uint64_t pool_guid, uint64_t device_guid) | |
1366 | { | |
1367 | return (spa_by_guid(pool_guid, device_guid) != NULL); | |
34dc7c2f BB |
1368 | } |
1369 | ||
1370 | char * | |
1371 | spa_strdup(const char *s) | |
1372 | { | |
1373 | size_t len; | |
1374 | char *new; | |
1375 | ||
1376 | len = strlen(s); | |
79c76d5b | 1377 | new = kmem_alloc(len + 1, KM_SLEEP); |
34dc7c2f BB |
1378 | bcopy(s, new, len); |
1379 | new[len] = '\0'; | |
1380 | ||
1381 | return (new); | |
1382 | } | |
1383 | ||
1384 | void | |
1385 | spa_strfree(char *s) | |
1386 | { | |
1387 | kmem_free(s, strlen(s) + 1); | |
1388 | } | |
1389 | ||
1390 | uint64_t | |
1391 | spa_get_random(uint64_t range) | |
1392 | { | |
1393 | uint64_t r; | |
1394 | ||
1395 | ASSERT(range != 0); | |
1396 | ||
379ca9cf OF |
1397 | if (range == 1) |
1398 | return (0); | |
1399 | ||
34dc7c2f BB |
1400 | (void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t)); |
1401 | ||
1402 | return (r % range); | |
1403 | } | |
1404 | ||
428870ff BB |
1405 | uint64_t |
1406 | spa_generate_guid(spa_t *spa) | |
34dc7c2f | 1407 | { |
428870ff | 1408 | uint64_t guid = spa_get_random(-1ULL); |
34dc7c2f | 1409 | |
428870ff BB |
1410 | if (spa != NULL) { |
1411 | while (guid == 0 || spa_guid_exists(spa_guid(spa), guid)) | |
1412 | guid = spa_get_random(-1ULL); | |
1413 | } else { | |
1414 | while (guid == 0 || spa_guid_exists(guid, 0)) | |
1415 | guid = spa_get_random(-1ULL); | |
34dc7c2f BB |
1416 | } |
1417 | ||
428870ff BB |
1418 | return (guid); |
1419 | } | |
1420 | ||
1421 | void | |
b0bc7a84 | 1422 | snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp) |
428870ff | 1423 | { |
9ae529ec | 1424 | char type[256]; |
428870ff BB |
1425 | char *checksum = NULL; |
1426 | char *compress = NULL; | |
34dc7c2f | 1427 | |
428870ff | 1428 | if (bp != NULL) { |
9ae529ec CS |
1429 | if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) { |
1430 | dmu_object_byteswap_t bswap = | |
1431 | DMU_OT_BYTESWAP(BP_GET_TYPE(bp)); | |
1432 | (void) snprintf(type, sizeof (type), "bswap %s %s", | |
1433 | DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ? | |
1434 | "metadata" : "data", | |
1435 | dmu_ot_byteswap[bswap].ob_name); | |
1436 | } else { | |
1437 | (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name, | |
1438 | sizeof (type)); | |
1439 | } | |
9b67f605 MA |
1440 | if (!BP_IS_EMBEDDED(bp)) { |
1441 | checksum = | |
1442 | zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name; | |
1443 | } | |
428870ff | 1444 | compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name; |
34dc7c2f BB |
1445 | } |
1446 | ||
b0bc7a84 | 1447 | SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum, |
5c27ec10 | 1448 | compress); |
34dc7c2f BB |
1449 | } |
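
/*
 * Illustrative usage sketch (hypothetical, for exposition only): the usual
 * pattern is to format a block pointer into an on-stack buffer for debug
 * output. BP_SPRINTF_LEN (from sys/spa.h) is assumed to be the conventional
 * buffer size; the function name is hypothetical.
 */
#if 0	/* example only */
static void
example_log_blkptr(const blkptr_t *bp)
{
	char blkbuf[BP_SPRINTF_LEN];

	snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
	zfs_dbgmsg("bp: %s", blkbuf);
}
#endif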
1450 | ||
1451 | void | |
1452 | spa_freeze(spa_t *spa) | |
1453 | { | |
1454 | uint64_t freeze_txg = 0; | |
1455 | ||
b128c09f | 1456 | spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); |
34dc7c2f BB |
1457 | if (spa->spa_freeze_txg == UINT64_MAX) { |
1458 | freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE; | |
1459 | spa->spa_freeze_txg = freeze_txg; | |
1460 | } | |
b128c09f | 1461 | spa_config_exit(spa, SCL_ALL, FTAG); |
34dc7c2f BB |
1462 | if (freeze_txg != 0) |
1463 | txg_wait_synced(spa_get_dsl(spa), freeze_txg); | |
1464 | } | |
1465 | ||
0b39b9f9 PS |
1466 | void |
1467 | zfs_panic_recover(const char *fmt, ...) | |
1468 | { | |
1469 | va_list adx; | |
1470 | ||
1471 | va_start(adx, fmt); | |
1472 | vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx); | |
1473 | va_end(adx); | |
1474 | } | |
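
/*
 * Illustrative usage sketch (hypothetical, for exposition only):
 * zfs_panic_recover() is meant for "should be impossible" inconsistencies.
 * With the zfs_recover module parameter set it only logs a warning;
 * otherwise it panics. The function and variable names below are
 * hypothetical.
 */
#if 0	/* example only */
static void
example_verify_birth_txg(uint64_t birth_txg, uint64_t current_txg)
{
	if (birth_txg > current_txg) {
		zfs_panic_recover("block born in txg %llu after current "
		    "txg %llu", (u_longlong_t)birth_txg,
		    (u_longlong_t)current_txg);
	}
}
#endif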
1475 | ||
428870ff BB |
1476 | /* |
1477 | * This is a stripped-down version of strtoull, suitable only for converting | |
d3cc8b15 | 1478 | * lowercase hexadecimal numbers that don't overflow. |
428870ff BB |
1479 | */ |
1480 | uint64_t | |
e19572e4 | 1481 | zfs_strtonum(const char *str, char **nptr) |
428870ff BB |
1482 | { |
1483 | uint64_t val = 0; | |
1484 | char c; | |
1485 | int digit; | |
1486 | ||
1487 | while ((c = *str) != '\0') { | |
1488 | if (c >= '0' && c <= '9') | |
1489 | digit = c - '0'; | |
1490 | else if (c >= 'a' && c <= 'f') | |
1491 | digit = 10 + c - 'a'; | |
1492 | else | |
1493 | break; | |
1494 | ||
1495 | val *= 16; | |
1496 | val += digit; | |
1497 | ||
1498 | str++; | |
1499 | } | |
1500 | ||
1501 | if (nptr) | |
1502 | *nptr = (char *)str; | |
1503 | ||
1504 | return (val); | |
1505 | } | |
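
/*
 * Illustrative usage sketch (hypothetical, for exposition only):
 * zfs_strtonum() consumes leading lowercase hex digits and, via nptr,
 * reports where parsing stopped. Parsing "1a2f/suffix" yields 0x1a2f and
 * leaves nptr pointing at the '/'.
 */
#if 0	/* example only */
static void
example_parse_hex(void)
{
	char *end;
	uint64_t val = zfs_strtonum("1a2f/suffix", &end);

	ASSERT3U(val, ==, 0x1a2f);
	ASSERT3S(*end, ==, '/');
}
#endif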
1506 | ||
34dc7c2f BB |
1507 | /* |
1508 | * ========================================================================== | |
1509 | * Accessor functions | |
1510 | * ========================================================================== | |
1511 | */ | |
1512 | ||
b128c09f BB |
1513 | boolean_t |
1514 | spa_shutting_down(spa_t *spa) | |
34dc7c2f | 1515 | { |
b128c09f | 1516 | return (spa->spa_async_suspended); |
34dc7c2f BB |
1517 | } |
1518 | ||
1519 | dsl_pool_t * | |
1520 | spa_get_dsl(spa_t *spa) | |
1521 | { | |
1522 | return (spa->spa_dsl_pool); | |
1523 | } | |
1524 | ||
9ae529ec CS |
1525 | boolean_t |
1526 | spa_is_initializing(spa_t *spa) | |
1527 | { | |
1528 | return (spa->spa_is_initializing); | |
1529 | } | |
1530 | ||
a1d477c2 MA |
1531 | boolean_t |
1532 | spa_indirect_vdevs_loaded(spa_t *spa) | |
1533 | { | |
1534 | return (spa->spa_indirect_vdevs_loaded); | |
1535 | } | |
1536 | ||
34dc7c2f BB |
1537 | blkptr_t * |
1538 | spa_get_rootblkptr(spa_t *spa) | |
1539 | { | |
1540 | return (&spa->spa_ubsync.ub_rootbp); | |
1541 | } | |
1542 | ||
1543 | void | |
1544 | spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp) | |
1545 | { | |
1546 | spa->spa_uberblock.ub_rootbp = *bp; | |
1547 | } | |
1548 | ||
1549 | void | |
1550 | spa_altroot(spa_t *spa, char *buf, size_t buflen) | |
1551 | { | |
1552 | if (spa->spa_root == NULL) | |
1553 | buf[0] = '\0'; | |
1554 | else | |
1555 | (void) strncpy(buf, spa->spa_root, buflen); | |
1556 | } | |
1557 | ||
1558 | int | |
1559 | spa_sync_pass(spa_t *spa) | |
1560 | { | |
1561 | return (spa->spa_sync_pass); | |
1562 | } | |
1563 | ||
1564 | char * | |
1565 | spa_name(spa_t *spa) | |
1566 | { | |
34dc7c2f BB |
1567 | return (spa->spa_name); |
1568 | } | |
1569 | ||
1570 | uint64_t | |
1571 | spa_guid(spa_t *spa) | |
1572 | { | |
3bc7e0fb GW |
1573 | dsl_pool_t *dp = spa_get_dsl(spa); |
1574 | uint64_t guid; | |
1575 | ||
34dc7c2f BB |
1576 | /* |
1577 | * If we fail to parse the config during spa_load(), we can go through | |
1578 | * the error path (which posts an ereport) and end up here with no root | |
3541dc6d | 1579 | * vdev. We stash the original pool guid in 'spa_config_guid' to handle |
34dc7c2f BB |
1580 | * this case. |
1581 | */ | |
3bc7e0fb GW |
1582 | if (spa->spa_root_vdev == NULL) |
1583 | return (spa->spa_config_guid); | |
1584 | ||
1585 | guid = spa->spa_last_synced_guid != 0 ? | |
1586 | spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid; | |
1587 | ||
1588 | /* | |
1589 | * Return the most recently synced out guid unless we're | |
1590 | * in syncing context. | |
1591 | */ | |
1592 | if (dp && dsl_pool_sync_context(dp)) | |
34dc7c2f BB |
1593 | return (spa->spa_root_vdev->vdev_guid); |
1594 | else | |
3bc7e0fb | 1595 | return (guid); |
3541dc6d GA |
1596 | } |
1597 | ||
1598 | uint64_t | |
1599 | spa_load_guid(spa_t *spa) | |
1600 | { | |
1601 | /* | |
1602 | * This is a GUID that exists solely as a reference for the | |
1603 | * purposes of the arc. It is generated at load time, and | |
1604 | * is never written to persistent storage. | |
1605 | */ | |
1606 | return (spa->spa_load_guid); | |
34dc7c2f BB |
1607 | } |
1608 | ||
1609 | uint64_t | |
1610 | spa_last_synced_txg(spa_t *spa) | |
1611 | { | |
1612 | return (spa->spa_ubsync.ub_txg); | |
1613 | } | |
1614 | ||
1615 | uint64_t | |
1616 | spa_first_txg(spa_t *spa) | |
1617 | { | |
1618 | return (spa->spa_first_txg); | |
1619 | } | |
1620 | ||
428870ff BB |
1621 | uint64_t |
1622 | spa_syncing_txg(spa_t *spa) | |
1623 | { | |
1624 | return (spa->spa_syncing_txg); | |
1625 | } | |
1626 | ||
3b7f360c GW |
1627 | /* |
1628 | * Return the last txg where data can be dirtied. The final txgs | |
1629 | * will be used just to clear out any deferred frees that remain.
1630 | */ | |
1631 | uint64_t | |
1632 | spa_final_dirty_txg(spa_t *spa) | |
1633 | { | |
1634 | return (spa->spa_final_txg - TXG_DEFER_SIZE); | |
1635 | } | |
1636 | ||
b128c09f | 1637 | pool_state_t |
34dc7c2f BB |
1638 | spa_state(spa_t *spa) |
1639 | { | |
1640 | return (spa->spa_state); | |
1641 | } | |
1642 | ||
428870ff BB |
1643 | spa_load_state_t |
1644 | spa_load_state(spa_t *spa) | |
34dc7c2f | 1645 | { |
428870ff | 1646 | return (spa->spa_load_state); |
34dc7c2f BB |
1647 | } |
1648 | ||
34dc7c2f | 1649 | uint64_t |
428870ff | 1650 | spa_freeze_txg(spa_t *spa) |
34dc7c2f | 1651 | { |
428870ff | 1652 | return (spa->spa_freeze_txg); |
34dc7c2f BB |
1653 | } |
1654 | ||
047187c1 | 1655 | /* |
1656 | * Return the inflated asize for a logical write in bytes. This is used by the | |
1657 | * DMU to calculate the space a logical write will require on disk. | |
1658 | * If lsize is smaller than the largest physical block size allocatable on this | |
1659 | * pool, we use its value instead, since the write will end up using the whole
1660 | * block anyway. | |
1661 | */ | |
34dc7c2f | 1662 | uint64_t |
3ec3bc21 | 1663 | spa_get_worst_case_asize(spa_t *spa, uint64_t lsize) |
34dc7c2f | 1664 | { |
047187c1 | 1665 | if (lsize == 0) |
1666 | return (0); /* No inflation needed */ | |
1667 | return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation); | |
34dc7c2f BB |
1668 | } |
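
/*
 * Worked example (hypothetical numbers, for exposition only): assuming
 * spa_asize_inflation is at its default of 24 and the largest allocatable
 * ashift on the pool is 12 (4 KiB blocks), a 512-byte logical write is
 * first rounded up to the 4 KiB block it will occupy and then inflated,
 * giving MAX(512, 4096) * 24 = 98304 bytes of worst-case asize. A 128 KiB
 * write is already at least one block, so it becomes 131072 * 24 = 3145728
 * bytes.
 */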
1669 | ||
3d45fdd6 MA |
1670 | /* |
1671 | * Return the amount of slop space in bytes. It is 1/32 of the pool (3.125%),
d7958b4c MA |
1672 | * or at least 128MB, unless that would cause it to be more than half the |
1673 | * pool size. | |
3d45fdd6 MA |
1674 | * |
1675 | * See the comment above spa_slop_shift for details. | |
1676 | */ | |
1677 | uint64_t | |
4ea3f864 GM |
1678 | spa_get_slop_space(spa_t *spa) |
1679 | { | |
3d45fdd6 | 1680 | uint64_t space = spa_get_dspace(spa); |
d7958b4c | 1681 | return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop))); |
3d45fdd6 MA |
1682 | } |
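
/*
 * Worked example (hypothetical numbers, for exposition only), assuming the
 * defaults spa_slop_shift = 5 (1/32) and spa_min_slop = 128 MiB:
 *   1 TiB pool:   MAX(32 GiB, MIN(512 GiB, 128 MiB)) = 32 GiB of slop
 *   1 GiB pool:   MAX(32 MiB, MIN(512 MiB, 128 MiB)) = 128 MiB of slop
 *   100 MiB pool: MAX(~3 MiB, MIN(50 MiB,  128 MiB)) = 50 MiB of slop,
 *                 i.e. the half-pool cap wins over the 128 MiB floor.
 */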
1683 | ||
34dc7c2f BB |
1684 | uint64_t |
1685 | spa_get_dspace(spa_t *spa) | |
1686 | { | |
428870ff | 1687 | return (spa->spa_dspace); |
34dc7c2f BB |
1688 | } |
1689 | ||
428870ff BB |
1690 | void |
1691 | spa_update_dspace(spa_t *spa) | |
34dc7c2f | 1692 | { |
428870ff BB |
1693 | spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) + |
1694 | ddt_get_dedup_dspace(spa); | |
a1d477c2 MA |
1695 | if (spa->spa_vdev_removal != NULL) { |
1696 | /* | |
1697 | * We can't allocate from the removing device, so | |
1698 | * subtract its size. This prevents the DMU/DSL from | |
1699 | * filling up the (now smaller) pool while we are in the | |
1700 | * middle of removing the device. | |
1701 | * | |
1702 | * Note that the DMU/DSL doesn't actually know or care | |
1703 | * how much space is allocated (it does its own tracking | |
1704 | * of how much space has been logically used). So it | |
1705 | * doesn't matter that the data we are moving may be | |
1706 | * allocated twice (on the old device and the new | |
1707 | * device). | |
1708 | */ | |
9e052db4 MA |
1709 | spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); |
1710 | vdev_t *vd = | |
1711 | vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id); | |
a1d477c2 MA |
1712 | spa->spa_dspace -= spa_deflate(spa) ? |
1713 | vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space; | |
9e052db4 | 1714 | spa_config_exit(spa, SCL_VDEV, FTAG); |
a1d477c2 | 1715 | } |
34dc7c2f BB |
1716 | } |
1717 | ||
1718 | /* | |
1719 | * Return the failure mode that has been set for this pool. The default
1720 | * behavior will be to block all I/Os when a complete failure occurs. | |
1721 | */ | |
8fb1ede1 | 1722 | uint64_t |
34dc7c2f BB |
1723 | spa_get_failmode(spa_t *spa) |
1724 | { | |
1725 | return (spa->spa_failmode); | |
1726 | } | |
1727 | ||
b128c09f BB |
1728 | boolean_t |
1729 | spa_suspended(spa_t *spa) | |
1730 | { | |
cec3a0a1 | 1731 | return (spa->spa_suspended != ZIO_SUSPEND_NONE); |
b128c09f BB |
1732 | } |
1733 | ||
34dc7c2f BB |
1734 | uint64_t |
1735 | spa_version(spa_t *spa) | |
1736 | { | |
1737 | return (spa->spa_ubsync.ub_version); | |
1738 | } | |
1739 | ||
428870ff BB |
1740 | boolean_t |
1741 | spa_deflate(spa_t *spa) | |
1742 | { | |
1743 | return (spa->spa_deflate); | |
1744 | } | |
1745 | ||
1746 | metaslab_class_t * | |
1747 | spa_normal_class(spa_t *spa) | |
1748 | { | |
1749 | return (spa->spa_normal_class); | |
1750 | } | |
1751 | ||
1752 | metaslab_class_t * | |
1753 | spa_log_class(spa_t *spa) | |
1754 | { | |
1755 | return (spa->spa_log_class); | |
1756 | } | |
1757 | ||
0c66c32d JG |
1758 | void |
1759 | spa_evicting_os_register(spa_t *spa, objset_t *os) | |
1760 | { | |
1761 | mutex_enter(&spa->spa_evicting_os_lock); | |
1762 | list_insert_head(&spa->spa_evicting_os_list, os); | |
1763 | mutex_exit(&spa->spa_evicting_os_lock); | |
1764 | } | |
1765 | ||
1766 | void | |
1767 | spa_evicting_os_deregister(spa_t *spa, objset_t *os) | |
1768 | { | |
1769 | mutex_enter(&spa->spa_evicting_os_lock); | |
1770 | list_remove(&spa->spa_evicting_os_list, os); | |
1771 | cv_broadcast(&spa->spa_evicting_os_cv); | |
1772 | mutex_exit(&spa->spa_evicting_os_lock); | |
1773 | } | |
1774 | ||
1775 | void | |
1776 | spa_evicting_os_wait(spa_t *spa) | |
1777 | { | |
1778 | mutex_enter(&spa->spa_evicting_os_lock); | |
1779 | while (!list_is_empty(&spa->spa_evicting_os_list)) | |
1780 | cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock); | |
1781 | mutex_exit(&spa->spa_evicting_os_lock); | |
1782 | ||
1783 | dmu_buf_user_evict_wait(); | |
1784 | } | |
1785 | ||
34dc7c2f BB |
1786 | int |
1787 | spa_max_replication(spa_t *spa) | |
1788 | { | |
1789 | /* | |
1790 | * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to | |
1791 | * handle BPs with more than one DVA allocated. Set our max | |
1792 | * replication level accordingly. | |
1793 | */ | |
1794 | if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS) | |
1795 | return (1); | |
1796 | return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override)); | |
1797 | } | |
1798 | ||
428870ff BB |
1799 | int |
1800 | spa_prev_software_version(spa_t *spa) | |
1801 | { | |
1802 | return (spa->spa_prev_software_version); | |
1803 | } | |
1804 | ||
cc92e9d0 GW |
1805 | uint64_t |
1806 | spa_deadman_synctime(spa_t *spa) | |
1807 | { | |
1808 | return (spa->spa_deadman_synctime); | |
1809 | } | |
1810 | ||
8fb1ede1 BB |
1811 | uint64_t |
1812 | spa_deadman_ziotime(spa_t *spa) | |
1813 | { | |
1814 | return (spa->spa_deadman_ziotime); | |
1815 | } | |
1816 | ||
1817 | uint64_t | |
1818 | spa_get_deadman_failmode(spa_t *spa) | |
1819 | { | |
1820 | return (spa->spa_deadman_failmode); | |
1821 | } | |
1822 | ||
1823 | void | |
1824 | spa_set_deadman_failmode(spa_t *spa, const char *failmode) | |
1825 | { | |
1826 | if (strcmp(failmode, "wait") == 0) | |
1827 | spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT; | |
1828 | else if (strcmp(failmode, "continue") == 0) | |
1829 | spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE; | |
1830 | else if (strcmp(failmode, "panic") == 0) | |
1831 | spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC; | |
1832 | else | |
1833 | spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT; | |
1834 | } | |
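
/*
 * Illustrative usage sketch (hypothetical, for exposition only):
 * unrecognized strings silently fall back to ZIO_FAILURE_MODE_WAIT, so
 * callers needing strict validation (as param_set_deadman_failmode()
 * further below does) must reject bad input before calling this function.
 * The function name is hypothetical.
 */
#if 0	/* example only */
static void
example_set_failmode(spa_t *spa)
{
	spa_set_deadman_failmode(spa, "continue");
	ASSERT3U(spa_get_deadman_failmode(spa), ==,
	    ZIO_FAILURE_MODE_CONTINUE);

	/* Anything unrecognized falls back to "wait". */
	spa_set_deadman_failmode(spa, "bogus");
	ASSERT3U(spa_get_deadman_failmode(spa), ==, ZIO_FAILURE_MODE_WAIT);
}
#endif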
1835 | ||
34dc7c2f | 1836 | uint64_t |
428870ff | 1837 | dva_get_dsize_sync(spa_t *spa, const dva_t *dva) |
34dc7c2f | 1838 | { |
428870ff BB |
1839 | uint64_t asize = DVA_GET_ASIZE(dva); |
1840 | uint64_t dsize = asize; | |
34dc7c2f | 1841 | |
428870ff | 1842 | ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); |
34dc7c2f | 1843 | |
428870ff BB |
1844 | if (asize != 0 && spa->spa_deflate) { |
1845 | vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); | |
2c33b912 BB |
1846 | if (vd != NULL) |
1847 | dsize = (asize >> SPA_MINBLOCKSHIFT) * | |
1848 | vd->vdev_deflate_ratio; | |
34dc7c2f | 1849 | } |
428870ff BB |
1850 | |
1851 | return (dsize); | |
1852 | } | |
1853 | ||
1854 | uint64_t | |
1855 | bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp) | |
1856 | { | |
1857 | uint64_t dsize = 0; | |
1858 | ||
1c27024e | 1859 | for (int d = 0; d < BP_GET_NDVAS(bp); d++) |
428870ff BB |
1860 | dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); |
1861 | ||
1862 | return (dsize); | |
1863 | } | |
1864 | ||
1865 | uint64_t | |
1866 | bp_get_dsize(spa_t *spa, const blkptr_t *bp) | |
1867 | { | |
1868 | uint64_t dsize = 0; | |
1869 | ||
1870 | spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); | |
1871 | ||
1c27024e | 1872 | for (int d = 0; d < BP_GET_NDVAS(bp); d++) |
428870ff BB |
1873 | dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); |
1874 | ||
b128c09f | 1875 | spa_config_exit(spa, SCL_VDEV, FTAG); |
428870ff BB |
1876 | |
1877 | return (dsize); | |
34dc7c2f BB |
1878 | } |
1879 | ||
1880 | /* | |
1881 | * ========================================================================== | |
1882 | * Initialization and Termination | |
1883 | * ========================================================================== | |
1884 | */ | |
1885 | ||
1886 | static int | |
1887 | spa_name_compare(const void *a1, const void *a2) | |
1888 | { | |
1889 | const spa_t *s1 = a1; | |
1890 | const spa_t *s2 = a2; | |
1891 | int s; | |
1892 | ||
1893 | s = strcmp(s1->spa_name, s2->spa_name); | |
ee36c709 GN |
1894 | |
1895 | return (AVL_ISIGN(s)); | |
34dc7c2f BB |
1896 | } |
1897 | ||
34dc7c2f | 1898 | void |
0bc8fd78 | 1899 | spa_boot_init(void) |
34dc7c2f BB |
1900 | { |
1901 | spa_config_load(); | |
1902 | } | |
1903 | ||
1904 | void | |
1905 | spa_init(int mode) | |
1906 | { | |
1907 | mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL); | |
1908 | mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL); | |
1909 | mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL); | |
1910 | cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL); | |
1911 | ||
1912 | avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t), | |
1913 | offsetof(spa_t, spa_avl)); | |
1914 | ||
1915 | avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t), | |
1916 | offsetof(spa_aux_t, aux_avl)); | |
1917 | ||
1918 | avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t), | |
1919 | offsetof(spa_aux_t, aux_avl)); | |
1920 | ||
fb5f0bc8 | 1921 | spa_mode_global = mode; |
34dc7c2f | 1922 | |
498877ba MA |
1923 | #ifndef _KERNEL |
1924 | if (spa_mode_global != FREAD && dprintf_find_string("watch")) { | |
1925 | struct sigaction sa; | |
1926 | ||
1927 | sa.sa_flags = SA_SIGINFO; | |
1928 | sigemptyset(&sa.sa_mask); | |
1929 | sa.sa_sigaction = arc_buf_sigsegv; | |
1930 | ||
1931 | if (sigaction(SIGSEGV, &sa, NULL) == -1) { | |
1932 | perror("could not enable watchpoints: " | |
1933 | "sigaction(SIGSEGV, ...) = "); | |
1934 | } else { | |
1935 | arc_watch = B_TRUE; | |
1936 | } | |
1937 | } | |
1938 | #endif | |
1939 | ||
26685276 | 1940 | fm_init(); |
34dc7c2f BB |
1941 | refcount_init(); |
1942 | unique_init(); | |
93cf2076 | 1943 | range_tree_init(); |
4e21fd06 | 1944 | metaslab_alloc_trace_init(); |
ecf3d9b8 | 1945 | ddt_init(); |
34dc7c2f BB |
1946 | zio_init(); |
1947 | dmu_init(); | |
1948 | zil_init(); | |
1949 | vdev_cache_stat_init(); | |
551905dd | 1950 | vdev_mirror_stat_init(); |
ab9f4b0b | 1951 | vdev_raidz_math_init(); |
da8f51e1 | 1952 | vdev_file_init(); |
34dc7c2f BB |
1953 | zfs_prop_init(); |
1954 | zpool_prop_init(); | |
9ae529ec | 1955 | zpool_feature_init(); |
34dc7c2f | 1956 | spa_config_load(); |
b128c09f | 1957 | l2arc_start(); |
d4a72f23 | 1958 | scan_init(); |
6a9d6359 | 1959 | qat_init(); |
34dc7c2f BB |
1960 | } |
1961 | ||
1962 | void | |
1963 | spa_fini(void) | |
1964 | { | |
b128c09f BB |
1965 | l2arc_stop(); |
1966 | ||
34dc7c2f BB |
1967 | spa_evict_all(); |
1968 | ||
da8f51e1 | 1969 | vdev_file_fini(); |
34dc7c2f | 1970 | vdev_cache_stat_fini(); |
551905dd | 1971 | vdev_mirror_stat_fini(); |
ab9f4b0b | 1972 | vdev_raidz_math_fini(); |
34dc7c2f BB |
1973 | zil_fini(); |
1974 | dmu_fini(); | |
1975 | zio_fini(); | |
ecf3d9b8 | 1976 | ddt_fini(); |
4e21fd06 | 1977 | metaslab_alloc_trace_fini(); |
93cf2076 | 1978 | range_tree_fini(); |
34dc7c2f BB |
1979 | unique_fini(); |
1980 | refcount_fini(); | |
26685276 | 1981 | fm_fini(); |
d4a72f23 | 1982 | scan_fini(); |
6a9d6359 | 1983 | qat_fini(); |
34dc7c2f BB |
1984 | |
1985 | avl_destroy(&spa_namespace_avl); | |
1986 | avl_destroy(&spa_spare_avl); | |
1987 | avl_destroy(&spa_l2cache_avl); | |
1988 | ||
1989 | cv_destroy(&spa_namespace_cv); | |
1990 | mutex_destroy(&spa_namespace_lock); | |
1991 | mutex_destroy(&spa_spare_lock); | |
1992 | mutex_destroy(&spa_l2cache_lock); | |
1993 | } | |
1994 | ||
1995 | /* | |
1996 | * Return whether this pool has slogs. No locking needed. | |
1997 | * It's not a problem if the wrong answer is returned, as it's only used
1998 | * for performance, not correctness.
1999 | */ | |
2000 | boolean_t | |
2001 | spa_has_slogs(spa_t *spa) | |
2002 | { | |
2003 | return (spa->spa_log_class->mc_rotor != NULL); | |
2004 | } | |
b128c09f | 2005 | |
428870ff BB |
2006 | spa_log_state_t |
2007 | spa_get_log_state(spa_t *spa) | |
2008 | { | |
2009 | return (spa->spa_log_state); | |
2010 | } | |
2011 | ||
2012 | void | |
2013 | spa_set_log_state(spa_t *spa, spa_log_state_t state) | |
2014 | { | |
2015 | spa->spa_log_state = state; | |
2016 | } | |
2017 | ||
b128c09f BB |
2018 | boolean_t |
2019 | spa_is_root(spa_t *spa) | |
2020 | { | |
2021 | return (spa->spa_is_root); | |
2022 | } | |
fb5f0bc8 BB |
2023 | |
2024 | boolean_t | |
2025 | spa_writeable(spa_t *spa) | |
2026 | { | |
2027 | return (!!(spa->spa_mode & FWRITE)); | |
2028 | } | |
2029 | ||
acbad6ff AR |
2030 | /* |
2031 | * Returns true if there is a pending sync task in any of the current | |
2032 | * syncing txg, the current quiescing txg, or the current open txg. | |
2033 | */ | |
2034 | boolean_t | |
2035 | spa_has_pending_synctask(spa_t *spa) | |
2036 | { | |
2037 | return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks)); | |
2038 | } | |
2039 | ||
fb5f0bc8 BB |
2040 | int |
2041 | spa_mode(spa_t *spa) | |
2042 | { | |
2043 | return (spa->spa_mode); | |
2044 | } | |
428870ff BB |
2045 | |
2046 | uint64_t | |
2047 | spa_bootfs(spa_t *spa) | |
2048 | { | |
2049 | return (spa->spa_bootfs); | |
2050 | } | |
2051 | ||
2052 | uint64_t | |
2053 | spa_delegation(spa_t *spa) | |
2054 | { | |
2055 | return (spa->spa_delegation); | |
2056 | } | |
2057 | ||
2058 | objset_t * | |
2059 | spa_meta_objset(spa_t *spa) | |
2060 | { | |
2061 | return (spa->spa_meta_objset); | |
2062 | } | |
2063 | ||
2064 | enum zio_checksum | |
2065 | spa_dedup_checksum(spa_t *spa) | |
2066 | { | |
2067 | return (spa->spa_dedup_checksum); | |
2068 | } | |
2069 | ||
2070 | /* | |
2071 | * Reset pool scan stats per scan pass (or reboot).
2072 | */ | |
2073 | void | |
2074 | spa_scan_stat_init(spa_t *spa) | |
2075 | { | |
2076 | /* data not stored on disk */ | |
2077 | spa->spa_scan_pass_start = gethrestime_sec(); | |
0ea05c64 AP |
2078 | if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan)) |
2079 | spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start; | |
2080 | else | |
2081 | spa->spa_scan_pass_scrub_pause = 0; | |
2082 | spa->spa_scan_pass_scrub_spent_paused = 0; | |
428870ff | 2083 | spa->spa_scan_pass_exam = 0; |
d4a72f23 | 2084 | spa->spa_scan_pass_issued = 0; |
428870ff BB |
2085 | vdev_scan_stat_init(spa->spa_root_vdev); |
2086 | } | |
2087 | ||
2088 | /* | |
2089 | * Get scan stats for zpool status reports | |
2090 | */ | |
2091 | int | |
2092 | spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps) | |
2093 | { | |
2094 | dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL; | |
2095 | ||
2096 | if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE) | |
2e528b49 | 2097 | return (SET_ERROR(ENOENT)); |
428870ff BB |
2098 | bzero(ps, sizeof (pool_scan_stat_t)); |
2099 | ||
2100 | /* data stored on disk */ | |
2101 | ps->pss_func = scn->scn_phys.scn_func; | |
d4a72f23 | 2102 | ps->pss_state = scn->scn_phys.scn_state; |
428870ff BB |
2103 | ps->pss_start_time = scn->scn_phys.scn_start_time; |
2104 | ps->pss_end_time = scn->scn_phys.scn_end_time; | |
2105 | ps->pss_to_examine = scn->scn_phys.scn_to_examine; | |
d4677269 | 2106 | ps->pss_examined = scn->scn_phys.scn_examined; |
428870ff BB |
2107 | ps->pss_to_process = scn->scn_phys.scn_to_process; |
2108 | ps->pss_processed = scn->scn_phys.scn_processed; | |
2109 | ps->pss_errors = scn->scn_phys.scn_errors; | |
428870ff BB |
2110 | |
2111 | /* data not stored on disk */ | |
428870ff | 2112 | ps->pss_pass_exam = spa->spa_scan_pass_exam; |
d4677269 | 2113 | ps->pss_pass_start = spa->spa_scan_pass_start; |
0ea05c64 AP |
2114 | ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause; |
2115 | ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused; | |
d4677269 TC |
2116 | ps->pss_pass_issued = spa->spa_scan_pass_issued; |
2117 | ps->pss_issued = | |
2118 | scn->scn_issued_before_pass + spa->spa_scan_pass_issued; | |
428870ff BB |
2119 | |
2120 | return (0); | |
2121 | } | |
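
/*
 * Illustrative usage sketch (hypothetical, for exposition only): a
 * status-reporting caller fills a pool_scan_stat_t and treats ENOENT as
 * "no scan has ever been started on this pool". The function name is
 * hypothetical.
 */
#if 0	/* example only */
static void
example_report_scan(spa_t *spa)
{
	pool_scan_stat_t ps;

	if (spa_scan_get_stats(spa, &ps) != 0) {
		zfs_dbgmsg("pool %s: no scan stats available", spa_name(spa));
		return;
	}

	zfs_dbgmsg("pool %s: func %llu, examined %llu of %llu bytes",
	    spa_name(spa), (u_longlong_t)ps.pss_func,
	    (u_longlong_t)ps.pss_examined, (u_longlong_t)ps.pss_to_examine);
}
#endif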
c28b2279 | 2122 | |
f1512ee6 MA |
2123 | int |
2124 | spa_maxblocksize(spa_t *spa) | |
2125 | { | |
2126 | if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) | |
2127 | return (SPA_MAXBLOCKSIZE); | |
2128 | else | |
2129 | return (SPA_OLD_MAXBLOCKSIZE); | |
2130 | } | |
2131 | ||
a1d477c2 MA |
2132 | |
2133 | /* | |
2134 | * Returns the txg in which the last device removal completed. No indirect mappings
2135 | * have been added since this txg. | |
2136 | */ | |
2137 | uint64_t | |
2138 | spa_get_last_removal_txg(spa_t *spa) | |
2139 | { | |
2140 | uint64_t vdevid; | |
2141 | uint64_t ret = -1ULL; | |
2142 | ||
2143 | spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); | |
2144 | /* | |
2145 | * sr_prev_indirect_vdev is only modified while holding all the | |
2146 | * config locks, so it is sufficient to hold SCL_VDEV as reader when | |
2147 | * examining it. | |
2148 | */ | |
2149 | vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev; | |
2150 | ||
2151 | while (vdevid != -1ULL) { | |
2152 | vdev_t *vd = vdev_lookup_top(spa, vdevid); | |
2153 | vdev_indirect_births_t *vib = vd->vdev_indirect_births; | |
2154 | ||
2155 | ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); | |
2156 | ||
2157 | /* | |
2158 | * If the removal did not remap any data, we don't care. | |
2159 | */ | |
2160 | if (vdev_indirect_births_count(vib) != 0) { | |
2161 | ret = vdev_indirect_births_last_entry_txg(vib); | |
2162 | break; | |
2163 | } | |
2164 | ||
2165 | vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev; | |
2166 | } | |
2167 | spa_config_exit(spa, SCL_VDEV, FTAG); | |
2168 | ||
2169 | IMPLY(ret != -1ULL, | |
2170 | spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); | |
2171 | ||
2172 | return (ret); | |
2173 | } | |
2174 | ||
50c957f7 NB |
2175 | int |
2176 | spa_maxdnodesize(spa_t *spa) | |
2177 | { | |
2178 | if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) | |
2179 | return (DNODE_MAX_SIZE); | |
2180 | else | |
2181 | return (DNODE_MIN_SIZE); | |
2182 | } | |
2183 | ||
379ca9cf OF |
2184 | boolean_t |
2185 | spa_multihost(spa_t *spa) | |
2186 | { | |
2187 | return (spa->spa_multihost ? B_TRUE : B_FALSE); | |
2188 | } | |
2189 | ||
2190 | unsigned long | |
2191 | spa_get_hostid(void) | |
2192 | { | |
2193 | unsigned long myhostid; | |
2194 | ||
2195 | #ifdef _KERNEL | |
2196 | myhostid = zone_get_hostid(NULL); | |
2197 | #else /* _KERNEL */ | |
2198 | /* | |
2199 | * We're emulating the system's hostid in userland, so | |
2200 | * we can't use zone_get_hostid(). | |
2201 | */ | |
2202 | (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid); | |
2203 | #endif /* _KERNEL */ | |
2204 | ||
2205 | return (myhostid); | |
2206 | } | |
2207 | ||
c28b2279 | 2208 | #if defined(_KERNEL) && defined(HAVE_SPL) |
8fb1ede1 BB |
2209 | |
2210 | #include <linux/mod_compat.h> | |
2211 | ||
2212 | static int | |
2213 | param_set_deadman_failmode(const char *val, zfs_kernel_param_t *kp) | |
2214 | { | |
2215 | spa_t *spa = NULL; | |
2216 | char *p; | |
2217 | ||
2218 | if (val == NULL) | |
2219 | return (SET_ERROR(-EINVAL)); | |
2220 | ||
2221 | if ((p = strchr(val, '\n')) != NULL) | |
2222 | *p = '\0'; | |
2223 | ||
2224 | if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 && | |
2225 | strcmp(val, "panic") != 0)
2226 | return (SET_ERROR(-EINVAL)); | |
2227 | ||
2228 | mutex_enter(&spa_namespace_lock); | |
2229 | while ((spa = spa_next(spa)) != NULL) | |
2230 | spa_set_deadman_failmode(spa, val); | |
2231 | mutex_exit(&spa_namespace_lock); | |
2232 | ||
2233 | return (param_set_charp(val, kp)); | |
2234 | } | |
2235 | ||
c28b2279 BB |
2236 | /* Namespace manipulation */ |
2237 | EXPORT_SYMBOL(spa_lookup); | |
2238 | EXPORT_SYMBOL(spa_add); | |
2239 | EXPORT_SYMBOL(spa_remove); | |
2240 | EXPORT_SYMBOL(spa_next); | |
2241 | ||
2242 | /* Refcount functions */ | |
2243 | EXPORT_SYMBOL(spa_open_ref); | |
2244 | EXPORT_SYMBOL(spa_close); | |
2245 | EXPORT_SYMBOL(spa_refcount_zero); | |
2246 | ||
2247 | /* Pool configuration lock */ | |
2248 | EXPORT_SYMBOL(spa_config_tryenter); | |
2249 | EXPORT_SYMBOL(spa_config_enter); | |
2250 | EXPORT_SYMBOL(spa_config_exit); | |
2251 | EXPORT_SYMBOL(spa_config_held); | |
2252 | ||
2253 | /* Pool vdev add/remove lock */ | |
2254 | EXPORT_SYMBOL(spa_vdev_enter); | |
2255 | EXPORT_SYMBOL(spa_vdev_exit); | |
2256 | ||
2257 | /* Pool vdev state change lock */ | |
2258 | EXPORT_SYMBOL(spa_vdev_state_enter); | |
2259 | EXPORT_SYMBOL(spa_vdev_state_exit); | |
2260 | ||
2261 | /* Accessor functions */ | |
2262 | EXPORT_SYMBOL(spa_shutting_down); | |
2263 | EXPORT_SYMBOL(spa_get_dsl); | |
2264 | EXPORT_SYMBOL(spa_get_rootblkptr); | |
2265 | EXPORT_SYMBOL(spa_set_rootblkptr); | |
2266 | EXPORT_SYMBOL(spa_altroot); | |
2267 | EXPORT_SYMBOL(spa_sync_pass); | |
2268 | EXPORT_SYMBOL(spa_name); | |
2269 | EXPORT_SYMBOL(spa_guid); | |
2270 | EXPORT_SYMBOL(spa_last_synced_txg); | |
2271 | EXPORT_SYMBOL(spa_first_txg); | |
2272 | EXPORT_SYMBOL(spa_syncing_txg); | |
2273 | EXPORT_SYMBOL(spa_version); | |
2274 | EXPORT_SYMBOL(spa_state); | |
2275 | EXPORT_SYMBOL(spa_load_state); | |
2276 | EXPORT_SYMBOL(spa_freeze_txg); | |
c28b2279 BB |
2277 | EXPORT_SYMBOL(spa_get_dspace); |
2278 | EXPORT_SYMBOL(spa_update_dspace); | |
2279 | EXPORT_SYMBOL(spa_deflate); | |
2280 | EXPORT_SYMBOL(spa_normal_class); | |
2281 | EXPORT_SYMBOL(spa_log_class); | |
2282 | EXPORT_SYMBOL(spa_max_replication); | |
2283 | EXPORT_SYMBOL(spa_prev_software_version); | |
2284 | EXPORT_SYMBOL(spa_get_failmode); | |
2285 | EXPORT_SYMBOL(spa_suspended); | |
2286 | EXPORT_SYMBOL(spa_bootfs); | |
2287 | EXPORT_SYMBOL(spa_delegation); | |
2288 | EXPORT_SYMBOL(spa_meta_objset); | |
f1512ee6 | 2289 | EXPORT_SYMBOL(spa_maxblocksize); |
50c957f7 | 2290 | EXPORT_SYMBOL(spa_maxdnodesize); |
c28b2279 BB |
2291 | |
2292 | /* Miscellaneous support routines */ | |
2293 | EXPORT_SYMBOL(spa_rename); | |
2294 | EXPORT_SYMBOL(spa_guid_exists); | |
2295 | EXPORT_SYMBOL(spa_strdup); | |
2296 | EXPORT_SYMBOL(spa_strfree); | |
2297 | EXPORT_SYMBOL(spa_get_random); | |
2298 | EXPORT_SYMBOL(spa_generate_guid); | |
b0bc7a84 | 2299 | EXPORT_SYMBOL(snprintf_blkptr); |
c28b2279 BB |
2300 | EXPORT_SYMBOL(spa_freeze); |
2301 | EXPORT_SYMBOL(spa_upgrade); | |
2302 | EXPORT_SYMBOL(spa_evict_all); | |
2303 | EXPORT_SYMBOL(spa_lookup_by_guid); | |
2304 | EXPORT_SYMBOL(spa_has_spare); | |
2305 | EXPORT_SYMBOL(dva_get_dsize_sync); | |
2306 | EXPORT_SYMBOL(bp_get_dsize_sync); | |
2307 | EXPORT_SYMBOL(bp_get_dsize); | |
2308 | EXPORT_SYMBOL(spa_has_slogs); | |
2309 | EXPORT_SYMBOL(spa_is_root); | |
2310 | EXPORT_SYMBOL(spa_writeable); | |
2311 | EXPORT_SYMBOL(spa_mode); | |
c28b2279 | 2312 | EXPORT_SYMBOL(spa_namespace_lock); |
cc92e9d0 | 2313 | |
02730c33 | 2314 | /* BEGIN CSTYLED */ |
33b6dbbc | 2315 | module_param(zfs_flags, uint, 0644); |
0b39b9f9 PS |
2316 | MODULE_PARM_DESC(zfs_flags, "Set additional debugging flags"); |
2317 | ||
2318 | module_param(zfs_recover, int, 0644); | |
2319 | MODULE_PARM_DESC(zfs_recover, "Set to attempt to recover from fatal errors"); | |
2320 | ||
2321 | module_param(zfs_free_leak_on_eio, int, 0644); | |
2322 | MODULE_PARM_DESC(zfs_free_leak_on_eio, | |
2323 | "Set to ignore IO errors during free and permanently leak the space"); | |
2324 | ||
e8b96c60 | 2325 | module_param(zfs_deadman_synctime_ms, ulong, 0644); |
8fb1ede1 BB |
2326 | MODULE_PARM_DESC(zfs_deadman_synctime_ms, |
2327 | "Pool sync expiration time in milliseconds"); | |
2328 | ||
2329 | module_param(zfs_deadman_ziotime_ms, ulong, 0644); | |
2330 | MODULE_PARM_DESC(zfs_deadman_ziotime_ms, | |
2331 | "IO expiration time in milliseconds"); | |
cc92e9d0 | 2332 | |
b81a3ddc TC |
2333 | module_param(zfs_deadman_checktime_ms, ulong, 0644); |
2334 | MODULE_PARM_DESC(zfs_deadman_checktime_ms, | |
2335 | "Dead I/O check interval in milliseconds"); | |
2336 | ||
cc92e9d0 GW |
2337 | module_param(zfs_deadman_enabled, int, 0644); |
2338 | MODULE_PARM_DESC(zfs_deadman_enabled, "Enable deadman timer"); | |
e8b96c60 | 2339 | |
8fb1ede1 BB |
2340 | module_param_call(zfs_deadman_failmode, param_set_deadman_failmode, |
2341 | param_get_charp, &zfs_deadman_failmode, 0644); | |
2342 | MODULE_PARM_DESC(zfs_deadman_failmode, "Failmode for deadman timer"); | |
2343 | ||
e8b96c60 MA |
2344 | module_param(spa_asize_inflation, int, 0644); |
2345 | MODULE_PARM_DESC(spa_asize_inflation, | |
d1d7e268 | 2346 | "SPA size estimate multiplication factor"); |
6cde6435 BB |
2347 | |
2348 | module_param(spa_slop_shift, int, 0644); | |
2349 | MODULE_PARM_DESC(spa_slop_shift, "Reserved free space in pool"); | |
02730c33 | 2350 | /* END CSTYLED */ |
c28b2279 | 2351 | #endif |