/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fm/util.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from non-zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- Add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to look up a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.
 *	The spa_t cannot be destroyed or freed while this is non-zero.
 *	Internally, the refcount is never really 'zero' - opening a pool
 *	implicitly keeps some references in the DMU.  Internally we check
 *	against spa_minref, but present the image of a zero/non-zero value
 *	to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and
 *	spa_config_list.  The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */
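
/*
 * Illustrative sketch (not part of this file's logic; "tank" is a
 * hypothetical pool name): a typical read-side consumer follows the
 * ordering above by taking a reference before the config lock:
 *
 *	spa_t *spa;
 *
 *	mutex_enter(&spa_namespace_lock);
 *	if ((spa = spa_lookup("tank")) != NULL)
 *		spa_open_ref(spa, FTAG);
 *	mutex_exit(&spa_namespace_lock);
 *
 *	if (spa != NULL) {
 *		spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *		... read-only config inquiries ...
 *		spa_config_exit(spa, SCL_VDEV, FTAG);
 *		spa_close(spa, FTAG);
 *	}
 */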

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf is on by default in debug builds */
int zfs_flags = ~ZFS_DEBUG_DPRINTF;
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 */
int zfs_recover = 0;


/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	int i;

	for (i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	int i;

	for (i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int i;

	for (i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks ^ (1 << i), tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks ^ (1 << i), tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;
	int i;

	for (i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT(wlocks_held <= locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	int i;

	for (i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int i, locks_held = 0;

	for (i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}
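
/*
 * Illustrative sketch: spa_config_held() is the natural building block
 * for locking assertions.  A function that requires a stable vdev tree
 * could open with, e.g.:
 *
 *	ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) != 0);
 *
 * and one that modifies the config could assert write ownership:
 *
 *	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
 *
 * dva_get_dsize_sync() below uses a variant of this pattern.
 */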

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char c = 0;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(name, "/@");
	if (cp) {
		c = *cp;
		*cp = '\0';
	}

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
	spa = avl_find(&spa_namespace_avl, &search, &where);

	if (cp)
		*cp = c;

	return (spa);
}
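
/*
 * Example (illustrative; "tank" is a hypothetical pool name): because
 * spa_lookup() truncates at the first '/' or '@', a full dataset name
 * resolves to its pool:
 *
 *	spa_lookup("tank")		finds the spa_t for "tank"
 *	spa_lookup("tank/home@snap")	also finds the spa_t for "tank"
 */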

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
	int t;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP | KM_NODEBUG);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;

	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL)
		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;
	int t;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	list_destroy(&spa->spa_config_list);

	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	for (t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is
 * the number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
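
/*
 * Illustrative sketch of the refcount contract described above: after
 *
 *	spa_open_ref(spa, FTAG);
 *
 * spa_refcount_zero(spa) returns B_FALSE, and it returns B_TRUE again
 * only once the matching spa_close(spa, FTAG) has run and just the
 * implicit spa_minref references remain.
 */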

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */
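
/*
 * Worked example (hypothetical pools "tank" and "dozer"): adding the
 * same spare device to both pools calls spa_spare_add() once per pool,
 * so the shared spa_aux_t reaches aux_count == 2.  If the spare is then
 * actively replacing a device in "tank", spa_spare_activate() records
 * that pool's guid in aux_pool, and spa_spare_exists() reports both the
 * reference count and the pool currently using the spare.
 */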

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (e.g. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	int config_changed = B_FALSE;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_smo.smo_object == 0);
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}
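
/*
 * Illustrative sketch of the enter/exit pairing (hypothetical caller;
 * the real ones live in spa.c):
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *
 *	... add or remove vdevs, dirtying the config ...
 *
 *	return (spa_vdev_exit(spa, vd, txg, error));
 *
 * Returning through spa_vdev_exit() guarantees the change has synced to
 * disk and reached the config cache before the caller sees the result.
 */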

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_config_sync(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}
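
/*
 * Illustrative sketch (hypothetical caller; SCL_NONE here stands for
 * "no additional oplocks" and is assumed to be defined in spa.h): a
 * state change such as onlining a vdev brackets its work with
 *
 *	spa_vdev_state_enter(spa, SCL_NONE);
 *	... look up the vdev and change its state ...
 *	return (spa_vdev_state_exit(spa, vd, 0));
 *
 * so that, per the comment in spa_vdev_state_exit(), the command is
 * synchronous from the administrator's point of view.
 */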

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	avl_remove(&spa_namespace_avl, spa);
	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Return the spa_t associated with the given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of
			 * adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid = spa_get_random(-1ULL);

	if (spa != NULL) {
		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
			guid = spa_get_random(-1ULL);
	} else {
		while (guid == 0 || spa_guid_exists(guid, 0))
			guid = spa_get_random(-1ULL);
	}

	return (guid);
}

void
sprintf_blkptr(char *buf, const blkptr_t *bp)
{
	char *type = NULL;
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		type = dmu_ot[BP_GET_TYPE(bp)].ot_name;
		checksum = zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SPRINTF_BLKPTR(snprintf, ' ', buf, bp, type, checksum, compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}
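
/*
 * Examples: strtonum("1a2b", &end) returns 0x1a2b and leaves 'end' at
 * the terminating NUL; strtonum("10 disks", &end) returns 0x10 and
 * leaves 'end' pointing at the space.
 */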

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_load_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev != NULL)
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	/*
	 * The worst case is single-sector max-parity RAID-Z blocks, in which
	 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
	 * times the size; so just assume that.  Add to this the fact that
	 * we can have up to 3 DVAs per bp, and one more factor of 2 because
	 * the block may be dittoed with up to 3 DVAs by ddt_sync().
	 */
	return (lsize * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2);
}
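
/*
 * Worked example: with VDEV_RAIDZ_MAXPARITY == 3 and SPA_DVAS_PER_BP == 3,
 * the bound above is lsize * 4 * 3 * 2 = 24 * lsize, so a 128K logical
 * block is charged up to 3M of worst-case allocated space.
 */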

uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
}

/*
 * Return the failure mode that has been set for this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}
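
/*
 * Example: on a pool at or above SPA_VERSION_DITTO_BLOCKS with the
 * default override (SPA_DVAS_PER_BP, i.e. 3), this returns 3.  Setting
 * spa_max_replication_override to 1 effectively disables ditto blocks,
 * and pools below the ditto-block version always get 1.
 */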

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}
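
/*
 * Worked example (hypothetical ratio): on a vdev whose
 * vdev_deflate_ratio is 512, a 128K asize deflates to
 * (131072 >> SPA_MINBLOCKSHIFT) * 512 = 256 * 512 = 128K -- i.e. no
 * change -- while a vdev with parity overhead has a smaller ratio and
 * reports a proportionally smaller dsize.
 */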

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;
	int d;

	for (d = 0; d < SPA_DVAS_PER_BP; d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;
	int d;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (d = 0; d < SPA_DVAS_PER_BP; d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

void
spa_boot_init(void)
{
	spa_config_load();
}

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

	fm_init();
	refcount_init();
	unique_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	spa_config_load();
	l2arc_start();
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	unique_fini();
	refcount_fini();
	fm_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs.  No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE));
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stats per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	spa->spa_scan_pass_exam = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for zpool status reports
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (ENOENT);
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_state = scn->scn_phys.scn_state;

	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;

	return (0);
}

#if defined(_KERNEL) && defined(HAVE_SPL)
/* Namespace manipulation */
EXPORT_SYMBOL(spa_lookup);
EXPORT_SYMBOL(spa_add);
EXPORT_SYMBOL(spa_remove);
EXPORT_SYMBOL(spa_next);

/* Refcount functions */
EXPORT_SYMBOL(spa_open_ref);
EXPORT_SYMBOL(spa_close);
EXPORT_SYMBOL(spa_refcount_zero);

/* Pool configuration lock */
EXPORT_SYMBOL(spa_config_tryenter);
EXPORT_SYMBOL(spa_config_enter);
EXPORT_SYMBOL(spa_config_exit);
EXPORT_SYMBOL(spa_config_held);

/* Pool vdev add/remove lock */
EXPORT_SYMBOL(spa_vdev_enter);
EXPORT_SYMBOL(spa_vdev_exit);

/* Pool vdev state change lock */
EXPORT_SYMBOL(spa_vdev_state_enter);
EXPORT_SYMBOL(spa_vdev_state_exit);

/* Accessor functions */
EXPORT_SYMBOL(spa_shutting_down);
EXPORT_SYMBOL(spa_get_dsl);
EXPORT_SYMBOL(spa_get_rootblkptr);
EXPORT_SYMBOL(spa_set_rootblkptr);
EXPORT_SYMBOL(spa_altroot);
EXPORT_SYMBOL(spa_sync_pass);
EXPORT_SYMBOL(spa_name);
EXPORT_SYMBOL(spa_guid);
EXPORT_SYMBOL(spa_last_synced_txg);
EXPORT_SYMBOL(spa_first_txg);
EXPORT_SYMBOL(spa_syncing_txg);
EXPORT_SYMBOL(spa_version);
EXPORT_SYMBOL(spa_state);
EXPORT_SYMBOL(spa_load_state);
EXPORT_SYMBOL(spa_freeze_txg);
EXPORT_SYMBOL(spa_get_asize);
EXPORT_SYMBOL(spa_get_dspace);
EXPORT_SYMBOL(spa_update_dspace);
EXPORT_SYMBOL(spa_deflate);
EXPORT_SYMBOL(spa_normal_class);
EXPORT_SYMBOL(spa_log_class);
EXPORT_SYMBOL(spa_max_replication);
EXPORT_SYMBOL(spa_prev_software_version);
EXPORT_SYMBOL(spa_get_failmode);
EXPORT_SYMBOL(spa_suspended);
EXPORT_SYMBOL(spa_bootfs);
EXPORT_SYMBOL(spa_delegation);
EXPORT_SYMBOL(spa_meta_objset);

/* Miscellaneous support routines */
EXPORT_SYMBOL(spa_rename);
EXPORT_SYMBOL(spa_guid_exists);
EXPORT_SYMBOL(spa_strdup);
EXPORT_SYMBOL(spa_strfree);
EXPORT_SYMBOL(spa_get_random);
EXPORT_SYMBOL(spa_generate_guid);
EXPORT_SYMBOL(sprintf_blkptr);
EXPORT_SYMBOL(spa_freeze);
EXPORT_SYMBOL(spa_upgrade);
EXPORT_SYMBOL(spa_evict_all);
EXPORT_SYMBOL(spa_lookup_by_guid);
EXPORT_SYMBOL(spa_has_spare);
EXPORT_SYMBOL(dva_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize);
EXPORT_SYMBOL(spa_has_slogs);
EXPORT_SYMBOL(spa_is_root);
EXPORT_SYMBOL(spa_writeable);
EXPORT_SYMBOL(spa_mode);

EXPORT_SYMBOL(spa_namespace_lock);

module_param(zfs_recover, int, 0644);
MODULE_PARM_DESC(zfs_recover, "Set to attempt to recover from fatal errors");
#endif