/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017, Intel Corporation.
 */

#ifndef _SYS_SPA_IMPL_H
#define	_SYS_SPA_IMPL_H

#include <sys/spa.h>
#include <sys/spa_checkpoint.h>
#include <sys/spa_log_spacemap.h>
#include <sys/vdev.h>
#include <sys/vdev_removal.h>
#include <sys/metaslab.h>
#include <sys/dmu.h>
#include <sys/dsl_pool.h>
#include <sys/uberblock_impl.h>
#include <sys/zfs_context.h>
#include <sys/avl.h>
#include <sys/refcount.h>
#include <sys/bplist.h>
#include <sys/bpobj.h>
#include <sys/dsl_crypt.h>
#include <sys/zfeature.h>
#include <sys/zthr.h>
#include <zfeature_common.h>

typedef struct spa_error_entry {
	zbookmark_phys_t	se_bookmark;
	char			*se_name;
	avl_node_t		se_avl;
} spa_error_entry_t;

typedef struct spa_history_phys {
	uint64_t sh_pool_create_len;	/* ending offset of zpool create */
	uint64_t sh_phys_max_off;	/* physical EOF */
	uint64_t sh_bof;		/* logical BOF */
	uint64_t sh_eof;		/* logical EOF */
	uint64_t sh_records_lost;	/* num of records overwritten */
} spa_history_phys_t;
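
/*
 * Illustrative sketch, not part of the original interface: the history log
 * is a ring buffer, so logical offsets past the 'zpool create' record wrap
 * within the physical object.  The helper name below is hypothetical; the
 * authoritative conversion lives in spa_history.c.
 */
static inline uint64_t
spa_history_log_to_phys_sketch(uint64_t log_off, const spa_history_phys_t *shpp)
{
	/* Bytes available for the wrapping portion of the log. */
	uint64_t phys_len = shpp->sh_phys_max_off - shpp->sh_pool_create_len;

	/* The initial 'zpool create' record never wraps. */
	return (shpp->sh_pool_create_len +
	    (log_off - shpp->sh_pool_create_len) % phys_len);
}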

/*
 * All members must be uint64_t, for byteswap purposes.
 */
typedef struct spa_removing_phys {
	uint64_t sr_state; /* dsl_scan_state_t */

	/*
	 * The vdev ID that we most recently attempted to remove,
	 * or -1 if no removal has been attempted.
	 */
	uint64_t sr_removing_vdev;

	/*
	 * The vdev ID that we most recently successfully removed,
	 * or -1 if no devices have been removed.
	 */
	uint64_t sr_prev_indirect_vdev;

	uint64_t sr_start_time;
	uint64_t sr_end_time;

	/*
	 * Note that we can not use the space map's or indirect mapping's
	 * accounting as a substitute for these values, because we need to
	 * count frees of not-yet-copied data as though it did the copy.
	 * Otherwise, we could get into a situation where copied > to_copy,
	 * or we complete before copied == to_copy.
	 */
	uint64_t sr_to_copy;	/* bytes that need to be copied */
	uint64_t sr_copied;	/* bytes that have been copied or freed */
} spa_removing_phys_t;
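
/*
 * Illustrative sketch, not part of the original interface: how the byte
 * counters above combine into a completion fraction.  The helper name is
 * hypothetical; the exported statistics go through spa_removal_get_stats().
 */
static inline uint64_t
spa_removing_pct_done_sketch(const spa_removing_phys_t *srp)
{
	if (srp->sr_to_copy == 0)
		return (100);
	/* sr_copied also counts frees of not-yet-copied data (see above). */
	return ((srp->sr_copied * 100) / srp->sr_to_copy);
}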

/*
 * This struct is stored as an entry in the DMU_POOL_DIRECTORY_OBJECT
 * (with key DMU_POOL_CONDENSING_INDIRECT).  It is present if a condense
 * of an indirect vdev's mapping object is in progress.
 */
typedef struct spa_condensing_indirect_phys {
	/*
	 * The vdev ID of the indirect vdev whose indirect mapping is
	 * being condensed.
	 */
	uint64_t	scip_vdev;

	/*
	 * The vdev's old obsolete spacemap. This spacemap's contents are
	 * being integrated into the new mapping.
	 */
	uint64_t	scip_prev_obsolete_sm_object;

	/*
	 * The new mapping object that is being created.
	 */
	uint64_t	scip_next_mapping_object;
} spa_condensing_indirect_phys_t;
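
/*
 * A sketch of how this record is typically read back from the MOS; the exact
 * call sites live in the indirect-vdev condense code, and zap_lookup() plus
 * the DMU_POOL_* keys are assumed from sys/zap.h and sys/dmu.h:
 *
 *	spa_condensing_indirect_phys_t scip;
 *	int err = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
 *	    DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
 *	    sizeof (scip) / sizeof (uint64_t), &scip);
 */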

struct spa_aux_vdev {
	uint64_t	sav_object;		/* MOS object for device list */
	nvlist_t	*sav_config;		/* cached device config */
	vdev_t		**sav_vdevs;		/* devices */
	int		sav_count;		/* number devices */
	boolean_t	sav_sync;		/* sync the device list */
	nvlist_t	**sav_pending;		/* pending device additions */
	uint_t		sav_npending;		/* # pending devices */
};

typedef struct spa_config_lock {
	kmutex_t	scl_lock;
	kthread_t	*scl_writer;
	int		scl_write_wanted;
	kcondvar_t	scl_cv;
	zfs_refcount_t	scl_count;
} spa_config_lock_t;
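
/*
 * Usage sketch: callers normally take these locks through spa_config_enter()
 * and spa_config_exit() (declared in sys/spa.h) rather than touching the
 * fields directly, along the lines of:
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	... examine spa->spa_root_vdev ...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 */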

typedef struct spa_config_dirent {
	list_node_t	scd_link;
	char		*scd_path;
} spa_config_dirent_t;

typedef enum zio_taskq_type {
	ZIO_TASKQ_ISSUE = 0,
	ZIO_TASKQ_ISSUE_HIGH,
	ZIO_TASKQ_INTERRUPT,
	ZIO_TASKQ_INTERRUPT_HIGH,
	ZIO_TASKQ_TYPES
} zio_taskq_type_t;

/*
 * State machine for the zpool-poolname process.  The state transitions
 * are done as follows:
 *
 *	From		   To			Routine
 *	PROC_NONE	-> PROC_CREATED		spa_activate()
 *	PROC_CREATED	-> PROC_ACTIVE		spa_thread()
 *	PROC_ACTIVE	-> PROC_DEACTIVATE	spa_deactivate()
 *	PROC_DEACTIVATE	-> PROC_GONE		spa_thread()
 *	PROC_GONE	-> PROC_NONE		spa_deactivate()
 */
typedef enum spa_proc_state {
	SPA_PROC_NONE,		/* spa_proc = &p0, no process created */
	SPA_PROC_CREATED,	/* spa_activate() has proc, is waiting */
	SPA_PROC_ACTIVE,	/* taskqs created, spa_proc set */
	SPA_PROC_DEACTIVATE,	/* spa_deactivate() requests process exit */
	SPA_PROC_GONE		/* spa_thread() is exiting, spa_proc = &p0 */
} spa_proc_state_t;
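
/*
 * A sketch of how a waiter typically observes these transitions, assuming
 * the spa_proc_lock/spa_proc_cv pair declared in struct spa below (on
 * platforms that create the zpool-poolname process):
 *
 *	mutex_enter(&spa->spa_proc_lock);
 *	while (spa->spa_proc_state == SPA_PROC_CREATED)
 *		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
 *	mutex_exit(&spa->spa_proc_lock);
 */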

typedef struct spa_taskqs {
	uint_t stqs_count;
	taskq_t **stqs_taskq;
} spa_taskqs_t;

typedef enum spa_all_vdev_zap_action {
	AVZ_ACTION_NONE = 0,
	AVZ_ACTION_DESTROY,	/* Destroy all per-vdev ZAPs and the AVZ. */
	AVZ_ACTION_REBUILD,	/* Populate the new AVZ, see spa_avz_rebuild */
	AVZ_ACTION_INITIALIZE
} spa_avz_action_t;

typedef enum spa_config_source {
	SPA_CONFIG_SRC_NONE = 0,
	SPA_CONFIG_SRC_SCAN,		/* scan of path (default: /dev/dsk) */
	SPA_CONFIG_SRC_CACHEFILE,	/* any cachefile */
	SPA_CONFIG_SRC_TRYIMPORT,	/* returned from call to tryimport */
	SPA_CONFIG_SRC_SPLIT,		/* new pool in a pool split */
	SPA_CONFIG_SRC_MOS		/* MOS, but not always from right txg */
} spa_config_source_t;

struct spa {
	/*
	 * Fields protected by spa_namespace_lock.
	 */
	char		spa_name[ZFS_MAX_DATASET_NAME_LEN];	/* pool name */
	char		*spa_comment;		/* comment */
	avl_node_t	spa_avl;		/* node in spa_namespace_avl */
	nvlist_t	*spa_config;		/* last synced config */
	nvlist_t	*spa_config_syncing;	/* currently syncing config */
	nvlist_t	*spa_config_splitting;	/* config for splitting */
	nvlist_t	*spa_load_info;		/* info and errors from load */
	uint64_t	spa_config_txg;		/* txg of last config change */
	int		spa_sync_pass;		/* iterate-to-convergence */
	pool_state_t	spa_state;		/* pool state */
	int		spa_inject_ref;		/* injection references */
	uint8_t		spa_sync_on;		/* sync threads are running */
	spa_load_state_t spa_load_state;	/* current load operation */
	boolean_t	spa_indirect_vdevs_loaded; /* mappings loaded? */
	boolean_t	spa_trust_config;	/* do we trust vdev tree? */
	spa_config_source_t spa_config_source;	/* where config comes from? */
	uint64_t	spa_import_flags;	/* import specific flags */
	spa_taskqs_t	spa_zio_taskq[ZIO_TYPES][ZIO_TASKQ_TYPES];
	dsl_pool_t	*spa_dsl_pool;
	boolean_t	spa_is_initializing;	/* true while opening pool */
	metaslab_class_t *spa_normal_class;	/* normal data class */
	metaslab_class_t *spa_log_class;	/* intent log data class */
	metaslab_class_t *spa_special_class;	/* special allocation class */
	metaslab_class_t *spa_dedup_class;	/* dedup allocation class */
	uint64_t	spa_first_txg;		/* first txg after spa_open() */
	uint64_t	spa_final_txg;		/* txg of export/destroy */
	uint64_t	spa_freeze_txg;		/* freeze pool at this txg */
	uint64_t	spa_load_max_txg;	/* best initial ub_txg */
	uint64_t	spa_claim_max_txg;	/* highest claimed birth txg */
	inode_timespec_t spa_loaded_ts;		/* 1st successful open time */
	objset_t	*spa_meta_objset;	/* copy of dp->dp_meta_objset */
	kmutex_t	spa_evicting_os_lock;	/* Evicting objset list lock */
	list_t		spa_evicting_os_list;	/* Objsets being evicted. */
	kcondvar_t	spa_evicting_os_cv;	/* Objset Eviction Completion */
	txg_list_t	spa_vdev_txg_list;	/* per-txg dirty vdev list */
	vdev_t		*spa_root_vdev;		/* top-level vdev container */
	int		spa_min_ashift;		/* of vdevs in normal class */
	int		spa_max_ashift;		/* of vdevs in normal class */
	uint64_t	spa_config_guid;	/* config pool guid */
	uint64_t	spa_load_guid;		/* spa_load initialized guid */
	uint64_t	spa_last_synced_guid;	/* last synced guid */
	list_t		spa_config_dirty_list;	/* vdevs with dirty config */
	list_t		spa_state_dirty_list;	/* vdevs with dirty state */
	/*
	 * spa_alloc_locks and spa_alloc_trees are arrays, whose lengths are
	 * stored in spa_alloc_count. There is one tree and one lock for each
	 * allocator, to help improve allocation performance in write-heavy
	 * environments.
	 */
	kmutex_t	*spa_alloc_locks;
	avl_tree_t	*spa_alloc_trees;
	int		spa_alloc_count;
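
	/*
	 * Selection sketch (the real assignment happens in the ZIO pipeline's
	 * allocation throttle): each allocating zio is mapped to one allocator
	 * index and then serializes only on that allocator's lock and tree,
	 * roughly:
	 *
	 *	int allocator = hash_of_bookmark % spa->spa_alloc_count;
	 *	mutex_enter(&spa->spa_alloc_locks[allocator]);
	 *	avl_add(&spa->spa_alloc_trees[allocator], zio);
	 *	mutex_exit(&spa->spa_alloc_locks[allocator]);
	 *
	 * hash_of_bookmark stands in for the actual hash that is used.
	 */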

	spa_aux_vdev_t	spa_spares;		/* hot spares */
	spa_aux_vdev_t	spa_l2cache;		/* L2ARC cache devices */
	nvlist_t	*spa_label_features;	/* Features for reading MOS */
	uint64_t	spa_config_object;	/* MOS object for pool config */
	uint64_t	spa_config_generation;	/* config generation number */
	uint64_t	spa_syncing_txg;	/* txg currently syncing */
	bpobj_t		spa_deferred_bpobj;	/* deferred-free bplist */
	bplist_t	spa_free_bplist[TXG_SIZE]; /* bplist of stuff to free */
	zio_cksum_salt_t spa_cksum_salt;	/* secret salt for cksum */
	/* checksum context templates */
	kmutex_t	spa_cksum_tmpls_lock;
	void		*spa_cksum_tmpls[ZIO_CHECKSUM_FUNCTIONS];
	uberblock_t	spa_ubsync;		/* last synced uberblock */
	uberblock_t	spa_uberblock;		/* current uberblock */
	boolean_t	spa_extreme_rewind;	/* rewind past deferred frees */
	kmutex_t	spa_scrub_lock;		/* resilver/scrub lock */
	uint64_t	spa_scrub_inflight;	/* in-flight scrub bytes */
	uint64_t	spa_load_verify_ios;	/* in-flight verification IOs */
	kcondvar_t	spa_scrub_io_cv;	/* scrub I/O completion */
	uint8_t		spa_scrub_active;	/* active or suspended? */
	uint8_t		spa_scrub_type;		/* type of scrub we're doing */
	uint8_t		spa_scrub_finished;	/* indicator to rotate logs */
	uint8_t		spa_scrub_started;	/* started since last boot */
	uint8_t		spa_scrub_reopen;	/* scrub doing vdev_reopen */
	uint64_t	spa_scan_pass_start;	/* start time per pass/reboot */
	uint64_t	spa_scan_pass_scrub_pause; /* scrub pause time */
	uint64_t	spa_scan_pass_scrub_spent_paused; /* total paused */
	uint64_t	spa_scan_pass_exam;	/* examined bytes per pass */
	uint64_t	spa_scan_pass_issued;	/* issued bytes per pass */

	/*
	 * We are in the middle of a resilver, and another resilver
	 * is needed once this one completes. This is set iff any
	 * vdev_resilver_deferred is set.
	 */
	boolean_t	spa_resilver_deferred;
	kmutex_t	spa_async_lock;		/* protect async state */
	kthread_t	*spa_async_thread;	/* thread doing async task */
	int		spa_async_suspended;	/* async tasks suspended */
	kcondvar_t	spa_async_cv;		/* wait for thread_exit() */
	uint16_t	spa_async_tasks;	/* async task mask */
	uint64_t	spa_missing_tvds;	/* unopenable tvds on load */
	uint64_t	spa_missing_tvds_allowed; /* allow loading spa? */

	spa_removing_phys_t spa_removing_phys;
	spa_vdev_removal_t *spa_vdev_removal;

	spa_condensing_indirect_phys_t	spa_condensing_indirect_phys;
	spa_condensing_indirect_t	*spa_condensing_indirect;
	zthr_t		*spa_condense_zthr;	/* zthr doing condense. */

	uint64_t	spa_checkpoint_txg;	/* the txg of the checkpoint */
	spa_checkpoint_info_t spa_checkpoint_info; /* checkpoint accounting */
	zthr_t		*spa_checkpoint_discard_zthr;

	space_map_t	*spa_syncing_log_sm;	/* current log space map */
	avl_tree_t	spa_sm_logs_by_txg;
	kmutex_t	spa_flushed_ms_lock;	/* for metaslabs_by_flushed */
	avl_tree_t	spa_metaslabs_by_flushed;
	spa_unflushed_stats_t	spa_unflushed_stats;
	list_t		spa_log_summary;
	uint64_t	spa_log_flushall_txg;

	char		*spa_root;		/* alternate root directory */
	uint64_t	spa_ena;		/* spa-wide ereport ENA */
	int		spa_last_open_failed;	/* error if last open failed */
	uint64_t	spa_last_ubsync_txg;	/* "best" uberblock txg */
	uint64_t	spa_last_ubsync_txg_ts;	/* timestamp from that ub */
	uint64_t	spa_load_txg;		/* ub txg that loaded */
	uint64_t	spa_load_txg_ts;	/* timestamp from that ub */
	uint64_t	spa_load_meta_errors;	/* verify metadata err count */
	uint64_t	spa_load_data_errors;	/* verify data err count */
	uint64_t	spa_verify_min_txg;	/* start txg of verify scrub */
	kmutex_t	spa_errlog_lock;	/* error log lock */
	uint64_t	spa_errlog_last;	/* last error log object */
	uint64_t	spa_errlog_scrub;	/* scrub error log object */
	kmutex_t	spa_errlist_lock;	/* error list/ereport lock */
	avl_tree_t	spa_errlist_last;	/* last error list */
	avl_tree_t	spa_errlist_scrub;	/* scrub error list */
	uint64_t	spa_deflate;		/* should we deflate? */
	uint64_t	spa_history;		/* history object */
	kmutex_t	spa_history_lock;	/* history lock */
	vdev_t		*spa_pending_vdev;	/* pending vdev additions */
	kmutex_t	spa_props_lock;		/* property lock */
	uint64_t	spa_pool_props_object;	/* object for properties */
	uint64_t	spa_bootfs;		/* default boot filesystem */
	uint64_t	spa_failmode;		/* failure mode for the pool */
	uint64_t	spa_deadman_failmode;	/* failure mode for deadman */
	uint64_t	spa_delegation;		/* delegation on/off */
	list_t		spa_config_list;	/* previous cache file(s) */
	/* per-CPU array of root of async I/O: */
	zio_t		**spa_async_zio_root;
	zio_t		*spa_suspend_zio_root;	/* root of all suspended I/O */
	zio_t		*spa_txg_zio[TXG_SIZE];	/* spa_sync() waits for this */
	kmutex_t	spa_suspend_lock;	/* protects suspend_zio_root */
	kcondvar_t	spa_suspend_cv;		/* notification of resume */
	zio_suspend_reason_t	spa_suspended;	/* pool is suspended */
	uint8_t		spa_claiming;		/* pool is doing zil_claim() */
	boolean_t	spa_is_root;		/* pool is root */
	int		spa_minref;		/* num refs when first opened */
	int		spa_mode;		/* FREAD | FWRITE */
	spa_log_state_t spa_log_state;		/* log state */
	uint64_t	spa_autoexpand;		/* lun expansion on/off */
	ddt_t		*spa_ddt[ZIO_CHECKSUM_FUNCTIONS]; /* in-core DDTs */
	uint64_t	spa_ddt_stat_object;	/* DDT statistics */
	uint64_t	spa_dedup_dspace;	/* Cache get_dedup_dspace() */
	uint64_t	spa_dedup_checksum;	/* default dedup checksum */
	uint64_t	spa_dspace;		/* dspace in normal class */
	kmutex_t	spa_vdev_top_lock;	/* dueling offline/remove */
	kmutex_t	spa_proc_lock;		/* protects spa_proc* */
	kcondvar_t	spa_proc_cv;		/* spa_proc_state transitions */
	spa_proc_state_t spa_proc_state;	/* see definition */
	proc_t		*spa_proc;		/* "zpool-poolname" process */
	uint64_t	spa_did;		/* if procp != p0, did of t1 */
	boolean_t	spa_autoreplace;	/* autoreplace set in open */
	int		spa_vdev_locks;		/* locks grabbed */
	uint64_t	spa_creation_version;	/* version at pool creation */
	uint64_t	spa_prev_software_version; /* See ub_software_version */
	uint64_t	spa_feat_for_write_obj;	/* required to write to pool */
	uint64_t	spa_feat_for_read_obj;	/* required to read from pool */
	uint64_t	spa_feat_desc_obj;	/* Feature descriptions */
	uint64_t	spa_feat_enabled_txg_obj; /* Feature enabled txg */
	kmutex_t	spa_feat_stats_lock;	/* protects spa_feat_stats */
	nvlist_t	*spa_feat_stats;	/* Cache of enabled features */
	/* cache feature refcounts */
	uint64_t	spa_feat_refcount_cache[SPA_FEATURES];
	taskqid_t	spa_deadman_tqid;	/* Task id */
	uint64_t	spa_deadman_calls;	/* number of deadman calls */
	hrtime_t	spa_sync_starttime;	/* starting time of spa_sync */
	uint64_t	spa_deadman_synctime;	/* deadman sync expiration */
	uint64_t	spa_deadman_ziotime;	/* deadman zio expiration */
	uint64_t	spa_all_vdev_zaps;	/* ZAP of per-vd ZAP obj #s */
	spa_avz_action_t	spa_avz_action;	/* destroy/rebuild AVZ? */
	uint64_t	spa_autotrim;		/* automatic background trim? */
	uint64_t	spa_errata;		/* errata issues detected */
	spa_stats_t	spa_stats;		/* assorted spa statistics */
	spa_keystore_t	spa_keystore;		/* loaded crypto keys */

	/* arc_memory_throttle() parameters during low memory condition */
	uint64_t	spa_lowmem_page_load;	/* memory load during txg */
	uint64_t	spa_lowmem_last_txg;	/* txg window start */

	hrtime_t	spa_ccw_fail_time;	/* Conf cache write fail time */
	taskq_t		*spa_zvol_taskq;	/* Taskq for minor management */
	taskq_t		*spa_prefetch_taskq;	/* Taskq for prefetch threads */
	uint64_t	spa_multihost;		/* multihost aware (mmp) */
	mmp_thread_t	spa_mmp;		/* multihost mmp thread */
	list_t		spa_leaf_list;		/* list of leaf vdevs */
	uint64_t	spa_leaf_list_gen;	/* track leaf_list changes */

	/*
	 * spa_refcount & spa_config_lock must be the last elements
	 * because zfs_refcount_t changes size based on compilation options.
	 * In order for the MDB module to function correctly, the other
	 * fields must remain in the same location.
	 */
	spa_config_lock_t spa_config_lock[SCL_LOCKS]; /* config changes */
	zfs_refcount_t	spa_refcount;		/* number of opens */

	taskq_t		*spa_upgrade_taskq;	/* taskq for upgrade jobs */
};

extern char *spa_config_path;

extern void spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent);
extern void spa_taskq_dispatch_sync(spa_t *, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags);
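
/*
 * Dispatch sketch: the ZIO pipeline hands work to one of the
 * spa_zio_taskq[type][qtype] queues through the helpers above, along the
 * lines of the call below (zio_execute and io_tqent are assumed from
 * sys/zio.h; the flags value is illustrative):
 *
 *	spa_taskq_dispatch_ent(spa, ZIO_TYPE_WRITE, ZIO_TASKQ_ISSUE,
 *	    (task_func_t *)zio_execute, zio, 0, &zio->io_tqent);
 */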

extern void spa_load_spares(spa_t *spa);
extern void spa_load_l2cache(spa_t *spa);
extern sysevent_t *spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl,
    const char *name);
extern void spa_event_post(sysevent_t *ev);

#endif /* _SYS_SPA_IMPL_H */