/* ZIL: Reduce scope of per-dataset zl_issuer_lock. */
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
24 */
25
26 /* Portions Copyright 2010 Robert Milkowski */
27
28 #ifndef _SYS_ZIL_IMPL_H
29 #define _SYS_ZIL_IMPL_H
30
31 #include <sys/zil.h>
32 #include <sys/dmu_objset.h>
33
34 #ifdef __cplusplus
35 extern "C" {
36 #endif
37
38 /*
39 * Possible states for a given lwb structure.
40 *
41 * An lwb will start out in the "closed" state, and then transition to
42 * the "opened" state via a call to zil_lwb_write_open(). When
43 * transitioning from "closed" to "opened" the zilog's "zl_issuer_lock"
44 * must be held.
45 *
46 * After the lwb is "opened", it can transition into the "issued" state
47 * via zil_lwb_write_close(). Again, the zilog's "zl_issuer_lock" must
48 * be held when making this transition.
49 *
50 * After the lwb's write zio completes, it transitions into the "write
51 * done" state via zil_lwb_write_done(); and then into the "flush done"
52 * state via zil_lwb_flush_vdevs_done(). When transitioning from
53 * "issued" to "write done", and then from "write done" to "flush done",
54 * the zilog's "zl_lock" must be held, *not* the "zl_issuer_lock".
55 *
56 * The zilog's "zl_issuer_lock" can become heavily contended in certain
57 * workloads, so we specifically avoid acquiring that lock when
58 * transitioning an lwb from "issued" to "done". This allows us to avoid
59 * having to acquire the "zl_issuer_lock" for each lwb ZIO completion,
60 * which would have added more lock contention on an already heavily
61 * contended lock.
62 *
63 * Additionally, correctness when reading an lwb's state is often
64 * achieved by exploiting the fact that these state transitions occur in
65 * this specific order; i.e. "closed" to "opened" to "issued" to "done".
66 *
67 * Thus, if an lwb is in the "closed" or "opened" state, holding the
68 * "zl_issuer_lock" will prevent a concurrent thread from transitioning
69 * that lwb to the "issued" state. Likewise, if an lwb is already in the
70 * "issued" state, holding the "zl_lock" will prevent a concurrent
71 * thread from transitioning that lwb to the "write done" state.
72 */
73 typedef enum {
74 LWB_STATE_CLOSED,
75 LWB_STATE_OPENED,
76 LWB_STATE_ISSUED,
77 LWB_STATE_WRITE_DONE,
78 LWB_STATE_FLUSH_DONE,
79 LWB_NUM_STATES
80 } lwb_state_t;
81
82 /*
83 * Log write block (lwb)
84 *
85 * Prior to an lwb being issued to disk via zil_lwb_write_issue(), it
86 * will be protected by the zilog's "zl_issuer_lock". Basically, prior
87 * to it being issued, it will only be accessed by the thread that's
88 * holding the "zl_issuer_lock". After the lwb is issued, the zilog's
89 * "zl_lock" is used to protect the lwb against concurrent access.
90 */
91 typedef struct lwb {
92 zilog_t *lwb_zilog; /* back pointer to log struct */
93 blkptr_t lwb_blk; /* on disk address of this log blk */
94 boolean_t lwb_fastwrite; /* is blk marked for fastwrite? */
95 boolean_t lwb_slog; /* lwb_blk is on SLOG device */
96 boolean_t lwb_indirect; /* do not postpone zil_lwb_commit() */
97 int lwb_nused; /* # used bytes in buffer */
98 int lwb_nfilled; /* # filled bytes in buffer */
99 int lwb_sz; /* size of block and buffer */
100 lwb_state_t lwb_state; /* the state of this lwb */
101 char *lwb_buf; /* log write buffer */
102 zio_t *lwb_write_zio; /* zio for the lwb buffer */
103 zio_t *lwb_root_zio; /* root zio for lwb write and flushes */
104 hrtime_t lwb_issued_timestamp; /* when was the lwb issued? */
105 uint64_t lwb_issued_txg; /* the txg when the write is issued */
106 uint64_t lwb_max_txg; /* highest txg in this lwb */
107 list_node_t lwb_node; /* zilog->zl_lwb_list linkage */
108 list_node_t lwb_issue_node; /* linkage of lwbs ready for issue */
109 list_t lwb_itxs; /* list of itx's */
110 list_t lwb_waiters; /* list of zil_commit_waiter's */
111 avl_tree_t lwb_vdev_tree; /* vdevs to flush after lwb write */
112 kmutex_t lwb_vdev_lock; /* protects lwb_vdev_tree */
113 } lwb_t;
114
115 /*
116 * ZIL commit waiter.
117 *
118 * This structure is allocated each time zil_commit() is called, and is
119 * used by zil_commit() to communicate with other parts of the ZIL, such
120 * that zil_commit() can know when it safe for it return. For more
121 * details, see the comment above zil_commit().
122 *
123 * The "zcw_lock" field is used to protect the commit waiter against
124 * concurrent access. This lock is often acquired while already holding
125 * the zilog's "zl_issuer_lock" or "zl_lock"; see the functions
126 * zil_process_commit_list() and zil_lwb_flush_vdevs_done() as examples
127 * of this. Thus, one must be careful not to acquire the
128 * "zl_issuer_lock" or "zl_lock" when already holding the "zcw_lock";
129 * e.g. see the zil_commit_waiter_timeout() function.
130 */
131 typedef struct zil_commit_waiter {
132 kcondvar_t zcw_cv; /* signalled when "done" */
133 kmutex_t zcw_lock; /* protects fields of this struct */
134 list_node_t zcw_node; /* linkage in lwb_t:lwb_waiter list */
135 lwb_t *zcw_lwb; /* back pointer to lwb when linked */
136 boolean_t zcw_done; /* B_TRUE when "done", else B_FALSE */
137 int zcw_zio_error; /* contains the zio io_error value */
138 } zil_commit_waiter_t;
139
140 /*
141 * Intent log transaction lists
142 */
143 typedef struct itxs {
144 list_t i_sync_list; /* list of synchronous itxs */
145 avl_tree_t i_async_tree; /* tree of foids for async itxs */
146 } itxs_t;
147
/*
 * Per-txg slot of a zilog's itx chains (see zilog_t:zl_itxg below).
 */
typedef struct itxg {
	kmutex_t	itxg_lock;	/* lock for this structure */
	uint64_t	itxg_txg;	/* txg for this chain */
	itxs_t		*itxg_itxs;	/* sync and async itxs */
} itxg_t;
153
/* for async nodes we build up an AVL tree of lists of async itxs per file */
typedef struct itx_async_node {
	uint64_t	ia_foid;	/* file object id; AVL sort key */
	list_t		ia_list;	/* list of async itxs for this foid */
	avl_node_t	ia_node;	/* AVL tree linkage */
} itx_async_node_t;
160
161 /*
162 * Vdev flushing: during a zil_commit(), we build up an AVL tree of the vdevs
163 * we've touched so we know which ones need a write cache flush at the end.
164 */
165 typedef struct zil_vdev_node {
166 uint64_t zv_vdev; /* vdev to be flushed */
167 avl_node_t zv_node; /* AVL tree linkage */
168 } zil_vdev_node_t;
169
/* Number of previous block sizes remembered in zl_prev_blks */
#define	ZIL_PREV_BLKS 16

/*
 * Stable storage intent log management structure. One per dataset.
 *
 * Locking: "zl_lock" protects most fields; "zl_issuer_lock" serializes
 * lwb issue (a single writer per ZIL at a time); "zl_lwb_io_lock"
 * protects the lwb-io tracking fields grouped below it. See the lwb
 * state comment above for the lock ordering between them.
 */
struct zilog {
	kmutex_t	zl_lock;	/* protects most zilog_t fields */
	struct dsl_pool	*zl_dmu_pool;	/* DSL pool */
	spa_t		*zl_spa;	/* handle for read/write log */
	const zil_header_t *zl_header;	/* log header buffer */
	objset_t	*zl_os;		/* object set we're logging */
	zil_get_data_t	*zl_get_data;	/* callback to get object content */
	lwb_t		*zl_last_lwb_opened; /* most recent lwb opened */
	hrtime_t	zl_last_lwb_latency; /* zio latency of last lwb done */
	uint64_t	zl_lr_seq;	/* on-disk log record sequence number */
	uint64_t	zl_commit_lr_seq; /* last committed on-disk lr seq */
	uint64_t	zl_destroy_txg;	/* txg of last zil_destroy() */
	uint64_t	zl_replayed_seq[TXG_SIZE]; /* last replayed rec seq */
	uint64_t	zl_replaying_seq; /* current replay seq number */
	uint32_t	zl_suspend;	/* log suspend count */
	kcondvar_t	zl_cv_suspend;	/* log suspend completion */
	uint8_t		zl_suspending;	/* log is currently suspending */
	uint8_t		zl_keep_first;	/* keep first log block in destroy */
	uint8_t		zl_replay;	/* replaying records while set */
	uint8_t		zl_stop_sync;	/* for debugging */
	kmutex_t	zl_issuer_lock;	/* single writer, per ZIL, at a time */
	uint8_t		zl_logbias;	/* latency or throughput */
	uint8_t		zl_sync;	/* synchronous or asynchronous */
	int		zl_parse_error;	/* last zil_parse() error */
	uint64_t	zl_parse_blk_seq; /* highest blk seq on last parse */
	uint64_t	zl_parse_lr_seq; /* highest lr seq on last parse */
	uint64_t	zl_parse_blk_count; /* number of blocks parsed */
	uint64_t	zl_parse_lr_count; /* number of log records parsed */
	itxg_t		zl_itxg[TXG_SIZE]; /* intent log txg chains */
	list_t		zl_itx_commit_list; /* itx list to be committed */
	uint64_t	zl_cur_used;	/* current commit log size used */
	list_t		zl_lwb_list;	/* in-flight log write list */
	avl_tree_t	zl_bp_tree;	/* track bps during log parse */
	clock_t		zl_replay_time;	/* lbolt of when replay started */
	uint64_t	zl_replay_blks;	/* number of log blocks replayed */
	zil_header_t	zl_old_header;	/* debugging aid */
	uint_t		zl_prev_blks[ZIL_PREV_BLKS]; /* size - sector rounded */
	uint_t		zl_prev_rotor;	/* rotor for zl_prev[] */
	txg_node_t	zl_dirty_link;	/* protected by dp_dirty_zilogs list */
	uint64_t	zl_dirty_max_txg; /* highest txg used to dirty zilog */

	kmutex_t	zl_lwb_io_lock;	/* protect following members */
	uint64_t	zl_lwb_inflight[TXG_SIZE]; /* io issued, but not done */
	kcondvar_t	zl_lwb_io_cv;	/* signal when the flush is done */
	uint64_t	zl_lwb_max_issued_txg; /* max txg when lwb io issued */

	/*
	 * Max block size for this ZIL. Note that this can not be changed
	 * while the ZIL is in use because consumers (ZPL/zvol) need to take
	 * this into account when deciding between WR_COPIED and WR_NEED_COPY
	 * (see zil_max_copied_data()).
	 */
	uint64_t	zl_max_block_size;

	/* Pointer for per dataset zil sums */
	zil_sums_t	*zl_sums;
};
232
/*
 * Node of zilog_t:zl_bp_tree, used to track block pointers (by DVA)
 * already seen while parsing the on-disk log, so each is visited once.
 */
typedef struct zil_bp_node {
	dva_t		zn_dva;		/* DVA of the tracked log block */
	avl_node_t	zn_node;	/* AVL tree linkage */
} zil_bp_node_t;
237
238 #ifdef __cplusplus
239 }
240 #endif
241
242 #endif /* _SYS_ZIL_IMPL_H */