/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef __XFS_LOG_PRIV_H__
#define __XFS_LOG_PRIV_H__

struct xfs_buf;
struct xlog;
struct xlog_ticket;
struct xfs_mount;
struct xfs_log_callback;

/*
 * Flags for log structure
 */
#define XLOG_ACTIVE_RECOVERY	0x2	/* in the middle of recovery */
#define XLOG_RECOVERY_NEEDED	0x4	/* log was recovered */
#define XLOG_IO_ERROR		0x8	/* log hit an I/O error, and being
					   shutdown */
#define XLOG_TAIL_WARN		0x10	/* log tail verify warning issued */

/*
 * get client id from packed copy.
 *
 * this hack is here because the xlog_pack code copies four bytes
 * of xlog_op_header containing the fields oh_clientid, oh_flags
 * and oh_res2 into the packed copy.
 *
 * later on this four byte chunk is treated as an int and the
 * client id is pulled out.
 *
 * this has endian issues, of course.
 */
static inline uint xlog_get_client_id(__be32 i)
{
	return be32_to_cpu(i) >> 24;
}

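/*
 * Worked example (illustrative only; the 0x69 client id value is assumed
 * from the transaction client id in xfs_log_format.h rather than defined
 * here): oh_clientid is the first of the four bytes that xlog_pack copies,
 * so once that chunk is read back as a big-endian 32 bit value the client
 * id sits in the most significant byte:
 *
 *	__be32 packed = cpu_to_be32(0x69000000);
 *	uint client = xlog_get_client_id(packed);	-- yields 0x69
 */
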
/*
 * In core log state
 */
#define XLOG_STATE_ACTIVE    0x0001 /* Current IC log being written to */
#define XLOG_STATE_WANT_SYNC 0x0002 /* Want to sync this iclog; no more writes */
#define XLOG_STATE_SYNCING   0x0004 /* This IC log is syncing */
#define XLOG_STATE_DONE_SYNC 0x0008 /* Done syncing to disk */
#define XLOG_STATE_DO_CALLBACK \
			     0x0010 /* Process callback functions */
#define XLOG_STATE_CALLBACK  0x0020 /* Callback functions now */
#define XLOG_STATE_DIRTY     0x0040 /* Dirty IC log, not ready for ACTIVE status */
#define XLOG_STATE_IOERROR   0x0080 /* IO error happened in sync'ing log */
#define XLOG_STATE_IOABORT   0x0100 /* force abort on I/O completion (debug) */
#define XLOG_STATE_ALL	     0x7FFF /* All possible valid flags */
#define XLOG_STATE_NOTUSED   0x8000 /* This IC log not being used */

/*
 * Flags to log ticket
 */
#define XLOG_TIC_INITED		0x1	/* has been initialized */
#define XLOG_TIC_PERM_RESERV	0x2	/* permanent reservation */

#define XLOG_TIC_FLAGS \
	{ XLOG_TIC_INITED,	"XLOG_TIC_INITED" }, \
	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }

/*
 * Below are states for covering allocation transactions.
 * By covering, we mean changing the h_tail_lsn in the last on-disk
 * log write such that no allocation transactions will be re-done during
 * recovery after a system crash. Recovery starts at the last on-disk
 * log write.
 *
 * These states are used to insert dummy log entries to cover
 * space allocation transactions whose replay could undo non-transactional
 * changes after a crash. Writes to a file with space
 * already allocated do not result in any transactions. Allocations
 * might include space beyond the EOF. So if we just push the EOF a
 * little, the last transaction for the file could contain the wrong
 * size. If there is no file system activity after an allocation
 * transaction and the system crashes, the allocation transaction
 * will get replayed and the file will be truncated. This could
 * be hours/days/... after the allocation occurred.
 *
 * The fix for this is to do two dummy transactions when the
 * system is idle. We need two dummy transactions because the h_tail_lsn
 * in the log record header needs to point beyond the last possible
 * non-dummy transaction. The first dummy changes the h_tail_lsn to
 * the first transaction before the dummy. The second dummy causes
 * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn.
 *
 * These dummy transactions get committed when everything
 * is idle (after there has been some activity).
 *
 * There are 5 states used to control this.
 *
 * IDLE -- no logging has been done on the file system or
 *	we are done covering previous transactions.
 * NEED -- logging has occurred and we need a dummy transaction
 *	when the log becomes idle.
 * DONE -- we were in the NEED state and have committed a dummy
 *	transaction.
 * NEED2 -- we detected that a dummy transaction has gone to the
 *	on disk log with no other transactions.
 * DONE2 -- we committed a dummy transaction when in the NEED2 state.
 *
 * There are two places where we switch states:
 *
 * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2.
 *	We commit the dummy transaction and switch to DONE or DONE2,
 *	respectively. In all other states, we don't do anything.
 *
 * 2.) When we finish writing the on-disk log (xlog_state_clean_log).
 *
 *	No matter what state we are in, if this isn't the dummy
 *	transaction going out, the next state is NEED.
 *	So, if we aren't in the DONE or DONE2 states, the next state
 *	is NEED. We can't be finishing a write of the dummy record
 *	unless it was committed and the state switched to DONE or DONE2.
 *
 *	If we are in the DONE state and this was a write of the
 *	dummy transaction, we move to NEED2.
 *
 *	If we are in the DONE2 state and this was a write of the
 *	dummy transaction, we move to IDLE.
 *
 *	These case 2 transitions are sketched in code just below the
 *	state definitions.
 *
 * If we wrote only one dummy transaction, it could simply get appended
 * after a file space allocation. When this happens, the log recovery
 * code replays the space allocation and a file could be truncated.
 * This is why we have the NEED2 and DONE2 states before going idle.
 */

#define XLOG_STATE_COVER_IDLE	0
#define XLOG_STATE_COVER_NEED	1
#define XLOG_STATE_COVER_DONE	2
#define XLOG_STATE_COVER_NEED2	3
#define XLOG_STATE_COVER_DONE2	4

#define XLOG_COVER_OPS		5

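/*
 * Illustrative sketch of the case 2 transitions described above. The real
 * transitions live in the log state machine (xlog_state_clean_log() and
 * friends); this helper and its name are hypothetical and merely restate
 * the table from the comment.
 */
static inline int
xlog_covering_next_state(int cur_state, bool dummy_write)
{
	if (!dummy_write)
		return XLOG_STATE_COVER_NEED;

	switch (cur_state) {
	case XLOG_STATE_COVER_DONE:
		return XLOG_STATE_COVER_NEED2;	/* first dummy hit the disk */
	case XLOG_STATE_COVER_DONE2:
		return XLOG_STATE_COVER_IDLE;	/* second dummy hit the disk */
	default:
		/* a dummy write can only complete from DONE or DONE2 */
		return XLOG_STATE_COVER_NEED;
	}
}
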
/* Ticket reservation region accounting */
#define XLOG_TIC_LEN_MAX	15

/*
 * Reservation region
 * As would be stored in xfs_log_iovec but without the i_addr which
 * we don't care about.
 */
typedef struct xlog_res {
	uint	r_len;	/* region length		:4 */
	uint	r_type;	/* region's transaction type	:4 */
} xlog_res_t;

typedef struct xlog_ticket {
	struct list_head	t_queue;	/* reserve/write queue */
	struct task_struct	*t_task;	/* task that owns this ticket */
	xlog_tid_t		t_tid;		/* transaction identifier	: 4  */
	atomic_t		t_ref;		/* ticket reference count	: 4  */
	int			t_curr_res;	/* current reservation in bytes	: 4  */
	int			t_unit_res;	/* unit reservation in bytes	: 4  */
	char			t_ocnt;		/* original count		: 1  */
	char			t_cnt;		/* current count		: 1  */
	char			t_clientid;	/* who does this belong to	: 1  */
	char			t_flags;	/* properties of reservation	: 1  */

	/* reservation array fields */
	uint			t_res_num;		/* num in array		: 4 */
	uint			t_res_num_ophdrs;	/* num op hdrs		: 4 */
	uint			t_res_arr_sum;		/* array sum		: 4 */
	uint			t_res_o_flow;		/* sum overflow		: 4 */
	xlog_res_t		t_res_arr[XLOG_TIC_LEN_MAX]; /* array of res	: 8 * 15 */
} xlog_ticket_t;

/*
 * - A log record header is 512 bytes.  There is plenty of room to grow the
 *   xlog_rec_header_t into the reserved space.
 * - ic_data follows, so a write to disk can start at the beginning of
 *   the iclog.
 * - ic_force_wait is used to implement synchronous forcing of the iclog to
 *   disk.
 * - ic_next is the pointer to the next iclog in the ring.
 * - ic_bp is a pointer to the buffer used to write this incore log to disk.
 * - ic_log is a pointer back to the global log structure.
 * - ic_callback is a linked list of callback function/argument pairs to be
 *   called after an iclog finishes writing.
 * - ic_size is the full size of the header plus data.
 * - ic_offset is the current number of bytes written to in this iclog.
 * - ic_refcnt is bumped when someone is writing to the log.
 * - ic_state is the state of the iclog.
 *
 * Because of cacheline contention on large machines, we need to separate
 * various resources onto different cachelines. To start with, make the
 * structure cacheline aligned. The following fields can be contended on
 * by independent processes:
 *
 *	- ic_callback_*
 *	- ic_refcnt
 *	- fields protected by the global l_icloglock
 *
 * so we need to ensure that these fields are located in separate cachelines.
 * We'll put all the read-only and l_icloglock fields in the first cacheline,
 * and move everything else out to subsequent cachelines.
 */
typedef struct xlog_in_core {
	wait_queue_head_t	ic_force_wait;
	wait_queue_head_t	ic_write_wait;
	struct xlog_in_core	*ic_next;
	struct xlog_in_core	*ic_prev;
	struct xfs_buf		*ic_bp;
	struct xlog		*ic_log;
	int			ic_size;
	int			ic_offset;
	int			ic_bwritecnt;
	unsigned short		ic_state;
	char			*ic_datap;	/* pointer to iclog data */

	/* Callback structures need their own cacheline */
	spinlock_t		ic_callback_lock ____cacheline_aligned_in_smp;
	struct xfs_log_callback	*ic_callback;
	struct xfs_log_callback	**ic_callback_tail;

	/* reference counts need their own cacheline */
	atomic_t		ic_refcnt ____cacheline_aligned_in_smp;
	xlog_in_core_2_t	*ic_data;
#define ic_header	ic_data->hic_header
} xlog_in_core_t;

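/*
 * Illustrative (hypothetical) compile-time checks for the cacheline layout
 * described above; nothing here is required to do this, it merely shows what
 * the ____cacheline_aligned_in_smp annotations guarantee on SMP builds:
 *
 *	BUILD_BUG_ON(offsetof(struct xlog_in_core, ic_callback_lock) %
 *		     SMP_CACHE_BYTES);
 *	BUILD_BUG_ON(offsetof(struct xlog_in_core, ic_refcnt) %
 *		     SMP_CACHE_BYTES);
 */
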
/*
 * The CIL context is used to aggregate per-transaction details as well as to
 * be passed to the iclog for checkpoint post-commit processing.  After being
 * passed to the iclog, another context needs to be allocated for tracking the
 * next set of transactions to be aggregated into a checkpoint.
 */
struct xfs_cil;

struct xfs_cil_ctx {
	struct xfs_cil		*cil;
	xfs_lsn_t		sequence;	/* chkpt sequence # */
	xfs_lsn_t		start_lsn;	/* first LSN of chkpt commit */
	xfs_lsn_t		commit_lsn;	/* chkpt commit record lsn */
	struct xlog_ticket	*ticket;	/* chkpt ticket */
	int			nvecs;		/* number of regions */
	int			space_used;	/* aggregate size of regions */
	struct list_head	busy_extents;	/* busy extents in chkpt */
	struct xfs_log_vec	*lv_chain;	/* logvecs being pushed */
	struct xfs_log_callback	log_cb;		/* completion callback hook */
	struct list_head	committing;	/* ctx committing list */
	struct work_struct	discard_endio_work;
};

/*
 * Committed Item List structure
 *
 * This structure is used to track log items that have been committed but not
 * yet written into the log. It is used only when the delayed logging mount
 * option is enabled.
 *
 * This structure tracks the list of committing checkpoint contexts so
 * we can avoid the problem of having to hold out new transactions during a
 * flush until we have the commit record LSN of the checkpoint. We can
 * traverse the list of committing contexts in xlog_cil_force_lsn() to find a
 * sequence match and extract the commit LSN directly from there (see the
 * sketch following the structure below). If the checkpoint is still in the
 * process of committing, we can block waiting for the commit LSN to be
 * determined as well. This should make synchronous operations almost as
 * efficient as the old logging methods.
 */
struct xfs_cil {
	struct xlog		*xc_log;
	struct list_head	xc_cil;
	spinlock_t		xc_cil_lock;

	struct rw_semaphore	xc_ctx_lock ____cacheline_aligned_in_smp;
	struct xfs_cil_ctx	*xc_ctx;

	spinlock_t		xc_push_lock ____cacheline_aligned_in_smp;
	xfs_lsn_t		xc_push_seq;
	struct list_head	xc_committing;
	wait_queue_head_t	xc_commit_wait;
	xfs_lsn_t		xc_current_sequence;
	struct work_struct	xc_push_work;
} ____cacheline_aligned_in_smp;

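/*
 * Illustrative sketch of the lookup described above.  The real code lives in
 * xlog_cil_force_lsn(); variable names here are hypothetical and the
 * block-and-retry handling for still-committing checkpoints is omitted:
 *
 *	struct xfs_cil_ctx	*ctx;
 *	xfs_lsn_t		commit_lsn = 0;
 *
 *	spin_lock(&cil->xc_push_lock);
 *	list_for_each_entry(ctx, &cil->xc_committing, committing) {
 *		if (ctx->sequence == sequence) {
 *			commit_lsn = ctx->commit_lsn;
 *			break;
 *		}
 *	}
 *	spin_unlock(&cil->xc_push_lock);
 */
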
/*
 * The amount of log space we allow the CIL to aggregate is difficult to size.
 * Whatever we choose, we have to make sure we can get a reservation for the
 * log space effectively, that it is large enough to capture sufficient
 * relogging to reduce log buffer IO significantly, but it is not too large for
 * the log or induces too much latency when writing out through the iclogs. We
 * track both space consumed and the number of vectors in the checkpoint
 * context, so we need to decide which to use for limiting.
 *
 * Every log buffer we write out during a push needs a header reserved, which
 * is at least one sector and more for v2 logs. Hence we need a reservation of
 * at least 512 bytes per 32k of log space just for the LR headers. That means
 * 16KB of reservation per megabyte of delayed logging space we will consume,
 * plus various headers. The number of headers will vary based on the number
 * of io vectors, so limiting on a specific number of vectors is going to
 * result in transactions of varying size. IOWs, it is more consistent to
 * track and limit space consumed in the log rather than by the number of
 * objects being logged in order to prevent checkpoint ticket overruns.
 *
 * Further, use of static reservations through the log grant mechanism is
 * problematic. It introduces a lot of complexity (e.g. reserve grant vs write
 * grant) and a significant deadlock potential because regranting write space
 * can block on log pushes. Hence if we have to regrant log space during a log
 * push, we can deadlock.
 *
 * However, we can avoid this by use of a dynamic "reservation stealing"
 * technique during transaction commit whereby unused reservation space in the
 * transaction ticket is transferred to the CIL ctx commit ticket to cover the
 * space needed by the checkpoint transaction. This means that we never need to
 * specifically reserve space for the CIL checkpoint transaction, nor do we
 * need to regrant space once the checkpoint completes. This also means the
 * checkpoint transaction ticket is specific to the checkpoint context, rather
 * than the CIL itself.
 *
 * With dynamic reservations, we can effectively make up arbitrary limits for
 * the checkpoint size so long as they don't violate any other size rules.
 * Recovery imposes a rule that no transaction exceed half the log, so we are
 * limited by that. Furthermore, the log transaction reservation subsystem
 * tries to keep 25% of the log free, so we need to keep below that limit or we
 * risk running out of free log space to start any new transactions.
 *
 * In order to keep background CIL push efficient, we set a lower threshold at
 * which background pushing is attempted without blocking current transaction
 * commits. A separate, higher bound defines when CIL pushes are enforced to
 * ensure we stay within our maximum checkpoint size bounds. The background
 * threshold is set well below those limits, yet gives us plenty of space for
 * aggregation on large logs.
 */
#define XLOG_CIL_SPACE_LIMIT(log)	(log->l_logsize >> 3)

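/*
 * Worked example of the sizing above (illustrative arithmetic only): the LR
 * header overhead is 512 bytes per 32KB of log space, i.e. 1/64th, which is
 * where the "16KB per megabyte of delayed logging space" figure comes from.
 * For a hypothetical 128MB log, XLOG_CIL_SPACE_LIMIT() works out to
 * 128MB >> 3 = 16MB of aggregated CIL space before a background push is
 * attempted.
 */
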
/*
 * Ticket grant locks, queues and accounting have their own cachelines
 * as these are quite hot and can be operated on concurrently.
 */
struct xlog_grant_head {
	spinlock_t		lock ____cacheline_aligned_in_smp;
	struct list_head	waiters;
	atomic64_t		grant;
};

/*
 * The reservation head lsn is not made up of a cycle number and block number.
 * Instead, it uses a cycle number and byte number.  Logs don't expect to
 * overflow 31 bits worth of byte offset, so using a byte number will mean
 * that round off problems won't occur when releasing partial reservations.
 */
struct xlog {
	/* The following fields don't need locking */
	struct xfs_mount	*l_mp;		/* mount point */
	struct xfs_ail		*l_ailp;	/* AIL log is working with */
	struct xfs_cil		*l_cilp;	/* CIL log is working with */
	struct xfs_buf		*l_xbuf;	/* extra buffer for log
						 * wrapping */
	struct xfs_buftarg	*l_targ;	/* buftarg of log */
	struct delayed_work	l_work;		/* background flush work */
	uint			l_flags;
	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
	struct list_head	*l_buf_cancel_table;
	int			l_iclog_hsize;	/* size of iclog header */
	int			l_iclog_heads;	/* # of iclog header sectors */
	uint			l_sectBBsize;	/* sector size in BBs (2^n) */
	int			l_iclog_size;	/* size of log in bytes */
	int			l_iclog_size_log; /* log power size of log */
	int			l_iclog_bufs;	/* number of iclog buffers */
	xfs_daddr_t		l_logBBstart;	/* start block of log */
	int			l_logsize;	/* size of log in bytes */
	int			l_logBBsize;	/* size of log in BB chunks */

	/* The following block of fields are changed while holding icloglock */
	wait_queue_head_t	l_flush_wait ____cacheline_aligned_in_smp;
						/* waiting for iclog flush */
	int			l_covered_state;/* state of "covering disk
						 * log entries" */
	xlog_in_core_t		*l_iclog;	/* head log queue */
	spinlock_t		l_icloglock;	/* grab to change iclog state */
	int			l_curr_cycle;	/* Cycle number of log writes */
	int			l_prev_cycle;	/* Cycle number before last
						 * block increment */
	int			l_curr_block;	/* current logical log block */
	int			l_prev_block;	/* previous logical log block */

	/*
	 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
	 * read without needing to hold specific locks. To avoid operations
	 * contending with other hot objects, place each of them on a separate
	 * cacheline.
	 */
	/* lsn of last LR on disk */
	atomic64_t		l_last_sync_lsn ____cacheline_aligned_in_smp;
	/* lsn of 1st LR with unflushed buffers */
	atomic64_t		l_tail_lsn ____cacheline_aligned_in_smp;

	struct xlog_grant_head	l_reserve_head;
	struct xlog_grant_head	l_write_head;

	struct xfs_kobj		l_kobj;

	/* The following fields are used for debugging; need to hold icloglock */
#ifdef DEBUG
	void			*l_iclog_bak[XLOG_MAX_ICLOGS];
	/* log record crc error injection factor */
	uint32_t		l_badcrc_factor;
#endif
	/* log recovery lsn tracking (for buffer submission) */
	xfs_lsn_t		l_recovery_lsn;
};

#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
	((log)->l_buf_cancel_table + ((uint64_t)blkno % XLOG_BC_TABLE_SIZE))

#define XLOG_FORCED_SHUTDOWN(log)	((log)->l_flags & XLOG_IO_ERROR)

/* common routines */
extern int
xlog_recover(
	struct xlog		*log);
extern int
xlog_recover_finish(
	struct xlog		*log);
extern int
xlog_recover_cancel(struct xlog *);

extern __le32	 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
			    char *dp, int size);

extern kmem_zone_t *xfs_log_ticket_zone;
struct xlog_ticket *
xlog_ticket_alloc(
	struct xlog	*log,
	int		unit_bytes,
	int		count,
	char		client,
	bool		permanent,
	xfs_km_flags_t	alloc_flags);

static inline void
xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
{
	*ptr += bytes;
	*len -= bytes;
	*off += bytes;
}

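/*
 * Illustrative use of xlog_write_adv_cnt() (hypothetical variable names):
 * after copying a log iovec into the iclog data area, advance the write
 * pointer and both byte counters in one step:
 *
 *	memcpy(ptr, reg->i_addr, reg->i_len);
 *	xlog_write_adv_cnt(&ptr, &len, &log_offset, reg->i_len);
 */
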
void	xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
void	xlog_print_trans(struct xfs_trans *);
int
xlog_write(
	struct xlog		*log,
	struct xfs_log_vec	*log_vector,
	struct xlog_ticket	*tic,
	xfs_lsn_t		*start_lsn,
	struct xlog_in_core	**commit_iclog,
	uint			flags);

/*
 * When we crack an atomic LSN, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from. This should always
 * be used to sample and crack LSNs that are stored and updated in atomic
 * variables.
 */
static inline void
xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
{
	xfs_lsn_t val = atomic64_read(lsn);

	*cycle = CYCLE_LSN(val);
	*block = BLOCK_LSN(val);
}

/*
 * Calculate and assign a value to an atomic LSN variable from component pieces.
 */
static inline void
xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
{
	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
}

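/*
 * Illustrative (hypothetical) caller of the helpers above, e.g. sampling the
 * current tail lsn without holding any locks:
 *
 *	uint cycle, block;
 *
 *	xlog_crack_atomic_lsn(&log->l_tail_lsn, &cycle, &block);
 */
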
/*
 * When we crack the grant head, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from.
 */
static inline void
xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
{
	*cycle = val >> 32;
	*space = val & 0xffffffff;
}

static inline void
xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
{
	xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
}

static inline int64_t
xlog_assign_grant_head_val(int cycle, int space)
{
	return ((int64_t)cycle << 32) | space;
}

static inline void
xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
{
	atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
}

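/*
 * Worked example of the grant head encoding (illustrative values): cycle 2
 * and a byte count of 4096 pack as ((int64_t)2 << 32) | 4096, i.e.
 * 0x0000000200001000, and cracking that value yields cycle == 2 and
 * space == 4096 again.
 */
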
/*
 * Committed Item List interfaces
 */
int	xlog_cil_init(struct xlog *log);
void	xlog_cil_init_post_recovery(struct xlog *log);
void	xlog_cil_destroy(struct xlog *log);
bool	xlog_cil_empty(struct xlog *log);

/*
 * CIL force routines
 */
xfs_lsn_t
xlog_cil_force_lsn(
	struct xlog *log,
	xfs_lsn_t sequence);

static inline void
xlog_cil_force(struct xlog *log)
{
	xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence);
}

/*
 * Unmount record type is used as a pseudo transaction type for the ticket.
 * Its value must be outside the range of XFS_TRANS_* values.
 */
#define XLOG_UNMOUNT_REC_TYPE	(-1U)

/*
 * Wrapper function for waiting on a wait queue serialised against wakeups
 * by a spinlock. This matches the semantics of all the wait queues used in the
 * log code.
 */
static inline void xlog_wait(wait_queue_head_t *wq, spinlock_t *lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(wq, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock(lock);
	schedule();
	remove_wait_queue(wq, &wait);
}

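/*
 * Illustrative caller pattern (condition and queue chosen for the example):
 * the spinlock must be held on entry and is dropped inside xlog_wait(), so
 * the condition check and the sleep are atomic with respect to the waker:
 *
 *	spin_lock(&log->l_icloglock);
 *	if (iclog->ic_state & XLOG_STATE_SYNCING)
 *		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 *	else
 *		spin_unlock(&log->l_icloglock);
 */
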
/*
 * The LSN is valid so long as it is behind the current LSN. If it isn't, this
 * means that the next log record that includes this metadata could have a
 * smaller LSN. In turn, this means that the modification in the log would not
 * replay.
 */
static inline bool
xlog_valid_lsn(
	struct xlog	*log,
	xfs_lsn_t	lsn)
{
	int		cur_cycle;
	int		cur_block;
	bool		valid = true;

	/*
	 * First, sample the current lsn without locking to avoid added
	 * contention from metadata I/O. The current cycle and block are updated
	 * (in xlog_state_switch_iclogs()) and read here in a particular order
	 * to avoid false negatives (e.g., thinking the metadata LSN is valid
	 * when it is not).
	 *
	 * The current block is always rewound before the cycle is bumped in
	 * xlog_state_switch_iclogs() to ensure the current LSN is never seen in
	 * a transiently forward state. Instead, we can see the LSN in a
	 * transiently behind state if we happen to race with a cycle wrap.
	 */
	cur_cycle = ACCESS_ONCE(log->l_curr_cycle);
	smp_rmb();
	cur_block = ACCESS_ONCE(log->l_curr_block);

	if ((CYCLE_LSN(lsn) > cur_cycle) ||
	    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) {
		/*
		 * If the metadata LSN appears invalid, it's possible the check
		 * above raced with a wrap to the next log cycle. Grab the lock
		 * to check for sure.
		 */
		spin_lock(&log->l_icloglock);
		cur_cycle = log->l_curr_cycle;
		cur_block = log->l_curr_block;
		spin_unlock(&log->l_icloglock);

		if ((CYCLE_LSN(lsn) > cur_cycle) ||
		    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block))
			valid = false;
	}

	return valid;
}

#endif	/* __XFS_LOG_PRIV_H__ */