// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 */

#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"

struct workqueue_struct *xfs_discard_wq;
/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog		*log)
{
	struct xlog_ticket	*tic;

	tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0);

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	return tic;
}
/*
 * Unavoidable forward declaration - xlog_cil_push_work() calls
 * xlog_cil_ctx_alloc() itself.
 */
static void xlog_cil_push_work(struct work_struct *work);
static struct xfs_cil_ctx *
xlog_cil_ctx_alloc(void)
{
	struct xfs_cil_ctx	*ctx;

	ctx = kmem_zalloc(sizeof(*ctx), KM_NOFS);
	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	INIT_WORK(&ctx->push_work, xlog_cil_push_work);
	return ctx;
}
static void
xlog_cil_ctx_switch(
	struct xfs_cil		*cil,
	struct xfs_cil_ctx	*ctx)
{
	ctx->sequence = ++cil->xc_current_sequence;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
}
/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct xlog	*log)
{
	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
}
static inline int
xlog_cil_iovec_space(
	uint	niovecs)
{
	return round_up((sizeof(struct xfs_log_vec) +
					niovecs * sizeof(struct xfs_log_iovec)),
			sizeof(uint64_t));
}
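
/*
 * Illustrative arithmetic (the struct sizes here are assumptions for the
 * example, not taken from this source): if sizeof(struct xfs_log_vec) were
 * 56 bytes and sizeof(struct xfs_log_iovec) were 16 bytes, a 3-iovec item
 * would need round_up(56 + 3 * 16, 8) = 104 bytes of header space before
 * the 64-bit aligned data region starts.
 */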
/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *	a) does not exist; or
 *	b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * it is needed, and this is the buffer that is reallocated to match the size of
 * the incoming modification. Then during the formatting of the item we can swap
 * the active buffer with the new one if we can't reuse the existing buffer. We
 * don't free the old buffer as it may be reused on the next modification if
 * its size is right, otherwise we'll free and reallocate it at that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
static void
xlog_cil_alloc_shadow_bufs(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip;

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;

			/* Ordered items need to be tracked but not copied */
			niovecs = 0;
			nbytes = 0;
		}

		/*
		 * We 64-bit align the length of each iovec so that the start
		 * of the next one is naturally aligned. We'll need to
		 * account for that slack space here. Then round nbytes up
		 * to 64-bit alignment so that the initial buffer alignment is
		 * easy to calculate and verify.
		 */
		nbytes += niovecs * sizeof(uint64_t);
		nbytes = round_up(nbytes, sizeof(uint64_t));
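
		/*
		 * Worked example (numbers chosen purely for illustration): an
		 * item with niovecs = 2 and nbytes = 52 becomes 52 + 2 * 8 =
		 * 68 bytes of data space, rounded up to 72 so that both
		 * iovecs can be padded to a 64-bit boundary without
		 * overrunning the buffer.
		 */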
		/*
		 * The data buffer needs to start 64-bit aligned, so round up
		 * that space to ensure we can align it appropriately and not
		 * overrun the buffer.
		 */
		buf_size = nbytes + xlog_cil_iovec_space(niovecs);

		/*
		 * if we have no shadow buffer, or it is too small, we need to
		 * allocate a new one.
		 */
		if (!lip->li_lv_shadow ||
		    buf_size > lip->li_lv_shadow->lv_size) {
			/*
			 * We free and allocate here as a realloc would copy
			 * unnecessary data. We don't use kmem_zalloc() for the
			 * same reason - we don't need to zero the data area in
			 * the buffer, only the log vector header and the iovec
			 * storage.
			 */
			kmem_free(lip->li_lv_shadow);

			/*
			 * We are in transaction context, which means this
			 * allocation will pick up GFP_NOFS from the
			 * memalloc_nofs_save/restore context the transaction
			 * holds. This means we can use GFP_KERNEL here so the
			 * generic kvmalloc() code will run vmalloc on
			 * contiguous page allocation failure as we require.
			 */
			lv = kvmalloc(buf_size, GFP_KERNEL);
			memset(lv, 0, xlog_cil_iovec_space(niovecs));

			lv->lv_item = lip;
			lv->lv_size = buf_size;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			else
				lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
			lip->li_lv_shadow = lv;
		} else {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv_shadow;
			if (ordered)
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
		}

		/* Ensure the lv is set up according to ->iop_size */
		lv->lv_niovecs = niovecs;

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
	}
}
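
/*
 * A minimal userspace sketch of the shadow buffer reallocation policy above,
 * under stated assumptions: the names, types and sizes here are illustrative
 * only and are not part of the XFS API. Kept inside #if 0 so it is never
 * built; it would compile standalone if extracted.
 */
#if 0
#include <stdlib.h>

struct shadow_item {
	void	*shadow;	/* over-sized spare buffer, may be NULL */
	size_t	shadow_size;
};

/* Ensure @item has a shadow buffer of at least @need bytes. */
static void alloc_shadow(struct shadow_item *item, size_t need)
{
	if (!item->shadow || need > item->shadow_size) {
		/* free + malloc rather than realloc: old contents not needed */
		free(item->shadow);
		item->shadow = malloc(need);
		item->shadow_size = need;
	}
	/* else: the existing shadow is big enough, reuse it as-is */
}
#endif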
/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
static void
xfs_cil_prepare_item(
	struct xlog		*log,
	struct xfs_log_vec	*lv,
	struct xfs_log_vec	*old_lv,
	int			*diff_len,
	int			*diff_iovecs)
{
	/* Account for the new LV being passed in */
	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
		*diff_len += lv->lv_bytes;
		*diff_iovecs += lv->lv_niovecs;
	}

	/*
	 * If there is no old LV, this is the first time we've seen the item in
	 * this CIL context and so we need to pin it. If we are replacing the
	 * old_lv, then remove the space it accounts for and make it the shadow
	 * buffer for later freeing. In both cases we are now switching to the
	 * shadow buffer, so update the pointer to it appropriately.
	 */
	if (!old_lv) {
		if (lv->lv_item->li_ops->iop_pin)
			lv->lv_item->li_ops->iop_pin(lv->lv_item);
		lv->lv_item->li_lv_shadow = NULL;
	} else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_bytes;
		*diff_iovecs -= old_lv->lv_niovecs;
		lv->lv_item->li_lv_shadow = old_lv;
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}
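
/*
 * Accounting example (illustrative numbers): if an item was previously in
 * the CIL with a 256-byte, 2-iovec vector and is relogged with a 384-byte,
 * 3-iovec vector, the code above yields *diff_len += 384 - 256 = 128 and
 * *diff_iovecs += 3 - 2 = 1, i.e. only the growth is charged against the
 * checkpoint, not the full size a second time.
 */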
/*
 * Format log item into a flat buffer
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of the
 * item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	int			*diff_len,
	int			*diff_iovecs)
{
	struct xfs_log_item	*lip;

	/* Bail out if we didn't find a log item. */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv = NULL;
		struct xfs_log_vec *shadow;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * The formatting size information is already attached to
		 * the shadow lv on the log item.
		 */
		shadow = lip->li_lv_shadow;
		if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
			ordered = true;

		/* Skip items that do not have any vectors for writing */
		if (!shadow->lv_niovecs && !ordered)
			continue;

		/* compare to existing item size */
		old_lv = lip->li_lv;
		if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv;
			lv->lv_next = NULL;

			if (ordered)
				goto insert;

			/*
			 * set the item up as though it is a new insertion so
			 * that the space reservation accounting is correct.
			 */
			*diff_iovecs -= lv->lv_niovecs;
			*diff_len -= lv->lv_bytes;

			/* Ensure the lv is set up according to ->iop_size */
			lv->lv_niovecs = shadow->lv_niovecs;

			/* reset the lv buffer information for new formatting */
			lv->lv_buf_len = 0;
			lv->lv_bytes = 0;
			lv->lv_buf = (char *)lv +
					xlog_cil_iovec_space(lv->lv_niovecs);
		} else {
			/* switch to shadow buffer! */
			lv = shadow;
			lv->lv_item = lip;
			if (ordered) {
				/* track as an ordered logvec */
				ASSERT(lip->li_lv == NULL);
				goto insert;
			}
		}

		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
		lip->li_ops->iop_format(lip, lv);
insert:
		xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
	}
}
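
/*
 * Reuse-vs-swap in practice (illustrative sizes): if the attached lv is 512
 * bytes and the shadow needs only 384, the existing lv is rewritten in place
 * (the common overwrite case). If the shadow needs 768 bytes, the lv pointer
 * is simply switched to the shadow, and xfs_cil_prepare_item() keeps the old
 * lv as the item's shadow for a later modification to reuse or free.
 */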
/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_item	*lip;
	int			len = 0;
	int			diff_iovecs = 0;
	int			iclog_space;
	int			iovhdr_res = 0, split_res = 0, ctx_res = 0;

	/*
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);

	spin_lock(&cil->xc_cil_lock);

	/* account for space used by new iovec headers */
	iovhdr_res = diff_iovecs * sizeof(xlog_op_header_t);
	len += iovhdr_res;
	ctx->nvecs += diff_iovecs;

	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy))
		list_splice_init(&tp->t_busy, &ctx->busy_extents);

	/*
	 * Now transfer enough transaction reservation to the context ticket
	 * for the checkpoint. The context ticket is special - the unit
	 * reservation has to grow as well as the current reservation as we
	 * steal from tickets so we can correctly determine the space used
	 * during the transaction commit.
	 */
	if (ctx->ticket->t_curr_res == 0) {
		ctx_res = ctx->ticket->t_unit_res;
		ctx->ticket->t_curr_res = ctx_res;
		tp->t_ticket->t_curr_res -= ctx_res;
	}

	/* do we need space for more log record headers? */
	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
	if (len > 0 && (ctx->space_used / iclog_space !=
	    (ctx->space_used + len) / iclog_space)) {
		split_res = (len + iclog_space - 1) / iclog_space;
		/* need to take into account split region headers, too */
		split_res *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
		ctx->ticket->t_unit_res += split_res;
		ctx->ticket->t_curr_res += split_res;
		tp->t_ticket->t_curr_res -= split_res;
		ASSERT(tp->t_ticket->t_curr_res >= len);
	}
	tp->t_ticket->t_curr_res -= len;
	ctx->space_used += len;
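
	/*
	 * Worked example with illustrative numbers: with 32k iclogs and a
	 * 512-byte iclog header, iclog_space is 32256 bytes. A checkpoint
	 * whose space_used grows from 30000 to 65000 bytes crosses an iclog
	 * boundary, so for len = 35000 the code above reserves
	 * split_res = (35000 + 32255) / 32256 = 2 extra record headers'
	 * worth of space for regions that may be split across iclogs.
	 */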
	/*
	 * If we've overrun the reservation, dump the tx details before we move
	 * the log items. Shutdown is imminent...
	 */
	if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
		xfs_warn(log->l_mp, "Transaction log reservation overrun:");
		xfs_warn(log->l_mp,
			 "  log items: %d bytes (iov hdrs: %d bytes)",
			 len, iovhdr_res);
		xfs_warn(log->l_mp, "  split region headers: %d bytes",
			 split_res);
		xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
		xlog_print_trans(tp);
	}

	/*
	 * Now (re-)position everything modified at the tail of the CIL.
	 * We do this here so we only need to take the CIL lock once during
	 * the transaction commit.
	 */
	list_for_each_entry(lip, &tp->t_items, li_trans) {

		/* Skip items which aren't dirty in this transaction. */
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;

		/*
		 * Only move the item if it isn't already at the tail. This is
		 * to prevent a transient list_empty() state when reinserting
		 * an item that is already the only item in the CIL.
		 */
		if (!list_is_last(&lip->li_cil, &cil->xc_cil))
			list_move_tail(&lip->li_cil, &cil->xc_cil);
	}

	spin_unlock(&cil->xc_cil_lock);

	if (tp->t_ticket->t_curr_res < 0)
		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
}
static void
xlog_cil_free_logvec(
	struct xfs_log_vec	*log_vector)
{
	struct xfs_log_vec	*lv;

	for (lv = log_vector; lv; ) {
		struct xfs_log_vec *next = lv->lv_next;
		kmem_free(lv);
		lv = next;
	}
}
static void
xlog_discard_endio_work(
	struct work_struct	*work)
{
	struct xfs_cil_ctx	*ctx =
		container_of(work, struct xfs_cil_ctx, discard_endio_work);
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
	kmem_free(ctx);
}
/*
 * Queue up the actual completion to a thread to avoid IRQ-safe locking for
 * pagb_lock. Note that we need an unbounded workqueue, otherwise we might
 * get the execution delayed up to 30 seconds for weird reasons.
 */
static void
xlog_discard_endio(
	struct bio		*bio)
{
	struct xfs_cil_ctx	*ctx = bio->bi_private;

	INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
	queue_work(xfs_discard_wq, &ctx->discard_endio_work);
	bio_put(bio);
}
static void
xlog_discard_busy_extents(
	struct xfs_mount	*mp,
	struct xfs_cil_ctx	*ctx)
{
	struct list_head	*list = &ctx->busy_extents;
	struct xfs_extent_busy	*busyp;
	struct bio		*bio = NULL;
	struct blk_plug		plug;
	int			error = 0;

	ASSERT(xfs_has_discard(mp));

	blk_start_plug(&plug);
	list_for_each_entry(busyp, list, list) {
		trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
					 busyp->length);

		error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
				XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
				XFS_FSB_TO_BB(mp, busyp->length),
				GFP_NOFS, 0, &bio);
		if (error && error != -EOPNOTSUPP) {
			xfs_info(mp,
	 "discard failed for extent [0x%llx,%u], error %d",
				 (unsigned long long)busyp->bno,
				 busyp->length,
				 error);
			break;
		}
	}

	if (bio) {
		bio->bi_private = ctx;
		bio->bi_end_io = xlog_discard_endio;
		submit_bio(bio);
	} else {
		xlog_discard_endio_work(&ctx->discard_endio_work);
	}
	blk_finish_plug(&plug);
}
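
/*
 * Note on the pattern above: __blkdev_issue_discard() chains the bios it
 * builds onto *bio, so after the loop a single non-NULL bio stands for the
 * whole batch and completes only once all chained discards have completed.
 * That is why clearing the busy extents can be driven entirely from its
 * ->bi_end_io handler.
 */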
/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	struct xfs_cil_ctx	*ctx)
{
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;
	bool			abort = xlog_is_shutdown(ctx->cil->xc_log);

	/*
	 * If the I/O failed, we're aborting the commit and already shutdown.
	 * Wake any commit waiters before aborting the log items so we don't
	 * block async log pushers on callbacks. Async log pushers explicitly do
	 * not wait on log force completion because they may be holding locks
	 * required to unpin items.
	 */
	if (abort) {
		spin_lock(&ctx->cil->xc_push_lock);
		wake_up_all(&ctx->cil->xc_start_wait);
		wake_up_all(&ctx->cil->xc_commit_wait);
		spin_unlock(&ctx->cil->xc_push_lock);
	}

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_extent_busy_sort(&ctx->busy_extents);
	xfs_extent_busy_clear(mp, &ctx->busy_extents,
			      xfs_has_discard(mp) && !abort);

	spin_lock(&ctx->cil->xc_push_lock);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_push_lock);

	xlog_cil_free_logvec(ctx->lv_chain);

	if (!list_empty(&ctx->busy_extents))
		xlog_discard_busy_extents(mp, ctx);
	else
		kmem_free(ctx);
}
void
xlog_cil_process_committed(
	struct list_head	*list)
{
	struct xfs_cil_ctx	*ctx;

	while ((ctx = list_first_entry_or_null(list,
			struct xfs_cil_ctx, iclog_entry))) {
		list_del(&ctx->iclog_entry);
		xlog_cil_committed(ctx);
	}
}
/*
 * Record the LSN of the iclog we were just granted space to start writing into.
 * If the context doesn't have a start_lsn recorded, then this iclog will
 * contain the start record for the checkpoint. Otherwise this write contains
 * the commit record for the checkpoint.
 */
void
xlog_cil_set_ctx_write_state(
	struct xfs_cil_ctx	*ctx,
	struct xlog_in_core	*iclog)
{
	struct xfs_cil		*cil = ctx->cil;
	xfs_lsn_t		lsn = be64_to_cpu(iclog->ic_header.h_lsn);

	ASSERT(!ctx->commit_lsn);
	if (!ctx->start_lsn) {
		spin_lock(&cil->xc_push_lock);
		/*
		 * The LSN we need to pass to the log items on transaction
		 * commit is the LSN reported by the first log vector write, not
		 * the commit lsn. If we use the commit record lsn then we can
		 * move the tail beyond the grant write head.
		 */
		ctx->start_lsn = lsn;
		wake_up_all(&cil->xc_start_wait);
		spin_unlock(&cil->xc_push_lock);
		return;
	}

	/*
	 * Take a reference to the iclog for the context so that we still hold
	 * it when xlog_write is done and has released it. This means the
	 * context controls when the iclog is released for IO.
	 */
	atomic_inc(&iclog->ic_refcnt);

	/*
	 * xlog_state_get_iclog_space() guarantees there is enough space in the
	 * iclog for an entire commit record, so we can attach the context
	 * callbacks now. This needs to be done before we make the commit_lsn
	 * visible to waiters so that checkpoints with commit records in the
	 * same iclog order their IO completion callbacks in the same order that
	 * the commit records appear in the iclog.
	 */
	spin_lock(&cil->xc_log->l_icloglock);
	list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks);
	spin_unlock(&cil->xc_log->l_icloglock);

	/*
	 * Now we can record the commit LSN and wake anyone waiting for this
	 * sequence to have the ordered commit record assigned to a physical
	 * location in the log.
	 */
	spin_lock(&cil->xc_push_lock);
	ctx->commit_iclog = iclog;
	ctx->commit_lsn = lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_push_lock);
}
/*
 * Ensure that the order of log writes follows checkpoint sequence order. This
 * relies on the context LSN being zero until the log write has guaranteed the
 * LSN that the log write will start at via xlog_state_get_iclog_space().
 */
enum _record_type {
	_START_RECORD,
	_COMMIT_RECORD,
};

static int
xlog_cil_order_write(
	struct xfs_cil		*cil,
	xfs_csn_t		sequence,
	enum _record_type	record)
{
	struct xfs_cil_ctx	*ctx;

restart:
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (xlog_is_shutdown(cil->xc_log)) {
			spin_unlock(&cil->xc_push_lock);
			return -EIO;
		}

		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (ctx->sequence >= sequence)
			continue;

		/* Wait until the LSN for the record has been recorded. */
		switch (record) {
		case _START_RECORD:
			if (!ctx->start_lsn) {
				xlog_wait(&cil->xc_start_wait, &cil->xc_push_lock);
				goto restart;
			}
			break;
		case _COMMIT_RECORD:
			if (!ctx->commit_lsn) {
				xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
				goto restart;
			}
			break;
		}
	}
	spin_unlock(&cil->xc_push_lock);
	return 0;
}
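
/*
 * Ordering example: suppose checkpoints 10, 11 and 12 are committing
 * concurrently. Before sequence 12 writes its start (or commit) record, the
 * loop above makes it sleep until sequences 10 and 11 both have a non-zero
 * start_lsn (or commit_lsn) recorded, so the records always land in the log
 * in ascending sequence order.
 */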
/*
 * Write out the log vector change now attached to the CIL context. This will
 * write a start record that needs to be strictly ordered in ascending CIL
 * sequence order so that log recovery will always use in-order start LSNs when
 * replaying checkpoints.
 */
static int
xlog_cil_write_chain(
	struct xfs_cil_ctx	*ctx,
	struct xfs_log_vec	*chain)
{
	struct xlog		*log = ctx->cil->xc_log;
	int			error;

	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD);
	if (error)
		return error;
	return xlog_write(log, ctx, chain, ctx->ticket, XLOG_START_TRANS);
}
/*
 * Write out the commit record of a checkpoint transaction to close off a
 * running log write. These commit records are strictly ordered in ascending CIL
 * sequence order so that log recovery will always replay the checkpoints in the
 * correct order.
 */
static int
xlog_cil_write_commit_record(
	struct xfs_cil_ctx	*ctx)
{
	struct xlog		*log = ctx->cil->xc_log;
	struct xfs_log_iovec	reg = {
		.i_addr = NULL,
		.i_len = 0,
		.i_type = XLOG_REG_TYPE_COMMIT,
	};
	struct xfs_log_vec	vec = {
		.lv_niovecs = 1,
		.lv_iovecp = &reg,
	};
	int			error;

	if (xlog_is_shutdown(log))
		return -EIO;

	error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD);
	if (error)
		return error;

	error = xlog_write(log, ctx, &vec, ctx->ticket, XLOG_COMMIT_TRANS);
	if (error)
		xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
	return error;
}
/*
 * Push the Committed Item List to the log.
 *
 * If the current sequence is the same as xc_push_seq we need to do a flush. If
 * xc_push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * xc_push_seq is checked unlocked against the sequence number for a match.
 * Hence we can allow log forces to run racily and not issue pushes for the
 * same sequence twice. If we get a race between multiple pushes for the same
 * sequence they will block on the first one and then abort, hence avoiding
 * needless pushes.
 */
static void
xlog_cil_push_work(
	struct work_struct	*work)
{
	struct xfs_cil_ctx	*ctx =
		container_of(work, struct xfs_cil_ctx, push_work);
	struct xfs_cil		*cil = ctx->cil;
	struct xlog		*log = cil->xc_log;
	struct xfs_log_vec	*lv;
	struct xfs_cil_ctx	*new_ctx;
	struct xlog_ticket	*tic;
	int			num_iovecs = 0;
	int			error = 0;
	struct xfs_trans_header thdr;
	struct xfs_log_iovec	lhdr;
	struct xfs_log_vec	lvhdr = { NULL };
	xfs_lsn_t		preflush_tail_lsn;
	xfs_csn_t		push_seq;
	struct bio		bio;
	DECLARE_COMPLETION_ONSTACK(bdev_flush);
	bool			push_commit_stable;

	new_ctx = xlog_cil_ctx_alloc();
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	down_write(&cil->xc_ctx_lock);

	spin_lock(&cil->xc_push_lock);
	push_seq = cil->xc_push_seq;
	ASSERT(push_seq <= ctx->sequence);
	push_commit_stable = cil->xc_push_commit_stable;
	cil->xc_push_commit_stable = false;

	/*
	 * As we are about to switch to a new, empty CIL context, we no longer
	 * need to throttle tasks on CIL space overruns. Wake any waiters that
	 * the hard push throttle may have caught so they can start committing
	 * to the new context. The cil->xc_push_lock provides the serialisation
	 * necessary for safely using the lockless waitqueue_active() check in
	 * this context.
	 */
	if (waitqueue_active(&cil->xc_push_wait))
		wake_up_all(&cil->xc_push_wait);

	/*
	 * Check if we've anything to push. If there is nothing, then we don't
	 * move on to a new sequence number and so we have to be able to push
	 * this sequence again later.
	 */
	if (list_empty(&cil->xc_cil)) {
		cil->xc_push_seq = 0;
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/* check for a previously pushed sequence */
	if (push_seq < ctx->sequence) {
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/*
	 * We are now going to push this context, so add it to the committing
	 * list before we do anything else. This ensures that anyone waiting on
	 * this push can easily detect the difference between a "push in
	 * progress" and "CIL is empty, nothing to do".
	 *
	 * IOWs, a wait loop can now check for:
	 *	the current sequence not being found on the committing list;
	 *	an empty CIL; and
	 *	an unchanged sequence number
	 * to detect a push that had nothing to do and therefore does not need
	 * waiting on. If the CIL is not empty, we get put on the committing
	 * list before emptying the CIL and bumping the sequence number. Hence
	 * an empty CIL and an unchanged sequence number means we jumped out
	 * above after doing nothing.
	 *
	 * Hence the waiter will either find the commit sequence on the
	 * committing list or the sequence number will be unchanged and the CIL
	 * still dirty. In that latter case, the push has not yet started, and
	 * so the waiter will have to continue trying to check the CIL
	 * committing list until it is found. In extreme cases of delay, the
	 * sequence may fully commit between the attempts the wait makes to wait
	 * on the commit sequence.
	 */
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_push_lock);

	/*
	 * The CIL is stable at this point - nothing new will be added to it
	 * because we hold the flush lock exclusively. Hence we can now issue
	 * a cache flush to ensure all the completed metadata in the journal we
	 * are about to overwrite is on stable storage.
	 *
	 * Because we are issuing this cache flush before we've written the
	 * tail lsn to the iclog, we can have metadata IO completions move the
	 * tail forwards between the completion of this flush and the iclog
	 * being written. In this case, we need to re-issue the cache flush
	 * before the iclog write. To detect whether the log tail moves, sample
	 * the tail LSN *before* we issue the flush.
	 */
	preflush_tail_lsn = atomic64_read(&log->l_tail_lsn);
	xfs_flush_bdev_async(&bio, log->l_mp->m_ddev_targp->bt_bdev,
				&bdev_flush);
	/*
	 * Pull all the log vectors off the items in the CIL, and remove the
	 * items from the CIL. We don't need the CIL lock here because it's only
	 * needed on the transaction commit side which is currently locked out
	 * by the flush lock.
	 */
	lv = NULL;
	while (!list_empty(&cil->xc_cil)) {
		struct xfs_log_item	*item;

		item = list_first_entry(&cil->xc_cil,
					struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		if (!ctx->lv_chain)
			ctx->lv_chain = item->li_lv;
		else
			lv->lv_next = item->li_lv;
		lv = item->li_lv;
		item->li_lv = NULL;
		num_iovecs += lv->lv_niovecs;
	}

	/*
	 * Switch the contexts so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI. Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 *
	 * xfs_log_force_seq requires us to mirror the new sequence into the cil
	 * structure atomically with the addition of this sequence to the
	 * committing list. This also ensures that we can do unlocked checks
	 * against the current sequence in log forces without risking
	 * dereferencing a freed context pointer.
	 */
	spin_lock(&cil->xc_push_lock);
	xlog_cil_ctx_switch(cil, new_ctx);
	spin_unlock(&cil->xc_push_lock);
	up_write(&cil->xc_ctx_lock);
	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 *
	 * The LSN we need to pass to the log items on transaction commit is
	 * the LSN reported by the first log vector write. If we use the commit
	 * record lsn then we can move the tail beyond the grant write head.
	 */
	tic = ctx->ticket;
	thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	thdr.th_type = XFS_TRANS_CHECKPOINT;
	thdr.th_tid = tic->t_tid;
	thdr.th_num_items = num_iovecs;
	lhdr.i_addr = &thdr;
	lhdr.i_len = sizeof(xfs_trans_header_t);
	lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
	tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

	lvhdr.lv_niovecs = 1;
	lvhdr.lv_iovecp = &lhdr;
	lvhdr.lv_next = ctx->lv_chain;

	/*
	 * Before we format and submit the first iclog, we have to ensure that
	 * the metadata writeback ordering cache flush is complete.
	 */
	wait_for_completion(&bdev_flush);

	error = xlog_cil_write_chain(ctx, &lvhdr);
	if (error)
		goto out_abort_free_ticket;

	error = xlog_cil_write_commit_record(ctx);
	if (error)
		goto out_abort_free_ticket;

	xfs_log_ticket_ungrant(log, tic);
	/*
	 * If the checkpoint spans multiple iclogs, wait for all previous iclogs
	 * to complete before we submit the commit_iclog. We can't use state
	 * checks for this - ACTIVE can be either a past completed iclog or a
	 * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a
	 * past or future iclog awaiting IO or ordered IO completion to be run.
	 * In the latter case, if it's a future iclog and we wait on it, we
	 * will hang because it won't get processed through to ic_force_wait
	 * wakeup until this commit_iclog is written to disk. Hence we use the
	 * iclog header lsn and compare it to the commit lsn to determine if we
	 * need to wait on iclogs or not.
	 */
	spin_lock(&log->l_icloglock);
	if (ctx->start_lsn != ctx->commit_lsn) {
		xfs_lsn_t	plsn;

		plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn);
		if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) {
			/*
			 * Waiting on ic_force_wait orders the completion of
			 * iclogs older than ic_prev. Hence we only need to wait
			 * on the most recent older iclog here.
			 */
			xlog_wait_on_iclog(ctx->commit_iclog->ic_prev);
			spin_lock(&log->l_icloglock);
		}

		/*
		 * We need to issue a pre-flush so that the ordering for this
		 * checkpoint is correctly preserved down to stable storage.
		 */
		ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
	}

	/*
	 * The commit iclog must be written to stable storage to guarantee
	 * journal IO vs metadata writeback IO is correctly ordered on stable
	 * storage.
	 *
	 * If the push caller needs the commit to be immediately stable and the
	 * commit_iclog is not yet marked as XLOG_STATE_WANT_SYNC to indicate it
	 * will be written when released, switch its state to WANT_SYNC right
	 * now.
	 */
	ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
	if (push_commit_stable &&
	    ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
		xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
	xlog_state_release_iclog(log, ctx->commit_iclog, preflush_tail_lsn);

	/* Not safe to reference ctx now! */

	spin_unlock(&log->l_icloglock);
	return;
out_skip:
	up_write(&cil->xc_ctx_lock);
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return;

out_abort_free_ticket:
	xfs_log_ticket_ungrant(log, tic);
	ASSERT(xlog_is_shutdown(log));
	if (!ctx->commit_iclog) {
		xlog_cil_committed(ctx);
		return;
	}
	spin_lock(&log->l_icloglock);
	xlog_state_release_iclog(log, ctx->commit_iclog, 0);
	/* Not safe to reference ctx now! */
	spin_unlock(&log->l_icloglock);
}
/*
 * We need to push CIL every so often so we don't cache more than we can fit in
 * the log. The limit really is that a checkpoint can't be more than half the
 * log (the current checkpoint is not allowed to overwrite the previous
 * checkpoint), but commit latency and memory usage limit this to a smaller
 * size.
 */
static void
xlog_cil_push_background(
	struct xlog	*log) __releases(cil->xc_ctx_lock)
{
	struct xfs_cil	*cil = log->l_cilp;

	/*
	 * The cil won't be empty because we are called while holding the
	 * context lock so whatever we added to the CIL will still be there.
	 */
	ASSERT(!list_empty(&cil->xc_cil));

	/*
	 * Don't do a background push if we haven't used up all the
	 * space available yet.
	 */
	if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) {
		up_read(&cil->xc_ctx_lock);
		return;
	}

	spin_lock(&cil->xc_push_lock);
	if (cil->xc_push_seq < cil->xc_current_sequence) {
		cil->xc_push_seq = cil->xc_current_sequence;
		queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
	}

	/*
	 * Drop the context lock now, we can't hold that if we need to sleep
	 * because we are over the blocking threshold. The push_lock is still
	 * held, so blocking threshold sleep/wakeup is still correctly
	 * serialised here.
	 */
	up_read(&cil->xc_ctx_lock);

	/*
	 * If we are well over the space limit, throttle the work that is being
	 * done until the push work on this context has begun. Enforce the hard
	 * throttle on all transaction commits once it has been activated, even
	 * if the committing transactions have resulted in the space usage
	 * dipping back down under the hard limit.
	 *
	 * The cil->xc_push_lock provides the serialisation necessary for safely
	 * using the lockless waitqueue_active() check in this context.
	 */
	if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log) ||
	    waitqueue_active(&cil->xc_push_wait)) {
		trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
		ASSERT(cil->xc_ctx->space_used < log->l_logsize);
		xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
		return;
	}

	spin_unlock(&cil->xc_push_lock);
}
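
/*
 * A minimal sketch of the two-threshold scheme above, outside the XFS types
 * (the names and limits here are assumptions for illustration, not the
 * kernel API): a soft limit that queues background work, and a hard limit
 * that makes producers sleep until the push has started. Kept inside #if 0
 * so it is never built.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>

struct throttle {
	size_t	used;		/* space consumed in the current context */
	size_t	soft_limit;	/* start a background push beyond this */
	size_t	hard_limit;	/* block new work beyond this */
};

/* Returns true if the caller must wait for the push to begin. */
static bool throttle_check(struct throttle *t, bool *queue_push)
{
	*queue_push = false;
	if (t->used < t->soft_limit)
		return false;			/* plenty of room, carry on */
	*queue_push = true;			/* kick the background push */
	return t->used >= t->hard_limit;	/* sleep if well over */
}
#endif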
/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed.
 *
 * If the caller is performing a synchronous force, we will flush the workqueue
 * to get previously queued work moving to minimise the wait time they will
 * undergo waiting for all outstanding pushes to complete. The caller is
 * expected to do the required waiting for push_seq to complete.
 *
 * If the caller is performing an async push, we need to ensure that the
 * checkpoint is fully flushed out of the iclogs when we finish the push. If we
 * don't do this, then the commit record may remain sitting in memory in an
 * ACTIVE iclog. This then requires another full log force to push to disk,
 * which defeats the purpose of having an async, non-blocking CIL force
 * mechanism. Hence in this case we need to pass a flag to the push work to
 * indicate it needs to flush the commit record itself.
 */
static void
xlog_cil_push_now(
	struct xlog	*log,
	xfs_csn_t	push_seq,
	bool		async)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (!cil)
		return;

	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

	/* start on any pending background push to minimise wait time on it */
	if (!async)
		flush_workqueue(cil->xc_push_wq);

	/*
	 * If the CIL is empty or we've already pushed the sequence then
	 * there's no work we need to do.
	 */
	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
		spin_unlock(&cil->xc_push_lock);
		return;
	}

	cil->xc_push_seq = push_seq;
	cil->xc_push_commit_stable = async;
	queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
	spin_unlock(&cil->xc_push_lock);
}
bool
xlog_cil_empty(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;
	bool		empty = false;

	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil))
		empty = true;
	spin_unlock(&cil->xc_push_lock);
	return empty;
}
/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
void
xlog_cil_commit(
	struct xlog		*log,
	struct xfs_trans	*tp,
	xfs_csn_t		*commit_seq,
	bool			regrant)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_item	*lip, *next;

	/*
	 * Do all necessary memory allocation before we lock the CIL.
	 * This ensures the allocation does not deadlock with a CIL
	 * push in memory reclaim (e.g. from kswapd).
	 */
	xlog_cil_alloc_shadow_bufs(log, tp);

	/* lock out background commit */
	down_read(&cil->xc_ctx_lock);

	xlog_cil_insert_items(log, tp);

	if (regrant && !xlog_is_shutdown(log))
		xfs_log_ticket_regrant(log, tp->t_ticket);
	else
		xfs_log_ticket_ungrant(log, tp->t_ticket);
	tp->t_ticket = NULL;
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and possibly freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	trace_xfs_trans_commit_items(tp, _RET_IP_);
	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (lip->li_ops->iop_committing)
			lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
	}
	if (commit_seq)
		*commit_seq = cil->xc_ctx->sequence;

	/* xlog_cil_push_background() releases cil->xc_ctx_lock */
	xlog_cil_push_background(log);
}
/*
 * Flush the CIL to stable storage but don't wait for it to complete. This
 * requires the CIL push to ensure the commit record for the push hits the disk,
 * but otherwise is no different to a push done from a log force.
 */
void
xlog_cil_flush(
	struct xlog	*log)
{
	xfs_csn_t	seq = log->l_cilp->xc_current_sequence;

	trace_xfs_log_force(log->l_mp, seq, _RET_IP_);
	xlog_cil_push_now(log, seq, true);
}
/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence number given.
 * Hence the only time we will trigger a push here is if the push sequence is
 * the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_seq(
	struct xlog	*log,
	xfs_csn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	if (!sequence)
		sequence = cil->xc_current_sequence;
	trace_xfs_log_force(log->l_mp, sequence, _RET_IP_);

	/*
	 * check to see if we need to force out the current context.
	 * xlog_cil_push() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
restart:
	xlog_cil_push_now(log, sequence, false);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (xlog_is_shutdown(log))
			goto out_shutdown;
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}

	/*
	 * The call to xlog_cil_push_now() executes the push in the background.
	 * Hence by the time we have got here our sequence may not have been
	 * pushed yet. This is true if the current sequence still matches the
	 * push sequence after the above wait loop and the CIL still contains
	 * dirty objects. This is guaranteed by the push code first adding the
	 * context to the committing list before emptying the CIL.
	 *
	 * Hence if we don't find the context in the committing list and the
	 * current sequence number is unchanged then the CIL contents are
	 * significant. If the CIL is empty, it means there was nothing to push
	 * and that means there is nothing to wait for. If the CIL is not empty,
	 * it means we haven't yet started the push, because if it had started
	 * we would have found the context on the committing list.
	 */
	if (sequence == cil->xc_current_sequence &&
	    !list_empty(&cil->xc_cil)) {
		spin_unlock(&cil->xc_push_lock);
		goto restart;
	}

	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;

	/*
	 * We detected a shutdown in progress. We need to trigger the log force
	 * to pass through its iclog state machine error handling, even though
	 * we are already in a shutdown state. Hence we can't return
	 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
	 * LSN is already stable), so we return a zero LSN instead.
	 */
out_shutdown:
	spin_unlock(&cil->xc_push_lock);
	return 0;
}
/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item	*lip)
{
	struct xfs_cil_ctx	*ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;

	if (list_empty(&lip->li_cil))
		return false;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	return lip->li_seq == ctx->sequence;
}
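
/*
 * Usage note (a hedged summary of typical callers, not a claim about every
 * user): deferred operations use this check when deciding whether an intent
 * item needs relogging - an item whose li_seq still matches the open
 * checkpoint was committed recently enough, while one committed in an
 * earlier sequence is pinning the log tail and is a relogging candidate.
 */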
/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
	struct xlog	*log)
{
	struct xfs_cil		*cil;
	struct xfs_cil_ctx	*ctx;

	cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
	if (!cil)
		return -ENOMEM;

	/*
	 * Limit the CIL pipeline depth to 4 concurrent works to bound the
	 * concurrency the log spinlocks will be exposed to.
	 */
	cil->xc_push_wq = alloc_workqueue("xfs-cil/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
			4, log->l_mp->m_super->s_id);
	if (!cil->xc_push_wq)
		goto out_destroy_cil;

	INIT_LIST_HEAD(&cil->xc_cil);
	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_cil_lock);
	spin_lock_init(&cil->xc_push_lock);
	init_waitqueue_head(&cil->xc_push_wait);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_start_wait);
	init_waitqueue_head(&cil->xc_commit_wait);

	cil->xc_log = log;
	log->l_cilp = cil;

	ctx = xlog_cil_ctx_alloc();
	xlog_cil_ctx_switch(cil, ctx);

	return 0;

out_destroy_cil:
	kmem_free(cil);
	return -ENOMEM;
}
void
xlog_cil_destroy(
	struct xlog	*log)
{
	if (log->l_cilp->xc_ctx) {
		if (log->l_cilp->xc_ctx->ticket)
			xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
		kmem_free(log->l_cilp->xc_ctx);
	}

	ASSERT(list_empty(&log->l_cilp->xc_cil));
	destroy_workqueue(log->l_cilp->xc_push_wq);
	kmem_free(log->l_cilp);
}
);