// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/transaction.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * Generic filesystem transaction handling code; part of the ext2fs
 * journaling system.
 *
 * This file manages transactions (compound commits managed by the
 * journaling code) and handles (individual atomic operations by the
 * filesystem).
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/backing-dev.h>
#include <linux/bug.h>
#include <linux/module.h>
#include <linux/sched/mm.h>

#include <trace/events/jbd2.h>
static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
static void __jbd2_journal_unfile_buffer(struct journal_head *jh);

static struct kmem_cache *transaction_cache;
int __init jbd2_journal_init_transaction_cache(void)
{
        J_ASSERT(!transaction_cache);
        transaction_cache = kmem_cache_create("jbd2_transaction_s",
                                        sizeof(transaction_t),
                                        0,
                                        SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
                                        NULL);
        if (!transaction_cache) {
                pr_emerg("JBD2: failed to create transaction cache\n");
                return -ENOMEM;
        }
        return 0;
}
void jbd2_journal_destroy_transaction_cache(void)
{
        kmem_cache_destroy(transaction_cache);
        transaction_cache = NULL;
}
void jbd2_journal_free_transaction(transaction_t *transaction)
{
        if (unlikely(ZERO_OR_NULL_PTR(transaction)))
                return;
        kmem_cache_free(transaction_cache, transaction);
}
/*
 * Base amount of descriptor blocks we reserve for each transaction.
 */
static int jbd2_descriptor_blocks_per_trans(journal_t *journal)
{
        int tag_space = journal->j_blocksize - sizeof(journal_header_t);
        int tags_per_block;

        /* Subtract UUID */
        tag_space -= 16;
        if (jbd2_journal_has_csum_v2or3(journal))
                tag_space -= sizeof(struct jbd2_journal_block_tail);
        /* Commit code leaves a slack space of 16 bytes at the end of block */
        tags_per_block = (tag_space - 16) / journal_tag_bytes(journal);
        /*
         * Revoke descriptors are accounted separately so we need to reserve
         * space for commit block and normal transaction descriptor blocks.
         */
        return 1 + DIV_ROUND_UP(journal->j_max_transaction_buffers,
                                tags_per_block);
}
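/*
 * Illustrative arithmetic (added commentary, not from the original file):
 * assuming a 4096-byte journal block, no v2/v3 checksums and 16-byte tags,
 * the calculation above works out roughly as
 *
 *      tag_space      = 4096 - sizeof(journal_header_t)       (~4084)
 *      tags_per_block = (4084 - 16) / 16                       (~254)
 *
 * so a transaction limited to, say, 1024 buffers would reserve
 * 1 + DIV_ROUND_UP(1024, 254) = 6 descriptor/commit blocks.  The exact
 * numbers depend on journal_tag_bytes() for the enabled feature set.
 */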
/*
 * jbd2_get_transaction: obtain a new transaction_t object.
 *
 * Simply initialise a new transaction. Initialize it in
 * RUNNING state and add it to the current journal (which should not
 * have an existing running transaction: we only make a new transaction
 * once we have started to commit the old one).
 *
 * Preconditions:
 * The journal MUST be locked. We don't perform atomic mallocs on the
 * new transaction and we can't block without protecting against other
 * processes trying to touch the journal while it is in transition.
 */

static void jbd2_get_transaction(journal_t *journal,
                                 transaction_t *transaction)
{
        transaction->t_journal = journal;
        transaction->t_state = T_RUNNING;
        transaction->t_start_time = ktime_get();
        transaction->t_tid = journal->j_transaction_sequence++;
        transaction->t_expires = jiffies + journal->j_commit_interval;
        spin_lock_init(&transaction->t_handle_lock);
        atomic_set(&transaction->t_updates, 0);
        atomic_set(&transaction->t_outstanding_credits,
                   jbd2_descriptor_blocks_per_trans(journal) +
                   atomic_read(&journal->j_reserved_credits));
        atomic_set(&transaction->t_outstanding_revokes, 0);
        atomic_set(&transaction->t_handle_count, 0);
        INIT_LIST_HEAD(&transaction->t_inode_list);
        INIT_LIST_HEAD(&transaction->t_private_list);

        /* Set up the commit timer for the new transaction. */
        journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires);
        add_timer(&journal->j_commit_timer);

        J_ASSERT(journal->j_running_transaction == NULL);
        journal->j_running_transaction = transaction;
        transaction->t_max_wait = 0;
        transaction->t_start = jiffies;
        transaction->t_requested = 0;
}
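/*
 * Note (added commentary): the initial value of t_outstanding_credits above
 * pre-charges the new transaction for its own descriptor and commit blocks
 * and for any credits currently sitting in reserved handles, so that later
 * credit accounting in add_transaction_credits() only has to deal with the
 * buffers a handle actually asks for.
 */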
/*
 * A handle_t is an object which represents a single atomic update to a
 * filesystem, and which tracks all of the modifications which form part
 * of that one update.
 */

/*
 * Update transaction's maximum wait time, if debugging is enabled.
 *
 * In order for t_max_wait to be reliable, it must be protected by a
 * lock. But doing so will mean that start_this_handle() can not be
 * run in parallel on SMP systems, which limits our scalability. So
 * unless debugging is enabled, we no longer update t_max_wait, which
 * means that maximum wait time reported by the jbd2_run_stats
 * tracepoint will always be zero.
 */
static inline void update_t_max_wait(transaction_t *transaction,
                                     unsigned long ts)
{
#ifdef CONFIG_JBD2_DEBUG
        if (jbd2_journal_enable_debug &&
            time_after(transaction->t_start, ts)) {
                ts = jbd2_time_diff(ts, transaction->t_start);
                spin_lock(&transaction->t_handle_lock);
                if (ts > transaction->t_max_wait)
                        transaction->t_max_wait = ts;
                spin_unlock(&transaction->t_handle_lock);
        }
#endif
}
/*
 * Wait until running transaction passes to T_FLUSH state and new transaction
 * can thus be started. Also starts the commit if needed. The function expects
 * running transaction to exist and releases j_state_lock.
 */
static void wait_transaction_locked(journal_t *journal)
        __releases(journal->j_state_lock)
{
        DEFINE_WAIT(wait);
        int need_to_start;
        tid_t tid = journal->j_running_transaction->t_tid;

        prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
                        TASK_UNINTERRUPTIBLE);
        need_to_start = !tid_geq(journal->j_commit_request, tid);
        read_unlock(&journal->j_state_lock);
        if (need_to_start)
                jbd2_log_start_commit(journal, tid);
        jbd2_might_wait_for_commit(journal);
        schedule();
        finish_wait(&journal->j_wait_transaction_locked, &wait);
}
/*
 * Wait until running transaction transitions from T_SWITCH to T_FLUSH
 * state and new transaction can thus be started. The function releases
 * j_state_lock.
 */
static void wait_transaction_switching(journal_t *journal)
        __releases(journal->j_state_lock)
{
        DEFINE_WAIT(wait);

        if (WARN_ON(!journal->j_running_transaction ||
                    journal->j_running_transaction->t_state != T_SWITCH))
                return;
        prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
                        TASK_UNINTERRUPTIBLE);
        read_unlock(&journal->j_state_lock);
        /*
         * We don't call jbd2_might_wait_for_commit() here as there's no
         * waiting for outstanding handles happening anymore in T_SWITCH state
         * and handling of reserved handles actually relies on that for
         * correctness.
         */
        schedule();
        finish_wait(&journal->j_wait_transaction_locked, &wait);
}
static void sub_reserved_credits(journal_t *journal, int blocks)
{
        atomic_sub(blocks, &journal->j_reserved_credits);
        wake_up(&journal->j_wait_reserved);
}
/*
 * Wait until we can add credits for handle to the running transaction. Called
 * with j_state_lock held for reading. Returns 0 if handle joined the running
 * transaction. Returns 1 if we had to wait, j_state_lock is dropped, and the
 * caller must retry.
 */
static int add_transaction_credits(journal_t *journal, int blocks,
                                   int rsv_blocks)
{
        transaction_t *t = journal->j_running_transaction;
        int needed;
        int total = blocks + rsv_blocks;

        /*
         * If the current transaction is locked down for commit, wait
         * for the lock to be released.
         */
        if (t->t_state != T_RUNNING) {
                WARN_ON_ONCE(t->t_state >= T_FLUSH);
                wait_transaction_locked(journal);
                return 1;
        }

        /*
         * If there is not enough space left in the log to write all
         * potential buffers requested by this operation, we need to
         * stall pending a log checkpoint to free some more log space.
         */
        needed = atomic_add_return(total, &t->t_outstanding_credits);
        if (needed > journal->j_max_transaction_buffers) {
                /*
                 * If the current transaction is already too large,
                 * then start to commit it: we can then go back and
                 * attach this handle to a new transaction.
                 */
                atomic_sub(total, &t->t_outstanding_credits);

                /*
                 * Is the number of reserved credits in the current transaction too
                 * big to fit this handle? Wait until reserved credits are freed.
                 */
                if (atomic_read(&journal->j_reserved_credits) + total >
                    journal->j_max_transaction_buffers) {
                        read_unlock(&journal->j_state_lock);
                        jbd2_might_wait_for_commit(journal);
                        wait_event(journal->j_wait_reserved,
                                   atomic_read(&journal->j_reserved_credits) + total <=
                                   journal->j_max_transaction_buffers);
                        return 1;
                }

                wait_transaction_locked(journal);
                return 1;
        }

        /*
         * The commit code assumes that it can get enough log space
         * without forcing a checkpoint. This is *critical* for
         * correctness: a checkpoint of a buffer which is also
         * associated with a committing transaction creates a deadlock,
         * so commit simply cannot force through checkpoints.
         *
         * We must therefore ensure the necessary space in the journal
         * *before* starting to dirty potentially checkpointed buffers
         * in the new transaction.
         */
        if (jbd2_log_space_left(journal) < journal->j_max_transaction_buffers) {
                atomic_sub(total, &t->t_outstanding_credits);
                read_unlock(&journal->j_state_lock);
                jbd2_might_wait_for_commit(journal);
                write_lock(&journal->j_state_lock);
                if (jbd2_log_space_left(journal) <
                                        journal->j_max_transaction_buffers)
                        __jbd2_log_wait_for_space(journal);
                write_unlock(&journal->j_state_lock);
                return 1;
        }

        /* No reservation? We are done... */
        if (!rsv_blocks)
                return 0;

        needed = atomic_add_return(rsv_blocks, &journal->j_reserved_credits);
        /* We allow at most half of a transaction to be reserved */
        if (needed > journal->j_max_transaction_buffers / 2) {
                sub_reserved_credits(journal, rsv_blocks);
                atomic_sub(total, &t->t_outstanding_credits);
                read_unlock(&journal->j_state_lock);
                jbd2_might_wait_for_commit(journal);
                wait_event(journal->j_wait_reserved,
                         atomic_read(&journal->j_reserved_credits) + rsv_blocks
                         <= journal->j_max_transaction_buffers / 2);
                return 1;
        }
        return 0;
}
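/*
 * Summary (added commentary): a non-zero return from
 * add_transaction_credits() means the caller has lost j_state_lock and must
 * retry.  That happens in three situations: the running transaction was
 * locked for commit, the handle would not fit into the current transaction
 * (or into its reserved-credit budget), or the log was too full and we had
 * to wait for checkpointing to free space.
 */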
/*
 * start_this_handle: Given a handle, deal with any locking or stalling
 * needed to make sure that there is enough journal space for the handle
 * to begin. Attach the handle to a transaction and set up the
 * transaction's buffer credits.
 */

static int start_this_handle(journal_t *journal, handle_t *handle,
                             gfp_t gfp_mask)
{
        transaction_t   *transaction, *new_transaction = NULL;
        int             blocks = handle->h_total_credits;
        int             rsv_blocks = 0;
        unsigned long ts = jiffies;

        if (handle->h_rsv_handle)
                rsv_blocks = handle->h_rsv_handle->h_total_credits;

        /*
         * Limit the number of reserved credits to 1/2 of maximum transaction
         * size and limit the number of total credits to not exceed maximum
         * transaction size per operation.
         */
        if ((rsv_blocks > journal->j_max_transaction_buffers / 2) ||
            (rsv_blocks + blocks > journal->j_max_transaction_buffers)) {
                printk(KERN_ERR "JBD2: %s wants too many credits "
                       "credits:%d rsv_credits:%d max:%d\n",
                       current->comm, blocks, rsv_blocks,
                       journal->j_max_transaction_buffers);
                WARN_ON(1);
                return -ENOSPC;
        }

alloc_transaction:
        if (!journal->j_running_transaction) {
                /*
                 * If __GFP_FS is not present, then we may be being called from
                 * inside the fs writeback layer, so we MUST NOT fail.
                 */
                if ((gfp_mask & __GFP_FS) == 0)
                        gfp_mask |= __GFP_NOFAIL;
                new_transaction = kmem_cache_zalloc(transaction_cache,
                                                    gfp_mask);
                if (!new_transaction)
                        return -ENOMEM;
        }

        jbd_debug(3, "New handle %p going live.\n", handle);

        /*
         * We need to hold j_state_lock until t_updates has been incremented,
         * for proper journal barrier handling
         */
repeat:
        read_lock(&journal->j_state_lock);
        BUG_ON(journal->j_flags & JBD2_UNMOUNT);
        if (is_journal_aborted(journal) ||
            (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
                read_unlock(&journal->j_state_lock);
                jbd2_journal_free_transaction(new_transaction);
                return -EROFS;
        }

        /*
         * Wait on the journal's transaction barrier if necessary. Specifically
         * we allow reserved handles to proceed because otherwise commit could
         * deadlock on page writeback not being able to complete.
         */
        if (!handle->h_reserved && journal->j_barrier_count) {
                read_unlock(&journal->j_state_lock);
                wait_event(journal->j_wait_transaction_locked,
                                journal->j_barrier_count == 0);
                goto repeat;
        }

        if (!journal->j_running_transaction) {
                read_unlock(&journal->j_state_lock);
                if (!new_transaction)
                        goto alloc_transaction;
                write_lock(&journal->j_state_lock);
                if (!journal->j_running_transaction &&
                    (handle->h_reserved || !journal->j_barrier_count)) {
                        jbd2_get_transaction(journal, new_transaction);
                        new_transaction = NULL;
                }
                write_unlock(&journal->j_state_lock);
                goto repeat;
        }

        transaction = journal->j_running_transaction;

        if (!handle->h_reserved) {
                /* We may have dropped j_state_lock - restart in that case */
                if (add_transaction_credits(journal, blocks, rsv_blocks))
                        goto repeat;
        } else {
                /*
                 * We have handle reserved so we are allowed to join T_LOCKED
                 * transaction and we don't have to check for transaction size
                 * and journal space. But we still have to wait while running
                 * transaction is being switched to a committing one as it
                 * won't wait for any handles anymore.
                 */
                if (transaction->t_state == T_SWITCH) {
                        wait_transaction_switching(journal);
                        goto repeat;
                }
                sub_reserved_credits(journal, blocks);
                handle->h_reserved = 0;
        }

        /* OK, account for the buffers that this operation expects to
         * use and add the handle to the running transaction.
         */
        update_t_max_wait(transaction, ts);
        handle->h_transaction = transaction;
        handle->h_requested_credits = blocks;
        handle->h_revoke_credits_requested = handle->h_revoke_credits;
        handle->h_start_jiffies = jiffies;
        atomic_inc(&transaction->t_updates);
        atomic_inc(&transaction->t_handle_count);
        jbd_debug(4, "Handle %p given %d credits (total %d, free %lu)\n",
                  handle, blocks,
                  atomic_read(&transaction->t_outstanding_credits),
                  jbd2_log_space_left(journal));
        read_unlock(&journal->j_state_lock);
        current->journal_info = handle;

        rwsem_acquire_read(&journal->j_trans_commit_map, 0, 0, _THIS_IP_);
        jbd2_journal_free_transaction(new_transaction);
        /*
         * Ensure that no allocations done while the transaction is open are
         * going to recurse back to the fs layer.
         */
        handle->saved_alloc_context = memalloc_nofs_save();
        return 0;
}
/* Allocate a new handle. This should probably be in a slab... */
static handle_t *new_handle(int nblocks)
{
        handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
        if (!handle)
                return NULL;
        handle->h_total_credits = nblocks;
        handle->h_ref = 1;

        return handle;
}
handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
                              int revoke_records, gfp_t gfp_mask,
                              unsigned int type, unsigned int line_no)
{
        handle_t *handle = journal_current_handle();
        int err;

        if (!journal)
                return ERR_PTR(-EROFS);

        if (handle) {
                J_ASSERT(handle->h_transaction->t_journal == journal);
                handle->h_ref++;
                return handle;
        }

        nblocks += DIV_ROUND_UP(revoke_records,
                                journal->j_revoke_records_per_block);
        handle = new_handle(nblocks);
        if (!handle)
                return ERR_PTR(-ENOMEM);
        if (rsv_blocks) {
                handle_t *rsv_handle;

                rsv_handle = new_handle(rsv_blocks);
                if (!rsv_handle) {
                        jbd2_free_handle(handle);
                        return ERR_PTR(-ENOMEM);
                }
                rsv_handle->h_reserved = 1;
                rsv_handle->h_journal = journal;
                handle->h_rsv_handle = rsv_handle;
        }
        handle->h_revoke_credits = revoke_records;

        err = start_this_handle(journal, handle, gfp_mask);
        if (err < 0) {
                if (handle->h_rsv_handle)
                        jbd2_free_handle(handle->h_rsv_handle);
                jbd2_free_handle(handle);
                return ERR_PTR(err);
        }
        handle->h_type = type;
        handle->h_line_no = line_no;
        trace_jbd2_handle_start(journal->j_fs_dev->bd_dev,
                                handle->h_transaction->t_tid, type,
                                line_no, nblocks);

        return handle;
}
EXPORT_SYMBOL(jbd2__journal_start);
/**
 * handle_t *jbd2_journal_start() - Obtain a new handle.
 * @journal: Journal to start transaction on.
 * @nblocks: number of block buffer we might modify
 *
 * We make sure that the transaction can guarantee at least nblocks of
 * modified buffers in the log. We block until the log can guarantee
 * that much space. Additionally, if rsv_blocks > 0, we also create another
 * handle with rsv_blocks reserved blocks in the journal. This handle is
 * stored in h_rsv_handle. It is not attached to any particular transaction
 * and thus doesn't block transaction commit. If the caller uses this reserved
 * handle, it has to set h_rsv_handle to NULL as otherwise jbd2_journal_stop()
 * on the parent handle will dispose the reserved one. Reserved handle has to
 * be converted to a normal handle using jbd2_journal_start_reserved() before
 * it can be used.
 *
 * Return a pointer to a newly allocated handle, or an ERR_PTR() value
 * on failure.
 */
handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
        return jbd2__journal_start(journal, nblocks, 0, 0, GFP_NOFS, 0, 0);
}
EXPORT_SYMBOL(jbd2_journal_start);
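/*
 * Illustrative usage (added commentary, not part of the original file):
 * a filesystem typically brackets a metadata update with a handle as in the
 * hypothetical sketch below.  Names such as "sbi->s_journal" and the credit
 * count are placeholders, not taken from any particular caller.
 *
 *      handle_t *handle = jbd2_journal_start(sbi->s_journal, 8);
 *      if (IS_ERR(handle))
 *              return PTR_ERR(handle);
 *      err = jbd2_journal_get_write_access(handle, bh);
 *      if (!err) {
 *              ... modify bh->b_data ...
 *              err = jbd2_journal_dirty_metadata(handle, bh);
 *      }
 *      err2 = jbd2_journal_stop(handle);
 *      return err ? err : err2;
 */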
static void __jbd2_journal_unreserve_handle(handle_t *handle, transaction_t *t)
{
        journal_t *journal = handle->h_journal;

        WARN_ON(!handle->h_reserved);
        sub_reserved_credits(journal, handle->h_total_credits);
        if (t)
                atomic_sub(handle->h_total_credits, &t->t_outstanding_credits);
}
void jbd2_journal_free_reserved(handle_t *handle)
{
        journal_t *journal = handle->h_journal;

        /* Get j_state_lock to pin running transaction if it exists */
        read_lock(&journal->j_state_lock);
        __jbd2_journal_unreserve_handle(handle, journal->j_running_transaction);
        read_unlock(&journal->j_state_lock);
        jbd2_free_handle(handle);
}
EXPORT_SYMBOL(jbd2_journal_free_reserved);
/**
 * int jbd2_journal_start_reserved() - start reserved handle
 * @handle: handle to start
 * @type: for handle statistics
 * @line_no: for handle statistics
 *
 * Start handle that has been previously reserved with jbd2_journal_reserve().
 * This attaches @handle to the running transaction (or creates one if there's
 * not transaction running). Unlike jbd2_journal_start() this function cannot
 * block on journal commit, checkpointing, or similar stuff. It can block on
 * memory allocation or frozen journal though.
 *
 * Return 0 on success, non-zero on error - handle is freed in that case.
 */
int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
                                unsigned int line_no)
{
        journal_t *journal = handle->h_journal;
        int ret = -EIO;

        if (WARN_ON(!handle->h_reserved)) {
                /* Someone passed in normal handle? Just stop it. */
                jbd2_journal_stop(handle);
                return ret;
        }
        /*
         * Usefulness of mixing of reserved and unreserved handles is
         * questionable. So far nobody seems to need it so just error out.
         */
        if (WARN_ON(current->journal_info)) {
                jbd2_journal_free_reserved(handle);
                return ret;
        }

        handle->h_journal = NULL;
        /*
         * GFP_NOFS is here because callers are likely from writeback or
         * similarly constrained call sites
         */
        ret = start_this_handle(journal, handle, GFP_NOFS);
        if (ret < 0) {
                handle->h_journal = journal;
                jbd2_journal_free_reserved(handle);
                return ret;
        }
        handle->h_type = type;
        handle->h_line_no = line_no;
        trace_jbd2_handle_start(journal->j_fs_dev->bd_dev,
                                handle->h_transaction->t_tid, type,
                                line_no, handle->h_total_credits);
        return 0;
}
EXPORT_SYMBOL(jbd2_journal_start_reserved);
/**
 * int jbd2_journal_extend() - extend buffer credits.
 * @handle:  handle to 'extend'
 * @nblocks: nr blocks to try to extend by.
 * @revoke_records: number of revoke records to try to extend by.
 *
 * Some transactions, such as large extends and truncates, can be done
 * atomically all at once or in several stages. The operation requests
 * a credit for a number of buffer modifications in advance, but can
 * extend its credit if it needs more.
 *
 * jbd2_journal_extend tries to give the running handle more buffer credits.
 * It does not guarantee that allocation - this is a best-effort only.
 * The calling process MUST be able to deal cleanly with a failure to
 * extend here.
 *
 * Return 0 on success, non-zero on failure.
 *
 * return code < 0 implies an error
 * return code > 0 implies normal transaction-full status.
 */
int jbd2_journal_extend(handle_t *handle, int nblocks, int revoke_records)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal;
        int result;
        int wanted;

        if (is_handle_aborted(handle))
                return -EROFS;
        journal = transaction->t_journal;

        result = 1;

        read_lock(&journal->j_state_lock);

        /* Don't extend a locked-down transaction! */
        if (transaction->t_state != T_RUNNING) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "transaction not running\n", handle, nblocks);
                goto error_out;
        }

        nblocks += DIV_ROUND_UP(
                        handle->h_revoke_credits_requested + revoke_records,
                        journal->j_revoke_records_per_block) -
                   DIV_ROUND_UP(
                        handle->h_revoke_credits_requested,
                        journal->j_revoke_records_per_block);
        spin_lock(&transaction->t_handle_lock);
        wanted = atomic_add_return(nblocks,
                                   &transaction->t_outstanding_credits);

        if (wanted > journal->j_max_transaction_buffers) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "transaction too large\n", handle, nblocks);
                atomic_sub(nblocks, &transaction->t_outstanding_credits);
                goto unlock;
        }

        trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev,
                                 transaction->t_tid,
                                 handle->h_type, handle->h_line_no,
                                 handle->h_total_credits,
                                 nblocks);

        handle->h_total_credits += nblocks;
        handle->h_requested_credits += nblocks;
        handle->h_revoke_credits += revoke_records;
        handle->h_revoke_credits_requested += revoke_records;
        result = 0;

        jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
unlock:
        spin_unlock(&transaction->t_handle_lock);
error_out:
        read_unlock(&journal->j_state_lock);
        return result;
}
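/*
 * Illustrative usage (added commentary): the extend-or-restart pattern that
 * the comment above asks callers to handle usually looks like the sketch
 * below; the credit counts are placeholders.
 *
 *      err = jbd2_journal_extend(handle, 4, 0);
 *      if (err > 0)
 *              err = jbd2_journal_restart(handle, 4);  // commits work so far
 *      if (err)
 *              goto fail;                              // err < 0: real error
 */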
static void stop_this_handle(handle_t *handle)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        int revokes;

        J_ASSERT(journal_current_handle() == handle);
        J_ASSERT(atomic_read(&transaction->t_updates) > 0);
        current->journal_info = NULL;
        /*
         * Subtract necessary revoke descriptor blocks from handle credits. We
         * take care to account only for revoke descriptor blocks the
         * transaction will really need as large sequences of transactions with
         * small numbers of revokes are relatively common.
         */
        revokes = handle->h_revoke_credits_requested - handle->h_revoke_credits;
        if (revokes) {
                int t_revokes, revoke_descriptors;
                int rr_per_blk = journal->j_revoke_records_per_block;

                WARN_ON_ONCE(DIV_ROUND_UP(revokes, rr_per_blk)
                                > handle->h_total_credits);
                t_revokes = atomic_add_return(revokes,
                                &transaction->t_outstanding_revokes);
                revoke_descriptors =
                        DIV_ROUND_UP(t_revokes, rr_per_blk) -
                        DIV_ROUND_UP(t_revokes - revokes, rr_per_blk);
                handle->h_total_credits -= revoke_descriptors;
        }
        atomic_sub(handle->h_total_credits,
                   &transaction->t_outstanding_credits);
        if (handle->h_rsv_handle)
                __jbd2_journal_unreserve_handle(handle->h_rsv_handle,
                                                transaction);
        if (atomic_dec_and_test(&transaction->t_updates))
                wake_up(&journal->j_wait_updates);

        rwsem_release(&journal->j_trans_commit_map, _THIS_IP_);
        /*
         * Scope of the GFP_NOFS context is over here and so we can restore the
         * original alloc context.
         */
        memalloc_nofs_restore(handle->saved_alloc_context);
}
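/*
 * Worked example (added commentary, numbers are illustrative): with 1024
 * revoke records per descriptor block, a handle that requested 10 revoke
 * credits and used 4 of them returns revokes = 6.  If the transaction
 * already had 2040 outstanding revokes, t_revokes becomes 2046 and
 * revoke_descriptors = DIV_ROUND_UP(2046, 1024) - DIV_ROUND_UP(2040, 1024)
 * = 2 - 2 = 0, so no extra descriptor block is charged to this handle.
 */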
/**
 * int jbd2_journal_restart() - restart a handle.
 * @handle: handle to restart
 * @nblocks: nr credits requested
 * @revoke_records: number of revoke record credits requested
 * @gfp_mask: memory allocation flags (for start_this_handle)
 *
 * Restart a handle for a multi-transaction filesystem.
 *
 * If the jbd2_journal_extend() call above fails to grant new buffer credits
 * to a running handle, a call to jbd2_journal_restart will commit the
 * handle's transaction so far and reattach the handle to a new
 * transaction capable of guaranteeing the requested number of
 * credits. We preserve reserved handle if there's any attached to the
 * passed in handle.
 */
int jbd2__journal_restart(handle_t *handle, int nblocks, int revoke_records,
                          gfp_t gfp_mask)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal;
        tid_t           tid;
        int             need_to_start;
        int             ret;

        /* If we've had an abort of any type, don't even think about
         * actually doing the restart! */
        if (is_handle_aborted(handle))
                return 0;
        journal = transaction->t_journal;
        tid = transaction->t_tid;

        /*
         * First unlink the handle from its current transaction, and start the
         * commit on that.
         */
        jbd_debug(2, "restarting handle %p\n", handle);
        stop_this_handle(handle);
        handle->h_transaction = NULL;

        /*
         * TODO: If we use READ_ONCE / WRITE_ONCE for j_commit_request we can
         * get rid of pointless j_state_lock traffic like this.
         */
        read_lock(&journal->j_state_lock);
        need_to_start = !tid_geq(journal->j_commit_request, tid);
        read_unlock(&journal->j_state_lock);
        if (need_to_start)
                jbd2_log_start_commit(journal, tid);
        handle->h_total_credits = nblocks +
                DIV_ROUND_UP(revoke_records,
                             journal->j_revoke_records_per_block);
        handle->h_revoke_credits = revoke_records;
        ret = start_this_handle(journal, handle, gfp_mask);
        trace_jbd2_handle_restart(journal->j_fs_dev->bd_dev,
                                 ret ? 0 : handle->h_transaction->t_tid,
                                 handle->h_type, handle->h_line_no,
                                 handle->h_total_credits);
        return ret;
}
EXPORT_SYMBOL(jbd2__journal_restart);
int jbd2_journal_restart(handle_t *handle, int nblocks)
{
        return jbd2__journal_restart(handle, nblocks, 0, GFP_NOFS);
}
EXPORT_SYMBOL(jbd2_journal_restart);
/**
 * void jbd2_journal_lock_updates () - establish a transaction barrier.
 * @journal:  Journal to establish a barrier on.
 *
 * This locks out any further updates from being started, and blocks
 * until all existing updates have completed, returning only once the
 * journal is in a quiescent state with no updates running.
 *
 * The journal lock should not be held on entry.
 */
void jbd2_journal_lock_updates(journal_t *journal)
{
        DEFINE_WAIT(wait);

        jbd2_might_wait_for_commit(journal);

        write_lock(&journal->j_state_lock);
        ++journal->j_barrier_count;

        /* Wait until there are no reserved handles */
        if (atomic_read(&journal->j_reserved_credits)) {
                write_unlock(&journal->j_state_lock);
                wait_event(journal->j_wait_reserved,
                           atomic_read(&journal->j_reserved_credits) == 0);
                write_lock(&journal->j_state_lock);
        }

        /* Wait until there are no running updates */
        while (1) {
                transaction_t *transaction = journal->j_running_transaction;

                if (!transaction)
                        break;

                spin_lock(&transaction->t_handle_lock);
                prepare_to_wait(&journal->j_wait_updates, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (!atomic_read(&transaction->t_updates)) {
                        spin_unlock(&transaction->t_handle_lock);
                        finish_wait(&journal->j_wait_updates, &wait);
                        break;
                }
                spin_unlock(&transaction->t_handle_lock);
                write_unlock(&journal->j_state_lock);
                schedule();
                finish_wait(&journal->j_wait_updates, &wait);
                write_lock(&journal->j_state_lock);
        }
        write_unlock(&journal->j_state_lock);

        /*
         * We have now established a barrier against other normal updates, but
         * we also need to barrier against other jbd2_journal_lock_updates() calls
         * to make sure that we serialise special journal-locked operations
         * too.
         */
        mutex_lock(&journal->j_barrier);
}
/**
 * void jbd2_journal_unlock_updates (journal_t* journal) - release barrier
 * @journal:  Journal to release the barrier on.
 *
 * Release a transaction barrier obtained with jbd2_journal_lock_updates().
 *
 * Should be called without the journal lock held.
 */
void jbd2_journal_unlock_updates (journal_t *journal)
{
        J_ASSERT(journal->j_barrier_count != 0);

        mutex_unlock(&journal->j_barrier);
        write_lock(&journal->j_state_lock);
        --journal->j_barrier_count;
        write_unlock(&journal->j_state_lock);
        wake_up(&journal->j_wait_transaction_locked);
}
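/*
 * Illustrative usage (added commentary): callers pair the two functions
 * above around operations that must see a quiescent journal, e.g. a
 * hypothetical journal-locked operation:
 *
 *      jbd2_journal_lock_updates(journal);
 *      ... perform the operation that requires no running handles ...
 *      jbd2_journal_unlock_updates(journal);
 */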
static void warn_dirty_buffer(struct buffer_head *bh)
{
        printk(KERN_WARNING
               "JBD2: Spotted dirty metadata buffer (dev = %pg, blocknr = %llu). "
               "There's a risk of filesystem corruption in case of system "
               "crash.\n",
               bh->b_bdev, (unsigned long long)bh->b_blocknr);
}
/* Call t_frozen trigger and copy buffer data into jh->b_frozen_data. */
static void jbd2_freeze_jh_data(struct journal_head *jh)
{
        struct page *page;
        int offset;
        char *source;
        struct buffer_head *bh = jh2bh(jh);

        J_EXPECT_JH(jh, buffer_uptodate(bh), "Possible IO failure.\n");
        page = bh->b_page;
        offset = offset_in_page(bh->b_data);
        source = kmap_atomic(page);
        /* Fire data frozen trigger just before we copy the data */
        jbd2_buffer_frozen_trigger(jh, source + offset, jh->b_triggers);
        memcpy(jh->b_frozen_data, source + offset, bh->b_size);
        kunmap_atomic(source);

        /*
         * Now that the frozen data is saved off, we need to store any matching
         * triggers.
         */
        jh->b_frozen_triggers = jh->b_triggers;
}
/*
 * If the buffer is already part of the current transaction, then there
 * is nothing we need to do. If it is already part of a prior
 * transaction which we are still committing to disk, then we need to
 * make sure that we do not overwrite the old copy: we do copy-out to
 * preserve the copy going to disk. We also account the buffer against
 * the handle's metadata buffer credits (unless the buffer is already
 * part of the transaction, that is).
 */
static int
do_get_write_access(handle_t *handle, struct journal_head *jh,
                        int force_copy)
{
        struct buffer_head *bh;
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal;
        int error;
        char *frozen_buffer = NULL;
        unsigned long start_lock, time_lock;

        journal = transaction->t_journal;

        jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);

        JBUFFER_TRACE(jh, "entry");
repeat:
        bh = jh2bh(jh);

        /* @@@ Need to check for errors here at some point. */
        start_lock = jiffies;
        lock_buffer(bh);
        spin_lock(&jh->b_state_lock);

        /* If it takes too long to lock the buffer, trace it */
        time_lock = jbd2_time_diff(start_lock, jiffies);
        if (time_lock > HZ/10)
                trace_jbd2_lock_buffer_stall(bh->b_bdev->bd_dev,
                        jiffies_to_msecs(time_lock));

        /* We now hold the buffer lock so it is safe to query the buffer
         * state. Is the buffer dirty?
         *
         * If so, there are two possibilities. The buffer may be
         * non-journaled, and undergoing a quite legitimate writeback.
         * Otherwise, it is journaled, and we don't expect dirty buffers
         * in that state (the buffers should be marked JBD_Dirty
         * instead.) So either the IO is being done under our own
         * control and this is a bug, or it's a third party IO such as
         * dump(8) (which may leave the buffer scheduled for read ---
         * ie. locked but not dirty) or tune2fs (which may actually have
         * the buffer dirtied, ugh.) */

        if (buffer_dirty(bh)) {
                /*
                 * First question: is this buffer already part of the current
                 * transaction or the existing committing transaction?
                 */
                if (jh->b_transaction) {
                        J_ASSERT_JH(jh,
                                jh->b_transaction == transaction ||
                                jh->b_transaction ==
                                        journal->j_committing_transaction);
                        if (jh->b_next_transaction)
                                J_ASSERT_JH(jh, jh->b_next_transaction ==
                                                        transaction);
                        warn_dirty_buffer(bh);
                }
                /*
                 * In any case we need to clean the dirty flag and we must
                 * do it under the buffer lock to be sure we don't race
                 * with running write-out.
                 */
                JBUFFER_TRACE(jh, "Journalling dirty buffer");
                clear_buffer_dirty(bh);
                set_buffer_jbddirty(bh);
        }

        unlock_buffer(bh);

        error = -EROFS;
        if (is_handle_aborted(handle)) {
                spin_unlock(&jh->b_state_lock);
                goto out;
        }
        error = 0;

        /*
         * The buffer is already part of this transaction if b_transaction or
         * b_next_transaction points to it
         */
        if (jh->b_transaction == transaction ||
            jh->b_next_transaction == transaction)
                goto done;

        /*
         * this is the first time this transaction is touching this buffer,
         * reset the modified flag
         */
        jh->b_modified = 0;

        /*
         * If the buffer is not journaled right now, we need to make sure it
         * doesn't get written to disk before the caller actually commits the
         * new data
         */
        if (!jh->b_transaction) {
                JBUFFER_TRACE(jh, "no transaction");
                J_ASSERT_JH(jh, !jh->b_next_transaction);
                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                /*
                 * Make sure all stores to jh (b_modified, b_frozen_data) are
                 * visible before attaching it to the running transaction.
                 * Paired with barrier in jbd2_write_access_granted()
                 */
                smp_wmb();
                spin_lock(&journal->j_list_lock);
                __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
                spin_unlock(&journal->j_list_lock);
                goto done;
        }
        /*
         * If there is already a copy-out version of this buffer, then we don't
         * need to make another one
         */
        if (jh->b_frozen_data) {
                JBUFFER_TRACE(jh, "has frozen data");
                J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
                goto attach_next;
        }

        JBUFFER_TRACE(jh, "owned by older transaction");
        J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
        J_ASSERT_JH(jh, jh->b_transaction == journal->j_committing_transaction);

        /*
         * There is one case we have to be very careful about. If the
         * committing transaction is currently writing this buffer out to disk
         * and has NOT made a copy-out, then we cannot modify the buffer
         * contents at all right now. The essence of copy-out is that it is
         * the extra copy, not the primary copy, which gets journaled. If the
         * primary copy is already going to disk then we cannot do copy-out
         * here.
         */
        if (buffer_shadow(bh)) {
                JBUFFER_TRACE(jh, "on shadow: sleep");
                spin_unlock(&jh->b_state_lock);
                wait_on_bit_io(&bh->b_state, BH_Shadow, TASK_UNINTERRUPTIBLE);
                goto repeat;
        }

        /*
         * Only do the copy if the currently-owning transaction still needs it.
         * If buffer isn't on BJ_Metadata list, the committing transaction is
         * past that stage (here we use the fact that BH_Shadow is set under
         * bh_state lock together with refiling to BJ_Shadow list and at this
         * point we know the buffer doesn't have BH_Shadow set).
         *
         * Subtle point, though: if this is a get_undo_access, then we will be
         * relying on the frozen_data to contain the new value of the
         * committed_data record after the transaction, so we HAVE to force the
         * frozen_data copy in that case.
         */
        if (jh->b_jlist == BJ_Metadata || force_copy) {
                JBUFFER_TRACE(jh, "generate frozen data");
                if (!frozen_buffer) {
                        JBUFFER_TRACE(jh, "allocate memory for buffer");
                        spin_unlock(&jh->b_state_lock);
                        frozen_buffer = jbd2_alloc(jh2bh(jh)->b_size,
                                                   GFP_NOFS | __GFP_NOFAIL);
                        goto repeat;
                }
                jh->b_frozen_data = frozen_buffer;
                frozen_buffer = NULL;
                jbd2_freeze_jh_data(jh);
        }
attach_next:
        /*
         * Make sure all stores to jh (b_modified, b_frozen_data) are visible
         * before attaching it to the running transaction. Paired with barrier
         * in jbd2_write_access_granted()
         */
        smp_wmb();
        jh->b_next_transaction = transaction;

done:
        spin_unlock(&jh->b_state_lock);

        /*
         * If we are about to journal a buffer, then any revoke pending on it is
         * cancelled
         */
        jbd2_journal_cancel_revoke(handle, jh);

out:
        if (unlikely(frozen_buffer))    /* It's usually NULL */
                jbd2_free(frozen_buffer, bh->b_size);

        JBUFFER_TRACE(jh, "exit");
        return error;
}
/* Fast check whether buffer is already attached to the required transaction */
static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
                                      bool undo)
{
        struct journal_head *jh;
        bool ret = false;

        /* Dirty buffers require special handling... */
        if (buffer_dirty(bh))
                return false;

        /*
         * RCU protects us from dereferencing freed pages. So the checks we do
         * are guaranteed not to oops. However the jh slab object can get freed
         * & reallocated while we work with it. So we have to be careful. When
         * we see jh attached to the running transaction, we know it must stay
         * so until the transaction is committed. Thus jh won't be freed and
         * will be attached to the same bh while we run. However it can
         * happen jh gets freed, reallocated, and attached to the transaction
         * just after we get pointer to it from bh. So we have to be careful
         * and recheck jh still belongs to our bh before we return success.
         */
        rcu_read_lock();
        if (!buffer_jbd(bh))
                goto out;
        /* This should be bh2jh() but that doesn't work with inline functions */
        jh = READ_ONCE(bh->b_private);
        if (!jh)
                goto out;
        /* For undo access buffer must have data copied */
        if (undo && !jh->b_committed_data)
                goto out;
        if (READ_ONCE(jh->b_transaction) != handle->h_transaction &&
            READ_ONCE(jh->b_next_transaction) != handle->h_transaction)
                goto out;
        /*
         * There are two reasons for the barrier here:
         * 1) Make sure to fetch b_bh after we did previous checks so that we
         * detect when jh went through free, realloc, attach to transaction
         * while we were checking. Paired with implicit barrier in that path.
         * 2) So that access to bh done after jbd2_write_access_granted()
         * doesn't get reordered and see inconsistent state of concurrent
         * do_get_write_access().
         */
        smp_mb();
        if (unlikely(jh->b_bh != bh))
                goto out;
        ret = true;
out:
        rcu_read_unlock();
        return ret;
}
/**
 * int jbd2_journal_get_write_access() - notify intent to modify a buffer
 *                                       for metadata (not data) update.
 * @handle: transaction to add buffer modifications to
 * @bh:     bh to be used for metadata writes
 *
 * Returns: error code or 0 on success.
 *
 * In full data journalling mode the buffer may be of type BJ_AsyncData,
 * because we're ``write()ing`` a buffer which is also part of a shared mapping.
 */

int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
        struct journal_head *jh;
        int rc;

        if (is_handle_aborted(handle))
                return -EROFS;

        if (jbd2_write_access_granted(handle, bh, false))
                return 0;

        jh = jbd2_journal_add_journal_head(bh);
        /* We do not want to get caught playing with fields which the
         * log thread also manipulates. Make sure that the buffer
         * completes any outstanding IO before proceeding. */
        rc = do_get_write_access(handle, jh, 0);
        jbd2_journal_put_journal_head(jh);
        return rc;
}
/*
 * When the user wants to journal a newly created buffer_head
 * (ie. getblk() returned a new buffer and we are going to populate it
 * manually rather than reading off disk), then we need to keep the
 * buffer_head locked until it has been completely filled with new
 * data. In this case, we should be able to make the assertion that
 * the bh is not already part of an existing transaction.
 *
 * The buffer should already be locked by the caller by this point.
 * There is no lock ranking violation: it was a newly created,
 * unlocked buffer beforehand. */

/**
 * int jbd2_journal_get_create_access () - notify intent to use newly created bh
 * @handle: transaction to new buffer to
 * @bh: new buffer.
 *
 * Call this if you create a new bh.
 */
int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal;
        struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        int err;

        jbd_debug(5, "journal_head %p\n", jh);
        err = -EROFS;
        if (is_handle_aborted(handle))
                goto out;
        journal = transaction->t_journal;
        err = 0;

        JBUFFER_TRACE(jh, "entry");
        /*
         * The buffer may already belong to this transaction due to pre-zeroing
         * in the filesystem's new_block code. It may also be on the previous,
         * committing transaction's lists, but it HAS to be in Forget state in
         * that case: the transaction must have deleted the buffer for it to be
         * reused here.
         */
        spin_lock(&jh->b_state_lock);
        J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
                jh->b_transaction == NULL ||
                (jh->b_transaction == journal->j_committing_transaction &&
                          jh->b_jlist == BJ_Forget)));

        J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
        J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));

        if (jh->b_transaction == NULL) {
                /*
                 * Previous jbd2_journal_forget() could have left the buffer
                 * with jbddirty bit set because it was being committed. When
                 * the commit finished, we've filed the buffer for
                 * checkpointing and marked it dirty. Now we are reallocating
                 * the buffer so the transaction freeing it must have
                 * committed and so it's safe to clear the dirty bit.
                 */
                clear_buffer_dirty(jh2bh(jh));
                /* first access by this transaction */
                jh->b_modified = 0;

                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                spin_lock(&journal->j_list_lock);
                __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
                spin_unlock(&journal->j_list_lock);
        } else if (jh->b_transaction == journal->j_committing_transaction) {
                /* first access by this transaction */
                jh->b_modified = 0;

                JBUFFER_TRACE(jh, "set next transaction");
                spin_lock(&journal->j_list_lock);
                jh->b_next_transaction = transaction;
                spin_unlock(&journal->j_list_lock);
        }
        spin_unlock(&jh->b_state_lock);

        /*
         * akpm: I added this. ext3_alloc_branch can pick up new indirect
         * blocks which contain freed but then revoked metadata. We need
         * to cancel the revoke in case we end up freeing it yet again
         * and the reallocating as data - this would cause a second revoke,
         * which hits an assertion error.
         */
        JBUFFER_TRACE(jh, "cancelling revoke");
        jbd2_journal_cancel_revoke(handle, jh);
out:
        jbd2_journal_put_journal_head(jh);
        return err;
}
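/*
 * Illustrative usage (added commentary): a filesystem that allocates a brand
 * new metadata block typically does something like the sketch below; the
 * block number and surrounding helpers are placeholders.
 *
 *      bh = sb_getblk(sb, blocknr);
 *      lock_buffer(bh);
 *      err = jbd2_journal_get_create_access(handle, bh);
 *      if (!err) {
 *              memset(bh->b_data, 0, bh->b_size);
 *              set_buffer_uptodate(bh);
 *      }
 *      unlock_buffer(bh);
 *      if (!err)
 *              err = jbd2_journal_dirty_metadata(handle, bh);
 */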
/**
 * int jbd2_journal_get_undo_access() - Notify intent to modify metadata with
 *     non-rewindable consequences
 * @handle: transaction
 * @bh: buffer to undo
 *
 * Sometimes there is a need to distinguish between metadata which has
 * been committed to disk and that which has not. The ext3fs code uses
 * this for freeing and allocating space, we have to make sure that we
 * do not reuse freed space until the deallocation has been committed,
 * since if we overwrote that space we would make the delete
 * un-rewindable in case of a crash.
 *
 * To deal with that, jbd2_journal_get_undo_access requests write access to a
 * buffer for parts of non-rewindable operations such as delete
 * operations on the bitmaps. The journaling code must keep a copy of
 * the buffer's contents prior to the undo_access call until such time
 * as we know that the buffer has definitely been committed to disk.
 *
 * We never need to know which transaction the committed data is part
 * of, buffers touched here are guaranteed to be dirtied later and so
 * will be committed to a new transaction in due course, at which point
 * we can discard the old committed data pointer.
 *
 * Returns error number or 0 on success.
 */
int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
{
        int err;
        struct journal_head *jh;
        char *committed_data = NULL;

        if (is_handle_aborted(handle))
                return -EROFS;

        if (jbd2_write_access_granted(handle, bh, true))
                return 0;

        jh = jbd2_journal_add_journal_head(bh);
        JBUFFER_TRACE(jh, "entry");

        /*
         * Do this first --- it can drop the journal lock, so we want to
         * make sure that obtaining the committed_data is done
         * atomically wrt. completion of any outstanding commits.
         */
        err = do_get_write_access(handle, jh, 1);
        if (err)
                goto out;

repeat:
        if (!jh->b_committed_data)
                committed_data = jbd2_alloc(jh2bh(jh)->b_size,
                                            GFP_NOFS|__GFP_NOFAIL);

        spin_lock(&jh->b_state_lock);
        if (!jh->b_committed_data) {
                /* Copy out the current buffer contents into the
                 * preserved, committed copy. */
                JBUFFER_TRACE(jh, "generate b_committed data");
                if (!committed_data) {
                        spin_unlock(&jh->b_state_lock);
                        goto repeat;
                }

                jh->b_committed_data = committed_data;
                committed_data = NULL;
                memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
        }
        spin_unlock(&jh->b_state_lock);
out:
        jbd2_journal_put_journal_head(jh);
        if (unlikely(committed_data))
                jbd2_free(committed_data, bh->b_size);
        return err;
}
/**
 * void jbd2_journal_set_triggers() - Add triggers for commit writeout
 * @bh: buffer to trigger on
 * @type: struct jbd2_buffer_trigger_type containing the trigger(s).
 *
 * Set any triggers on this journal_head. This is always safe, because
 * triggers for a committing buffer will be saved off, and triggers for
 * a running transaction will match the buffer in that transaction.
 *
 * Call with NULL to clear the triggers.
 */
void jbd2_journal_set_triggers(struct buffer_head *bh,
                               struct jbd2_buffer_trigger_type *type)
{
        struct journal_head *jh = jbd2_journal_grab_journal_head(bh);

        if (WARN_ON(!jh))
                return;
        jh->b_triggers = type;
        jbd2_journal_put_journal_head(jh);
}
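/*
 * Illustrative usage (added commentary): a client of the trigger API supplies
 * a struct jbd2_buffer_trigger_type whose t_frozen callback can recompute
 * per-block metadata (for example a checksum) in the copy that is about to
 * be written to the journal.  A hypothetical sketch, names invented:
 *
 *      static void my_frozen(struct jbd2_buffer_trigger_type *triggers,
 *                            struct buffer_head *bh, void *data, size_t size)
 *      {
 *              // recompute a checksum stored inside 'data' here
 *      }
 *      static struct jbd2_buffer_trigger_type my_triggers = {
 *              .t_frozen = my_frozen,
 *      };
 *      ...
 *      jbd2_journal_set_triggers(bh, &my_triggers);
 */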
void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
                                struct jbd2_buffer_trigger_type *triggers)
{
        struct buffer_head *bh = jh2bh(jh);

        if (!triggers || !triggers->t_frozen)
                return;

        triggers->t_frozen(triggers, bh, mapped_data, bh->b_size);
}

void jbd2_buffer_abort_trigger(struct journal_head *jh,
                               struct jbd2_buffer_trigger_type *triggers)
{
        if (!triggers || !triggers->t_abort)
                return;

        triggers->t_abort(triggers, jh2bh(jh));
}
/**
 * int jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata
 * @handle: transaction to add buffer to.
 * @bh: buffer to mark
 *
 * mark dirty metadata which needs to be journaled as part of the current
 * transaction.
 *
 * The buffer must have previously had jbd2_journal_get_write_access()
 * called so that it has a valid journal_head attached to the buffer
 * head.
 *
 * The buffer is placed on the transaction's metadata list and is marked
 * as belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * Special care needs to be taken if the buffer already belongs to the
 * current committing transaction (in which case we should have frozen
 * data present for that commit). In that case, we don't relink the
 * buffer: that only gets done when the old transaction finally
 * completes its commit.
 */
int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal;
        struct journal_head *jh;
        int ret = 0;

        if (is_handle_aborted(handle))
                return -EROFS;
        if (!buffer_jbd(bh))
                return -EUCLEAN;

        /*
         * We don't grab jh reference here since the buffer must be part
         * of the running transaction.
         */
        jh = bh2jh(bh);
        jbd_debug(5, "journal_head %p\n", jh);
        JBUFFER_TRACE(jh, "entry");

        /*
         * This and the following assertions are unreliable since we may see jh
         * in inconsistent state unless we grab bh_state lock. But this is
         * crucial to catch bugs so let's do a reliable check until the
         * lockless handling is fully proven.
         */
        if (jh->b_transaction != transaction &&
            jh->b_next_transaction != transaction) {
                spin_lock(&jh->b_state_lock);
                J_ASSERT_JH(jh, jh->b_transaction == transaction ||
                                jh->b_next_transaction == transaction);
                spin_unlock(&jh->b_state_lock);
        }
        if (jh->b_modified == 1) {
                /* If it's in our transaction it must be in BJ_Metadata list. */
                if (jh->b_transaction == transaction &&
                    jh->b_jlist != BJ_Metadata) {
                        spin_lock(&jh->b_state_lock);
                        if (jh->b_transaction == transaction &&
                            jh->b_jlist != BJ_Metadata)
                                pr_err("JBD2: assertion failure: h_type=%u "
                                       "h_line_no=%u block_no=%llu jlist=%u\n",
                                       handle->h_type, handle->h_line_no,
                                       (unsigned long long) bh->b_blocknr,
                                       jh->b_jlist);
                        J_ASSERT_JH(jh, jh->b_transaction != transaction ||
                                        jh->b_jlist == BJ_Metadata);
                        spin_unlock(&jh->b_state_lock);
                }
                goto out;
        }

        journal = transaction->t_journal;
        spin_lock(&jh->b_state_lock);

        if (jh->b_modified == 0) {
                /*
                 * This buffer's got modified and becoming part
                 * of the transaction. This needs to be done
                 * once a transaction -bzzz
                 */
                if (WARN_ON_ONCE(jbd2_handle_buffer_credits(handle) <= 0)) {
                        ret = -ENOSPC;
                        goto out_unlock_bh;
                }
                jh->b_modified = 1;
                handle->h_total_credits--;
        }

        /*
         * fastpath, to avoid expensive locking. If this buffer is already
         * on the running transaction's metadata list there is nothing to do.
         * Nobody can take it off again because there is a handle open.
         * I _think_ we're OK here with SMP barriers - a mistaken decision will
         * result in this test being false, so we go in and take the locks.
         */
        if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
                JBUFFER_TRACE(jh, "fastpath");
                if (unlikely(jh->b_transaction !=
                             journal->j_running_transaction)) {
                        printk(KERN_ERR "JBD2: %s: "
                               "jh->b_transaction (%llu, %p, %u) != "
                               "journal->j_running_transaction (%p, %u)\n",
                               journal->j_devname,
                               (unsigned long long) bh->b_blocknr,
                               jh->b_transaction,
                               jh->b_transaction ? jh->b_transaction->t_tid : 0,
                               journal->j_running_transaction,
                               journal->j_running_transaction ?
                               journal->j_running_transaction->t_tid : 0);
                        ret = -EINVAL;
                }
                goto out_unlock_bh;
        }

        set_buffer_jbddirty(bh);

        /*
         * Metadata already on the current transaction list doesn't
         * need to be filed. Metadata on another transaction's list must
         * be committing, and will be refiled once the commit completes:
         * leave it alone for now.
         */
        if (jh->b_transaction != transaction) {
                JBUFFER_TRACE(jh, "already on other transaction");
                if (unlikely(((jh->b_transaction !=
                               journal->j_committing_transaction)) ||
                             (jh->b_next_transaction != transaction))) {
                        printk(KERN_ERR "jbd2_journal_dirty_metadata: %s: "
                               "bad jh for block %llu: "
                               "transaction (%p, %u), "
                               "jh->b_transaction (%p, %u), "
                               "jh->b_next_transaction (%p, %u), jlist %u\n",
                               journal->j_devname,
                               (unsigned long long) bh->b_blocknr,
                               transaction, transaction->t_tid,
                               jh->b_transaction,
                               jh->b_transaction ?
                               jh->b_transaction->t_tid : 0,
                               jh->b_next_transaction,
                               jh->b_next_transaction ?
                               jh->b_next_transaction->t_tid : 0,
                               jh->b_jlist);
                        WARN_ON(1);
                        ret = -EINVAL;
                }
                /* And this case is illegal: we can't reuse another
                 * transaction's data buffer, ever. */
                goto out_unlock_bh;
        }

        /* That test should have eliminated the following case: */
        J_ASSERT_JH(jh, jh->b_frozen_data == NULL);

        JBUFFER_TRACE(jh, "file as BJ_Metadata");
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_file_buffer(jh, transaction, BJ_Metadata);
        spin_unlock(&journal->j_list_lock);
out_unlock_bh:
        spin_unlock(&jh->b_state_lock);
out:
        JBUFFER_TRACE(jh, "exit");
        return ret;
}
/**
 * void jbd2_journal_forget() - bforget() for potentially-journaled buffers.
 * @handle: transaction handle
 * @bh:     bh to 'forget'
 *
 * We can only do the bforget if there are no commits pending against the
 * buffer. If the buffer is dirty in the current running transaction we
 * can safely unlink it.
 *
 * bh may not be a journalled buffer at all - it may be a non-JBD
 * buffer which came off the hashtable. Check for this.
 *
 * Decrements bh->b_count by one.
 *
 * Allow this call even if the handle has aborted --- it may be part of
 * the caller's cleanup after an abort.
 */
int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal;
        struct journal_head *jh;
        int drop_reserve = 0;
        int err = 0;
        int was_modified = 0;

        if (is_handle_aborted(handle))
                return -EROFS;
        journal = transaction->t_journal;

        BUFFER_TRACE(bh, "entry");

        jh = jbd2_journal_grab_journal_head(bh);
        if (!jh) {
                __bforget(bh);
                return 0;
        }

        spin_lock(&jh->b_state_lock);

        /* Critical error: attempting to delete a bitmap buffer, maybe?
         * Don't do any jbd operations, and return an error. */
        if (!J_EXPECT_JH(jh, !jh->b_committed_data,
                         "inconsistent data on disk")) {
                err = -EIO;
                goto drop;
        }

        /* keep track of whether or not this transaction modified us */
        was_modified = jh->b_modified;

        /*
         * The buffer's going from the transaction, we must drop
         * all references -bzzz
         */
        jh->b_modified = 0;

        if (jh->b_transaction == transaction) {
                J_ASSERT_JH(jh, !jh->b_frozen_data);

                /* If we are forgetting a buffer which is already part
                 * of this transaction, then we can just drop it from
                 * the transaction immediately. */
                clear_buffer_dirty(bh);
                clear_buffer_jbddirty(bh);

                JBUFFER_TRACE(jh, "belongs to current transaction: unfile");

                /*
                 * we only want to drop a reference if this transaction
                 * modified the buffer
                 */
                if (was_modified)
                        drop_reserve = 1;

                /*
                 * We are no longer going to journal this buffer.
                 * However, the commit of this transaction is still
                 * important to the buffer: the delete that we are now
                 * processing might obsolete an old log entry, so by
                 * committing, we can satisfy the buffer's checkpoint.
                 *
                 * So, if we have a checkpoint on the buffer, we should
                 * now refile the buffer on our BJ_Forget list so that
                 * we know to remove the checkpoint after we commit.
                 */
                spin_lock(&journal->j_list_lock);
                if (jh->b_cp_transaction) {
                        __jbd2_journal_temp_unlink_buffer(jh);
                        __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
                } else {
                        __jbd2_journal_unfile_buffer(jh);
                        jbd2_journal_put_journal_head(jh);
                }
                spin_unlock(&journal->j_list_lock);
        } else if (jh->b_transaction) {
                J_ASSERT_JH(jh, (jh->b_transaction ==
                                 journal->j_committing_transaction));
                /* However, if the buffer is still owned by a prior
                 * (committing) transaction, we can't drop it yet... */
                JBUFFER_TRACE(jh, "belongs to older transaction");
                /* ... but we CAN drop it from the new transaction through
                 * marking the buffer as freed and set j_next_transaction to
                 * the new transaction, so that not only the commit code
                 * knows it should clear dirty bits when it is done with the
                 * buffer, but also the buffer can be checkpointed only
                 * after the new transaction commits. */
                set_buffer_freed(bh);

                if (!jh->b_next_transaction) {
                        spin_lock(&journal->j_list_lock);
                        jh->b_next_transaction = transaction;
                        spin_unlock(&journal->j_list_lock);
                } else {
                        J_ASSERT(jh->b_next_transaction == transaction);

                        /*
                         * only drop a reference if this transaction modified
                         * the buffer
                         */
                        if (was_modified)
                                drop_reserve = 1;
                }
        } else {
                /*
                 * Finally, if the buffer is not belongs to any
                 * transaction, we can just drop it now if it has no
                 * checkpoint.
                 */
                spin_lock(&journal->j_list_lock);
                if (!jh->b_cp_transaction) {
                        JBUFFER_TRACE(jh, "belongs to none transaction");
                        spin_unlock(&journal->j_list_lock);
                        goto drop;
                }

                /*
                 * Otherwise, if the buffer has been written to disk,
                 * it is safe to remove the checkpoint and drop it.
                 */
                if (!buffer_dirty(bh)) {
                        __jbd2_journal_remove_checkpoint(jh);
                        spin_unlock(&journal->j_list_lock);
                        goto drop;
                }

                /*
                 * The buffer is still not written to disk, we should
                 * attach this buffer to current transaction so that the
                 * buffer can be checkpointed only after the current
                 * transaction commits.
                 */
                clear_buffer_dirty(bh);
                __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
                spin_unlock(&journal->j_list_lock);
        }
drop:
        __brelse(bh);
        spin_unlock(&jh->b_state_lock);
        jbd2_journal_put_journal_head(jh);
        if (drop_reserve) {
                /* no need to reserve log space for this block -bzzz */
                handle->h_total_credits++;
        }
        return err;
}
1763 * int jbd2_journal_stop() - complete a transaction
1764 * @handle: transaction to complete.
1766 * All done for a particular handle.
1768 * There is not much action needed here. We just return any remaining
1769 * buffer credits to the transaction and remove the handle. The only
1770 * complication is that we need to start a commit operation if the
1771 * filesystem is marked for synchronous update.
1773 * jbd2_journal_stop itself will not usually return an error, but it may
1774 * do so in unusual circumstances. In particular, expect it to
1775 * return -EIO if a jbd2_journal_abort has been executed since the
1776 * transaction began.
int jbd2_journal_stop(handle_t *handle)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	int err = 0, wait_for_commit = 0;
	tid_t tid;
	pid_t pid;

	if (--handle->h_ref > 0) {
		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
			  handle->h_ref);
		if (is_handle_aborted(handle))
			return -EIO;
		return 0;
	}
	if (!transaction) {
		/*
		 * Handle is already detached from the transaction so there is
		 * nothing to do other than free the handle.
		 */
		memalloc_nofs_restore(handle->saved_alloc_context);
		goto free_and_exit;
	}
	journal = transaction->t_journal;
	tid = transaction->t_tid;

	if (is_handle_aborted(handle))
		err = -EIO;

	jbd_debug(4, "Handle %p going down\n", handle);
	trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev,
				tid, handle->h_type, handle->h_line_no,
				jiffies - handle->h_start_jiffies,
				handle->h_sync, handle->h_requested_credits,
				(handle->h_requested_credits -
				 handle->h_total_credits));

	/*
	 * Implement synchronous transaction batching.  If the handle
	 * was synchronous, don't force a commit immediately.  Let's
	 * yield and let another thread piggyback onto this
	 * transaction.  Keep doing that while new threads continue to
	 * arrive.  It doesn't cost much - we're about to run a commit
	 * and sleep on IO anyway.  Speeds up many-threaded, many-dir
	 * operations by 30x or more...
	 *
	 * We try and optimize the sleep time against what the
	 * underlying disk can do, instead of having a static sleep
	 * time.  This is useful for the case where our storage is so
	 * fast that it is more optimal to go ahead and force a flush
	 * and wait for the transaction to be committed than it is to
	 * wait for an arbitrary amount of time for new writers to
	 * join the transaction.  We achieve this by measuring how
	 * long it takes to commit a transaction, and compare it with
	 * how long this transaction has been running, and if run time
	 * < commit time then we sleep for the delta and commit.  This
	 * greatly helps super fast disks that would see slowdowns as
	 * more threads started doing fsyncs.
	 *
	 * But don't do this if this process was the most recent one
	 * to perform a synchronous write.  We do this to detect the
	 * case where a single process is doing a stream of sync
	 * writes.  No point in waiting for joiners in that case.
	 *
	 * Setting max_batch_time to 0 disables this completely.
	 */
	pid = current->pid;
	if (handle->h_sync && journal->j_last_sync_writer != pid &&
	    journal->j_max_batch_time) {
		u64 commit_time, trans_time;

		journal->j_last_sync_writer = pid;

		read_lock(&journal->j_state_lock);
		commit_time = journal->j_average_commit_time;
		read_unlock(&journal->j_state_lock);

		trans_time = ktime_to_ns(ktime_sub(ktime_get(),
						   transaction->t_start_time));

		commit_time = max_t(u64, commit_time,
				    1000*journal->j_min_batch_time);
		commit_time = min_t(u64, commit_time,
				    1000*journal->j_max_batch_time);

		if (trans_time < commit_time) {
			ktime_t expires = ktime_add_ns(ktime_get(),
						       commit_time);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
		}
	}

	if (handle->h_sync)
		transaction->t_synchronous_commit = 1;

	/*
	 * If the handle is marked SYNC, we need to set another commit
	 * going!  We also want to force a commit if the transaction is too
	 * old now.
	 */
	if (handle->h_sync ||
	    time_after_eq(jiffies, transaction->t_expires)) {
		/* Do this even for aborted journals: an abort still
		 * completes the commit thread, it just doesn't write
		 * anything to disk. */

		jbd_debug(2, "transaction too old, requesting commit for "
			     "handle %p\n", handle);
		/* This is non-blocking */
		jbd2_log_start_commit(journal, tid);

		/*
		 * Special case: JBD2_SYNC synchronous updates require us
		 * to wait for the commit to complete.
		 */
		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
			wait_for_commit = 1;
	}

	/*
	 * Once stop_this_handle() drops t_updates, the transaction could start
	 * committing on us and eventually disappear.  So we must not
	 * dereference transaction pointer again after calling
	 * stop_this_handle().
	 */
	stop_this_handle(handle);

	if (wait_for_commit)
		err = jbd2_log_wait_commit(journal, tid);

free_and_exit:
	if (handle->h_rsv_handle)
		jbd2_free_handle(handle->h_rsv_handle);
	jbd2_free_handle(handle);
	return err;
}
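/*
 * Illustrative sketch (added commentary, not part of the original file): the
 * typical caller pattern around jbd2_journal_stop().  A filesystem starts a
 * handle, declares the metadata buffer it is about to modify, dirties it and
 * finally stops the handle.  The function name and the single-credit
 * reservation are assumptions made for this example only.
 */
static int __maybe_unused example_modify_one_buffer(journal_t *journal,
						    struct buffer_head *bh)
{
	handle_t *handle;
	int err, err2;

	/* Reserve one buffer credit for this update. */
	handle = jbd2_journal_start(journal, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = jbd2_journal_get_write_access(handle, bh);
	if (!err) {
		/* ... modify bh->b_data under the handle here ... */
		err = jbd2_journal_dirty_metadata(handle, bh);
	}

	/* Returns unused credits and may request/wait for a commit. */
	err2 = jbd2_journal_stop(handle);
	return err ? err : err2;
}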
/*
 * List management code snippets: various functions for manipulating the
 * transaction buffer lists.
 *
 */

/*
 * Append a buffer to a transaction list, given the transaction's list head
 * pointer.
 *
 * j_list_lock is held.
 *
 * jh->b_state_lock is held.
 */
static inline void
__blist_add_buffer(struct journal_head **list, struct journal_head *jh)
{
	if (!*list) {
		jh->b_tnext = jh->b_tprev = jh;
		*list = jh;
	} else {
		/* Insert at the tail of the list to preserve order */
		struct journal_head *first = *list, *last = first->b_tprev;
		jh->b_tprev = last;
		jh->b_tnext = first;
		last->b_tnext = first->b_tprev = jh;
	}
}
/*
 * Remove a buffer from a transaction list, given the transaction's list
 * head pointer.
 *
 * Called with j_list_lock held, and the journal may not be locked.
 *
 * jh->b_state_lock is held.
 */
static inline void
__blist_del_buffer(struct journal_head **list, struct journal_head *jh)
{
	if (*list == jh) {
		*list = jh->b_tnext;
		if (*list == jh)
			*list = NULL;
	}
	jh->b_tprev->b_tnext = jh->b_tnext;
	jh->b_tnext->b_tprev = jh->b_tprev;
}
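/*
 * Note (added commentary): the two helpers above keep each per-transaction
 * buffer list as a circular doubly-linked list threaded through jh->b_tnext
 * and jh->b_tprev, with *list pointing at the head element (or NULL when the
 * list is empty), so filing and unfiling a buffer are both O(1).
 */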
/*
 * Remove a buffer from the appropriate transaction list.
 *
 * Note that this function can *change* the value of
 * bh->b_transaction->t_buffers, t_forget, t_shadow_list, t_log_list or
 * t_reserved_list.  If the caller is holding onto a copy of one of these
 * pointers, it could go bad.  Generally the caller needs to re-read the
 * pointer from the transaction_t.
 *
 * Called under j_list_lock.
 */
static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
{
	struct journal_head **list = NULL;
	transaction_t *transaction;
	struct buffer_head *bh = jh2bh(jh);

	lockdep_assert_held(&jh->b_state_lock);
	transaction = jh->b_transaction;
	if (transaction)
		assert_spin_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	if (jh->b_jlist != BJ_None)
		J_ASSERT_JH(jh, transaction != NULL);

	switch (jh->b_jlist) {
	case BJ_None:
		return;
	case BJ_Metadata:
		transaction->t_nr_buffers--;
		J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	}

	__blist_del_buffer(list, jh);
	jh->b_jlist = BJ_None;
	if (transaction && is_journal_aborted(transaction->t_journal))
		clear_buffer_jbddirty(bh);
	else if (test_clear_buffer_jbddirty(bh))
		mark_buffer_dirty(bh);	/* Expose it to the VM */
}
/*
 * Remove buffer from all transactions. The caller is responsible for dropping
 * the jh reference that belonged to the transaction.
 *
 * Called with bh_state lock and j_list_lock
 */
static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
{
	J_ASSERT_JH(jh, jh->b_transaction != NULL);
	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);

	__jbd2_journal_temp_unlink_buffer(jh);
	jh->b_transaction = NULL;
}
void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
{
	struct buffer_head *bh = jh2bh(jh);

	/* Get reference so that buffer cannot be freed before we unlock it */
	get_bh(bh);
	spin_lock(&jh->b_state_lock);
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_unfile_buffer(jh);
	spin_unlock(&journal->j_list_lock);
	spin_unlock(&jh->b_state_lock);
	jbd2_journal_put_journal_head(jh);
	__brelse(bh);
}
/*
 * Called from jbd2_journal_try_to_free_buffers().
 *
 * Called under jh->b_state_lock
 */
static void
__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
{
	struct journal_head *jh;

	jh = bh2jh(bh);

	if (buffer_locked(bh) || buffer_dirty(bh))
		goto out;

	if (jh->b_next_transaction != NULL || jh->b_transaction != NULL)
		goto out;

	spin_lock(&journal->j_list_lock);
	if (jh->b_cp_transaction != NULL) {
		/* written-back checkpointed metadata buffer */
		JBUFFER_TRACE(jh, "remove from checkpoint list");
		__jbd2_journal_remove_checkpoint(jh);
	}
	spin_unlock(&journal->j_list_lock);
out:
	return;
}
/**
 * int jbd2_journal_try_to_free_buffers() - try to free page buffers.
 * @journal: journal for operation
 * @page: to try and free
 *
 * For all the buffers on this page,
 * if they are fully written out ordered data, move them onto BUF_CLEAN
 * so try_to_free_buffers() can reap them.
 *
 * This function returns non-zero if we wish try_to_free_buffers()
 * to be called. We do this if the page is releasable by try_to_free_buffers().
 * We also do it if the page has locked or dirty buffers and the caller wants
 * us to perform sync or async writeout.
 *
 * This complicates JBD locking somewhat.  We aren't protected by the
 * BKL here.  We wish to remove the buffer from its committing or
 * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
 *
 * This may *change* the value of transaction_t->t_datalist, so anyone
 * who looks at t_datalist needs to lock against this function.
 *
 * Even worse, someone may be doing a jbd2_journal_dirty_data on this
 * buffer.  So we need to lock against that.  jbd2_journal_dirty_data()
 * will come out of the lock with the buffer dirty, which makes it
 * ineligible for release here.
 *
 * Who else is affected by this?  hmm...  Really the only contender
 * is do_get_write_access() - it could be looking at the buffer while
 * journal_try_to_free_buffer() is changing its state.  But that
 * cannot happen because we never reallocate freed data as metadata
 * while the data is part of a transaction.  Yes?
 *
 * Return 0 on failure, 1 on success
 */
int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page)
{
	struct buffer_head *head;
	struct buffer_head *bh;
	bool has_write_io_error = false;
	int ret = 0;

	J_ASSERT(PageLocked(page));

	head = page_buffers(page);
	bh = head;
	do {
		struct journal_head *jh;

		/*
		 * We take our own ref against the journal_head here to avoid
		 * having to add tons of locking around each instance of
		 * jbd2_journal_put_journal_head().
		 */
		jh = jbd2_journal_grab_journal_head(bh);
		if (!jh)
			continue;

		spin_lock(&jh->b_state_lock);
		__journal_try_to_free_buffer(journal, bh);
		spin_unlock(&jh->b_state_lock);
		jbd2_journal_put_journal_head(jh);
		if (buffer_jbd(bh))
			goto busy;

		/*
		 * If we free a metadata buffer which has been failed to
		 * write out, the jbd2 checkpoint procedure will not detect
		 * this failure and may lead to filesystem inconsistency
		 * after cleanup journal tail.
		 */
		if (buffer_write_io_error(bh)) {
			pr_err("JBD2: Error while async write back metadata bh %llu.",
			       (unsigned long long)bh->b_blocknr);
			has_write_io_error = true;
		}
	} while ((bh = bh->b_this_page) != head);

	ret = try_to_free_buffers(page);

busy:
	if (has_write_io_error)
		jbd2_journal_abort(journal, -EIO);

	return ret;
}
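/*
 * Illustrative sketch (added commentary, not part of the original file): how
 * a filesystem's ->releasepage callback might forward to
 * jbd2_journal_try_to_free_buffers().  The function name and the NULL-journal
 * fallback are assumptions made for this example.
 */
static int __maybe_unused example_releasepage(journal_t *journal,
					      struct page *page)
{
	if (journal)
		return jbd2_journal_try_to_free_buffers(journal, page);
	/* No journal: fall back to the generic buffer reaping helper. */
	return try_to_free_buffers(page);
}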
/*
 * This buffer is no longer needed.  If it is on an older transaction's
 * checkpoint list we need to record it on this transaction's forget list
 * to pin this buffer (and hence its checkpointing transaction) down until
 * this transaction commits.  If the buffer isn't on a checkpoint list, we
 * release it.
 * Returns non-zero if JBD no longer has an interest in the buffer.
 *
 * Called under j_list_lock.
 *
 * Called under jh->b_state_lock.
 */
static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
{
	int may_free = 1;
	struct buffer_head *bh = jh2bh(jh);

	if (jh->b_cp_transaction) {
		JBUFFER_TRACE(jh, "on running+cp transaction");
		__jbd2_journal_temp_unlink_buffer(jh);
		/*
		 * We don't want to write the buffer anymore, clear the
		 * bit so that we don't confuse checks in
		 * __journal_file_buffer
		 */
		clear_buffer_dirty(bh);
		__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
		may_free = 0;
	} else {
		JBUFFER_TRACE(jh, "on running transaction");
		__jbd2_journal_unfile_buffer(jh);
		jbd2_journal_put_journal_head(jh);
	}
	return may_free;
}
/*
 * jbd2_journal_invalidatepage
 *
 * This code is tricky.  It has a number of cases to deal with.
 *
 * There are two invariants which this code relies on:
 *
 * i_size must be updated on disk before we start calling invalidatepage on the
 * data.
 *
 *  This is done in ext3 by defining an ext3_setattr method which
 *  updates i_size before truncate gets going.  By maintaining this
 *  invariant, we can be sure that it is safe to throw away any buffers
 *  attached to the current transaction: once the transaction commits,
 *  we know that the data will not be needed.
 *
 *  Note however that we can *not* throw away data belonging to the
 *  previous, committing transaction!
 *
 * Any disk blocks which *are* part of the previous, committing
 * transaction (and which therefore cannot be discarded immediately) are
 * not going to be reused in the new running transaction
 *
 *  The bitmap committed_data images guarantee this: any block which is
 *  allocated in one transaction and removed in the next will be marked
 *  as in-use in the committed_data bitmap, so cannot be reused until
 *  the next transaction to delete the block commits.  This means that
 *  leaving committing buffers dirty is quite safe: the disk blocks
 *  cannot be reallocated to a different file and so buffer aliasing is
 *  not possible.
 *
 *
 * The above applies mainly to ordered data mode.  In writeback mode we
 * don't make guarantees about the order in which data hits disk --- in
 * particular we don't guarantee that new dirty data is flushed before
 * transaction commit --- so it is always safe just to discard data
 * immediately in that mode.  --sct
 */

/*
 * The journal_unmap_buffer helper function returns zero if the buffer
 * concerned remains pinned as an anonymous buffer belonging to an older
 * transaction.
 *
 * We're outside-transaction here.  Either or both of j_running_transaction
 * and j_committing_transaction may be NULL.
 */
static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
				int partial_page)
{
	transaction_t *transaction;
	struct journal_head *jh;
	int may_free = 1;

	BUFFER_TRACE(bh, "entry");

	/*
	 * It is safe to proceed here without the j_list_lock because the
	 * buffers cannot be stolen by try_to_free_buffers as long as we are
	 * holding the page lock. --sct
	 */

	jh = jbd2_journal_grab_journal_head(bh);
	if (!jh)
		goto zap_buffer_unlocked;

	/* OK, we have data buffer in journaled mode */
	write_lock(&journal->j_state_lock);
	spin_lock(&jh->b_state_lock);
	spin_lock(&journal->j_list_lock);

	/*
	 * We cannot remove the buffer from checkpoint lists until the
	 * transaction adding inode to orphan list (let's call it T)
	 * is committed.  Otherwise if the transaction changing the
	 * buffer would be cleaned from the journal before T is
	 * committed, a crash will cause that the correct contents of
	 * the buffer will be lost.  On the other hand we have to
	 * clear the buffer dirty bit at latest at the moment when the
	 * transaction marking the buffer as freed in the filesystem
	 * structures is committed because from that moment on the
	 * block can be reallocated and used by a different page.
	 * Since the block hasn't been freed yet but the inode has
	 * already been added to orphan list, it is safe for us to add
	 * the buffer to BJ_Forget list of the newest transaction.
	 *
	 * Also we have to clear buffer_mapped flag of a truncated buffer
	 * because the buffer_head may be attached to the page straddling
	 * i_size (can happen only when blocksize < pagesize) and thus the
	 * buffer_head can be reused when the file is extended again. So we end
	 * up keeping around invalidated buffers attached to transactions'
	 * BJ_Forget list just to stop checkpointing code from cleaning up
	 * the transaction this buffer was modified in.
	 */
	transaction = jh->b_transaction;
	if (transaction == NULL) {
		/* First case: not on any transaction.  If it
		 * has no checkpoint link, then we can zap it:
		 * it's a writeback-mode buffer so we don't care
		 * if it hits disk safely. */
		if (!jh->b_cp_transaction) {
			JBUFFER_TRACE(jh, "not on any transaction: zap");
			goto zap_buffer;
		}

		if (!buffer_dirty(bh)) {
			/* bdflush has written it.  We can drop it now */
			__jbd2_journal_remove_checkpoint(jh);
			goto zap_buffer;
		}

		/* OK, it must be in the journal but still not
		 * written fully to disk: it's metadata or
		 * journaled data... */

		if (journal->j_running_transaction) {
			/* ... and once the current transaction has
			 * committed, the buffer won't be needed any
			 * longer. */
			JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
			may_free = __dispose_buffer(jh,
					journal->j_running_transaction);
			goto zap_buffer;
		} else {
			/* There is no currently-running transaction. So the
			 * orphan record which we wrote for this file must have
			 * passed into commit.  We must attach this buffer to
			 * the committing transaction, if it exists. */
			if (journal->j_committing_transaction) {
				JBUFFER_TRACE(jh, "give to committing trans");
				may_free = __dispose_buffer(jh,
					journal->j_committing_transaction);
				goto zap_buffer;
			} else {
				/* The orphan record's transaction has
				 * committed.  We can cleanse this buffer */
				clear_buffer_jbddirty(bh);
				__jbd2_journal_remove_checkpoint(jh);
				goto zap_buffer;
			}
		}
	} else if (transaction == journal->j_committing_transaction) {
		JBUFFER_TRACE(jh, "on committing transaction");
		/*
		 * The buffer is committing, we simply cannot touch
		 * it. If the page is straddling i_size we have to wait
		 * for commit and try again.
		 */
		if (partial_page) {
			spin_unlock(&journal->j_list_lock);
			spin_unlock(&jh->b_state_lock);
			write_unlock(&journal->j_state_lock);
			jbd2_journal_put_journal_head(jh);
			return -EBUSY;
		}
		/*
		 * OK, buffer won't be reachable after truncate. We just clear
		 * b_modified to not confuse transaction credit accounting, and
		 * set j_next_transaction to the running transaction (if there
		 * is one) and mark buffer as freed so that commit code knows
		 * it should clear dirty bits when it is done with the buffer.
		 */
		set_buffer_freed(bh);
		if (journal->j_running_transaction && buffer_jbddirty(bh))
			jh->b_next_transaction = journal->j_running_transaction;
		jh->b_modified = 0;
		spin_unlock(&journal->j_list_lock);
		spin_unlock(&jh->b_state_lock);
		write_unlock(&journal->j_state_lock);
		jbd2_journal_put_journal_head(jh);
		return 0;
	} else {
		/* Good, the buffer belongs to the running transaction.
		 * We are writing our own transaction's data, not any
		 * previous one's, so it is safe to throw it away
		 * (remember that we expect the filesystem to have set
		 * i_size already for this truncate so recovery will not
		 * expose the disk blocks we are discarding here.) */
		J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
		JBUFFER_TRACE(jh, "on running transaction");
		may_free = __dispose_buffer(jh, transaction);
	}

zap_buffer:
	/*
	 * This is tricky. Although the buffer is truncated, it may be reused
	 * if blocksize < pagesize and it is attached to the page straddling
	 * EOF. Since the buffer might have been added to BJ_Forget list of the
	 * running transaction, journal_get_write_access() won't clear
	 * b_modified and credit accounting gets confused. So clear b_modified
	 * here.
	 */
	jh->b_modified = 0;
	spin_unlock(&journal->j_list_lock);
	spin_unlock(&jh->b_state_lock);
	write_unlock(&journal->j_state_lock);
	jbd2_journal_put_journal_head(jh);
zap_buffer_unlocked:
	clear_buffer_dirty(bh);
	J_ASSERT_BH(bh, !buffer_jbddirty(bh));
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
	bh->b_bdev = NULL;
	return may_free;
}
/**
 * jbd2_journal_invalidatepage() - invalidate a range of buffers in a page
 * @journal: journal to use for flush...
 * @page:    page to flush
 * @offset:  start of the range to invalidate
 * @length:  length of the range to invalidate
 *
 * Reap page buffers containing data in the specified range of the page.
 * Can return -EBUSY if buffers are part of the committing transaction and
 * the page is straddling i_size. Caller then has to wait for current commit
 * and try again.
 */
int jbd2_journal_invalidatepage(journal_t *journal,
				struct page *page,
				unsigned int offset,
				unsigned int length)
{
	struct buffer_head *head, *bh, *next;
	unsigned int stop = offset + length;
	unsigned int curr_off = 0;
	int partial_page = (offset || length < PAGE_SIZE);
	int may_free = 1;
	int ret = 0;

	if (!PageLocked(page))
		BUG();
	if (!page_has_buffers(page))
		return 0;

	BUG_ON(stop > PAGE_SIZE || stop < length);

	/* We will potentially be playing with lists other than just the
	 * data lists (especially for journaled data mode), so be
	 * cautious in our locking. */

	head = bh = page_buffers(page);
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		if (next_off > stop)
			return 0;

		if (offset <= curr_off) {
			/* This block is wholly outside the truncation point */
			lock_buffer(bh);
			ret = journal_unmap_buffer(journal, bh, partial_page);
			unlock_buffer(bh);
			if (ret < 0)
				return ret;
			may_free &= ret;
		}
		curr_off = next_off;
		bh = next;

	} while (bh != head);

	if (!partial_page) {
		if (may_free && try_to_free_buffers(page))
			J_ASSERT(!page_has_buffers(page));
	}
	return 0;
}
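/*
 * Illustrative sketch (added commentary, not part of the original file): a
 * filesystem's ->invalidatepage handler would typically forward to
 * jbd2_journal_invalidatepage() and propagate -EBUSY so that its caller can
 * wait for the committing transaction and retry.  The function name is
 * hypothetical.
 */
static int __maybe_unused example_invalidatepage(journal_t *journal,
						 struct page *page,
						 unsigned int offset,
						 unsigned int length)
{
	int ret = jbd2_journal_invalidatepage(journal, page, offset, length);

	/*
	 * -EBUSY means the page straddles i_size and some buffers belong to
	 * the committing transaction; the caller must wait for that commit
	 * to finish and then invalidate again.
	 */
	return ret;
}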
/*
 * File a buffer on the given transaction list.
 */
void __jbd2_journal_file_buffer(struct journal_head *jh,
			transaction_t *transaction, int jlist)
{
	struct journal_head **list = NULL;
	int was_dirty = 0;
	struct buffer_head *bh = jh2bh(jh);

	lockdep_assert_held(&jh->b_state_lock);
	assert_spin_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	J_ASSERT_JH(jh, jh->b_transaction == transaction ||
				jh->b_transaction == NULL);

	if (jh->b_transaction && jh->b_jlist == jlist)
		return;

	if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
	    jlist == BJ_Shadow || jlist == BJ_Forget) {
		/*
		 * For metadata buffers, we track dirty bit in buffer_jbddirty
		 * instead of buffer_dirty. We should not see a dirty bit set
		 * here because we clear it in do_get_write_access but e.g.
		 * tune2fs can modify the sb and set the dirty bit at any time
		 * so we try to gracefully handle that.
		 */
		if (buffer_dirty(bh))
			warn_dirty_buffer(bh);
		if (test_clear_buffer_dirty(bh) ||
		    test_clear_buffer_jbddirty(bh))
			was_dirty = 1;
	}

	if (jh->b_transaction)
		__jbd2_journal_temp_unlink_buffer(jh);
	else
		jbd2_journal_grab_journal_head(bh);
	jh->b_transaction = transaction;

	switch (jlist) {
	case BJ_None:
		J_ASSERT_JH(jh, !jh->b_committed_data);
		J_ASSERT_JH(jh, !jh->b_frozen_data);
		return;
	case BJ_Metadata:
		transaction->t_nr_buffers++;
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	}

	__blist_add_buffer(list, jh);
	jh->b_jlist = jlist;

	if (was_dirty)
		set_buffer_jbddirty(bh);
}
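/*
 * Note (added commentary): when a buffer is filed on a transaction for the
 * first time (jh->b_transaction was NULL), __jbd2_journal_file_buffer() takes
 * an extra journal_head reference on behalf of that transaction.  That
 * reference is dropped again when the buffer leaves the transaction, e.g. in
 * jbd2_journal_unfile_buffer() or by the caller of
 * __jbd2_journal_refile_buffer() when it returns true.
 */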
void jbd2_journal_file_buffer(struct journal_head *jh,
				transaction_t *transaction, int jlist)
{
	spin_lock(&jh->b_state_lock);
	spin_lock(&transaction->t_journal->j_list_lock);
	__jbd2_journal_file_buffer(jh, transaction, jlist);
	spin_unlock(&transaction->t_journal->j_list_lock);
	spin_unlock(&jh->b_state_lock);
}
/*
 * Remove a buffer from its current buffer list in preparation for
 * dropping it from its current transaction entirely. If the buffer has
 * already started to be used by a subsequent transaction, refile the
 * buffer on that transaction's metadata list.
 *
 * Called under j_list_lock
 * Called under jh->b_state_lock
 *
 * When this function returns true, there's no next transaction to refile to
 * and the caller has to drop jh reference through
 * jbd2_journal_put_journal_head().
 */
bool __jbd2_journal_refile_buffer(struct journal_head *jh)
{
	int was_dirty, jlist;
	struct buffer_head *bh = jh2bh(jh);

	lockdep_assert_held(&jh->b_state_lock);
	if (jh->b_transaction)
		assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);

	/* If the buffer is now unused, just drop it. */
	if (jh->b_next_transaction == NULL) {
		__jbd2_journal_unfile_buffer(jh);
		return true;
	}

	/*
	 * It has been modified by a later transaction: add it to the new
	 * transaction's metadata list.
	 */

	was_dirty = test_clear_buffer_jbddirty(bh);
	__jbd2_journal_temp_unlink_buffer(jh);

	/*
	 * b_transaction must be set, otherwise the new b_transaction won't
	 * be holding jh reference
	 */
	J_ASSERT_JH(jh, jh->b_transaction != NULL);

	/*
	 * We set b_transaction here because b_next_transaction will inherit
	 * our jh reference and thus __jbd2_journal_file_buffer() must not
	 * take a new one.
	 */
	WRITE_ONCE(jh->b_transaction, jh->b_next_transaction);
	WRITE_ONCE(jh->b_next_transaction, NULL);
	if (buffer_freed(bh))
		jlist = BJ_Forget;
	else if (jh->b_modified)
		jlist = BJ_Metadata;
	else
		jlist = BJ_Reserved;
	__jbd2_journal_file_buffer(jh, jh->b_transaction, jlist);
	J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);

	if (was_dirty)
		set_buffer_jbddirty(bh);
	return false;
}
/*
 * __jbd2_journal_refile_buffer() with necessary locking added. We take our
 * bh reference so that we can safely unlock bh.
 *
 * The jh and bh may be freed by this call.
 */
void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
{
	bool drop;

	spin_lock(&jh->b_state_lock);
	spin_lock(&journal->j_list_lock);
	drop = __jbd2_journal_refile_buffer(jh);
	spin_unlock(&jh->b_state_lock);
	spin_unlock(&journal->j_list_lock);
	if (drop)
		jbd2_journal_put_journal_head(jh);
}
/*
 * File inode in the inode list of the handle's transaction
 */
static int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode,
		unsigned long flags, loff_t start_byte, loff_t end_byte)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;

	if (is_handle_aborted(handle))
		return -EROFS;
	journal = transaction->t_journal;

	jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
			transaction->t_tid);

	spin_lock(&journal->j_list_lock);
	jinode->i_flags |= flags;

	if (jinode->i_dirty_end) {
		jinode->i_dirty_start = min(jinode->i_dirty_start, start_byte);
		jinode->i_dirty_end = max(jinode->i_dirty_end, end_byte);
	} else {
		jinode->i_dirty_start = start_byte;
		jinode->i_dirty_end = end_byte;
	}

	/* Is inode already attached where we need it? */
	if (jinode->i_transaction == transaction ||
	    jinode->i_next_transaction == transaction)
		goto done;

	/*
	 * We only ever set this variable to 1 so the test is safe. Since
	 * t_need_data_flush is likely to be set, we do the test to save some
	 * cacheline bouncing
	 */
	if (!transaction->t_need_data_flush)
		transaction->t_need_data_flush = 1;
	/* On some different transaction's list - should be
	 * the committing one */
	if (jinode->i_transaction) {
		J_ASSERT(jinode->i_next_transaction == NULL);
		J_ASSERT(jinode->i_transaction ==
					journal->j_committing_transaction);
		jinode->i_next_transaction = transaction;
		goto done;
	}
	/* Not on any transaction list... */
	J_ASSERT(!jinode->i_next_transaction);
	jinode->i_transaction = transaction;
	list_add(&jinode->i_list, &transaction->t_inode_list);
done:
	spin_unlock(&journal->j_list_lock);

	return 0;
}
int jbd2_journal_inode_ranged_write(handle_t *handle,
		struct jbd2_inode *jinode, loff_t start_byte, loff_t length)
{
	return jbd2_journal_file_inode(handle, jinode,
			JI_WRITE_DATA | JI_WAIT_DATA, start_byte,
			start_byte + length - 1);
}
int jbd2_journal_inode_ranged_wait(handle_t *handle, struct jbd2_inode *jinode,
		loff_t start_byte, loff_t length)
{
	return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA,
			start_byte, start_byte + length - 1);
}
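/*
 * Illustrative sketch (added commentary, not part of the original file): in
 * data=ordered mode a filesystem tags the byte range it has just allocated or
 * rewritten so that the commit code writes that data out before the
 * transaction commits.  The helper name and the way the caller obtains its
 * struct jbd2_inode are assumptions made for this example.
 */
static int __maybe_unused example_order_data_range(handle_t *handle,
						   struct jbd2_inode *jinode,
						   loff_t pos, loff_t len)
{
	/* Make sure the data reaches disk before this transaction commits. */
	return jbd2_journal_inode_ranged_write(handle, jinode, pos, len);
}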
/*
 * File truncate and transaction commit interact with each other in a
 * non-trivial way.  If a transaction writing data block A is
 * committing, we cannot discard the data by truncate until we have
 * written them.  Otherwise if we crashed after the transaction with
 * write has committed but before the transaction with truncate has
 * committed, we could see stale data in block A.  This function is a
 * helper to solve this problem.  It starts writeout of the truncated
 * part in case it is in the committing transaction.
 *
 * Filesystem code must call this function when inode is journaled in
 * ordered mode before truncation happens and after the inode has been
 * placed on orphan list with the new inode size. The second condition
 * avoids the race that someone writes new data and we start
 * committing the transaction after this function has been called but
 * before a transaction for truncate is started (and furthermore it
 * allows us to optimize the case where the addition to orphan list
 * happens in the same transaction as write --- we don't have to write
 * any data in such case).
 */
int jbd2_journal_begin_ordered_truncate(journal_t *journal,
					struct jbd2_inode *jinode,
					loff_t new_size)
{
	transaction_t *inode_trans, *commit_trans;
	int ret = 0;

	/* This is a quick check to avoid locking if not necessary */
	if (!jinode->i_transaction)
		goto out;
	/* Locks are here just to force reading of recent values, it is
	 * enough that the transaction was not committing before we started
	 * a transaction adding the inode to orphan list */
	read_lock(&journal->j_state_lock);
	commit_trans = journal->j_committing_transaction;
	read_unlock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	inode_trans = jinode->i_transaction;
	spin_unlock(&journal->j_list_lock);
	if (inode_trans == commit_trans) {
		ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping,
			new_size, LLONG_MAX);
		if (ret)
			jbd2_journal_abort(journal, ret);