/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>

/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
}

/*
 * When an ext3-ordered file is truncated, it is possible that many pages are
 * not successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers. These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list. Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock. The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (TestSetPageLocked(page))
		goto nope;

	page_cache_get(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	page_cache_release(page);
	return;

nope:
	__brelse(bh);
}

/*
 * Try to acquire jbd_lock_bh_state() against the buffer, when j_list_lock is
 * held. For ranking reasons we must trylock. If we lose, schedule away and
 * return 0. j_list_lock is dropped in this case.
 */
static int inverted_lock(journal_t *journal, struct buffer_head *bh)
{
	if (!jbd_trylock_bh_state(bh)) {
		spin_unlock(&journal->j_list_lock);
		schedule();
		return 0;
	}
	return 1;
}

/*
 * Done it all: now submit the commit record. We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct journal_head *descriptor;
	struct commit_header *tmp;
	struct buffer_head *bh;
	int ret;
	int barrier_done = 0;
	struct timespec now = current_kernel_time();

	if (is_journal_aborted(journal))
		return 0;

	descriptor = jbd2_journal_get_descriptor_buffer(journal);
	if (!descriptor)
		return 1;

	bh = jh2bh(descriptor);

	tmp = (struct commit_header *)bh->b_data;
	tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
	tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
	tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

	if (JBD2_HAS_COMPAT_FEATURE(journal,
				    JBD2_FEATURE_COMPAT_CHECKSUM)) {
		tmp->h_chksum_type = JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0] = cpu_to_be32(crc32_sum);
	}

	JBUFFER_TRACE(descriptor, "submit commit block");
	lock_buffer(bh);
	get_bh(bh);
	set_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

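	/*
	 * Submit the commit block as an ordered (barrier) write only when
	 * barriers are enabled and the async-commit feature is not in use;
	 * with async commit the block may reach disk out of order and
	 * recovery is expected to rely on the checksum stored above instead.
	 */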
	if (journal->j_flags & JBD2_BARRIER &&
	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		set_buffer_ordered(bh);
		barrier_done = 1;
	}
	ret = submit_bh(WRITE, bh);
	if (barrier_done)
		clear_buffer_ordered(bh);

	/* is it possible for another commit to fail at roughly
	 * the same time as this one?  If so, we don't want to
	 * trust the barrier flag in the super, but instead want
	 * to remember if we sent a barrier request
	 */
	if (ret == -EOPNOTSUPP && barrier_done) {
		char b[BDEVNAME_SIZE];

		printk(KERN_WARNING
			"JBD: barrier-based sync failed on %s - "
			"disabling barriers\n",
			bdevname(journal->j_dev, b));
		spin_lock(&journal->j_state_lock);
		journal->j_flags &= ~JBD2_BARRIER;
		spin_unlock(&journal->j_state_lock);

		/* And try again, without the barrier */
		lock_buffer(bh);
		set_buffer_uptodate(bh);
		set_buffer_dirty(bh);
		ret = submit_bh(WRITE, bh);
	}
	*cbh = bh;
	return ret;
}

/*
 * This function, along with journal_submit_commit_record(), allows the
 * commit record to be written asynchronously.
 */
static int journal_wait_on_commit_record(struct buffer_head *bh)
{
	int ret = 0;

	clear_buffer_dirty(bh);
	wait_on_buffer(bh);

	if (unlikely(!buffer_uptodate(bh)))
		ret = -EIO;
	put_bh(bh);		/* One for getblk() */
	jbd2_journal_put_journal_head(bh2jh(bh));

	return ret;
}

/*
 * Wait for all submitted IO to complete.
 */
static int journal_wait_on_locked_list(journal_t *journal,
				       transaction_t *commit_transaction)
{
	int ret = 0;
	struct journal_head *jh;

	while (commit_transaction->t_locked_list) {
		struct buffer_head *bh;

		jh = commit_transaction->t_locked_list->b_tprev;
		bh = jh2bh(jh);
		get_bh(bh);
		if (buffer_locked(bh)) {
			spin_unlock(&journal->j_list_lock);
			wait_on_buffer(bh);
			if (unlikely(!buffer_uptodate(bh)))
				ret = -EIO;
			spin_lock(&journal->j_list_lock);
		}
		if (!inverted_lock(journal, bh)) {
			put_bh(bh);
			spin_lock(&journal->j_list_lock);
			continue;
		}
		if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) {
			__jbd2_journal_unfile_buffer(jh);
			jbd_unlock_bh_state(bh);
			jbd2_journal_remove_journal_head(bh);
			put_bh(bh);
		} else {
			jbd_unlock_bh_state(bh);
		}
		put_bh(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	return ret;
}

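/*
 * Submit all data buffers collected in wbuf[]. The safety reference taken
 * when each buffer was queued is handed over to the write and dropped by
 * the end_io handler once the IO completes.
 */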
static void journal_do_submit_data(struct buffer_head **wbuf, int bufs)
{
	int i;

	for (i = 0; i < bufs; i++) {
		wbuf[i]->b_end_io = end_buffer_write_sync;
		/* We use-up our safety reference in submit_bh() */
		submit_bh(WRITE, wbuf[i]);
	}
}

/*
 * Submit all the data buffers to disk
 */
static void journal_submit_data_buffers(journal_t *journal,
					transaction_t *commit_transaction)
{
	struct journal_head *jh;
	struct buffer_head *bh;
	int locked;
	int bufs = 0;
	struct buffer_head **wbuf = journal->j_wbuf;

	/*
	 * Whenever we unlock the journal and sleep, things can get added
	 * onto ->t_sync_datalist, so we have to keep looping back to
	 * write_out_data until we *know* that the list is empty.
	 *
	 * Cleanup any flushed data buffers from the data list. Even in
	 * abort mode, we want to flush this out as soon as possible.
	 */
write_out_data:
	cond_resched();
	spin_lock(&journal->j_list_lock);

	while (commit_transaction->t_sync_datalist) {
		jh = commit_transaction->t_sync_datalist;
		bh = jh2bh(jh);
		locked = 0;

		/* Get reference just to make sure buffer does not disappear
		 * when we are forced to drop various locks */
		get_bh(bh);
		/* If the buffer is dirty, we need to submit IO and hence
		 * we need the buffer lock. We try to lock the buffer without
		 * blocking. If we fail, we need to drop j_list_lock and do
		 * blocking lock_buffer().
		 */
		if (buffer_dirty(bh)) {
			if (test_set_buffer_locked(bh)) {
				BUFFER_TRACE(bh, "needs blocking lock");
				spin_unlock(&journal->j_list_lock);
				/* Write out all data to prevent deadlocks */
				journal_do_submit_data(wbuf, bufs);
				bufs = 0;
				lock_buffer(bh);
				spin_lock(&journal->j_list_lock);
			}
			locked = 1;
		}
		/* We have to get bh_state lock. Again out of order, sigh. */
		if (!inverted_lock(journal, bh)) {
			jbd_lock_bh_state(bh);
			spin_lock(&journal->j_list_lock);
		}
		/* Someone already cleaned up the buffer? */
		if (!buffer_jbd(bh)
			|| jh->b_transaction != commit_transaction
			|| jh->b_jlist != BJ_SyncData) {
			jbd_unlock_bh_state(bh);
			if (locked)
				unlock_buffer(bh);
			BUFFER_TRACE(bh, "already cleaned up");
			put_bh(bh);
			continue;
		}
		if (locked && test_clear_buffer_dirty(bh)) {
			BUFFER_TRACE(bh, "needs writeout, adding to array");
			wbuf[bufs++] = bh;
			__jbd2_journal_file_buffer(jh, commit_transaction,
						BJ_Locked);
			jbd_unlock_bh_state(bh);
			if (bufs == journal->j_wbufsize) {
				spin_unlock(&journal->j_list_lock);
				journal_do_submit_data(wbuf, bufs);
				bufs = 0;
				goto write_out_data;
			}
		} else if (!locked && buffer_locked(bh)) {
			__jbd2_journal_file_buffer(jh, commit_transaction,
						BJ_Locked);
			jbd_unlock_bh_state(bh);
			put_bh(bh);
		} else {
			BUFFER_TRACE(bh, "writeout complete: unfile");
			__jbd2_journal_unfile_buffer(jh);
			jbd_unlock_bh_state(bh);
			if (locked)
				unlock_buffer(bh);
			jbd2_journal_remove_journal_head(bh);
			/* Once for our safety reference, once for
			 * jbd2_journal_remove_journal_head() */
			put_bh(bh);
			put_bh(bh);
		}

		if (need_resched() || spin_needbreak(&journal->j_list_lock)) {
			spin_unlock(&journal->j_list_lock);
			goto write_out_data;
		}
	}
	spin_unlock(&journal->j_list_lock);
	journal_do_submit_data(wbuf, bufs);
}

/*
 * Submit all the data buffers of inodes associated with the transaction
 * to disk.
 *
 * We are in a committing transaction. Therefore no new inode can be added to
 * our inode list. We use the JI_COMMIT_RUNNING flag to protect the inode we
 * are currently operating on from being released while we write out pages.
 */
static int journal_submit_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;
	struct address_space *mapping;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		mapping = jinode->i_vfs_inode->i_mapping;
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		err = filemap_fdatawrite_range(mapping, 0,
					i_size_read(jinode->i_vfs_inode));
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}

/*
 * Wait for data submitted for writeout, refile inodes to proper
 * transaction if needed.
 *
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	/* For locking, see the comment in journal_submit_inode_data_buffers() */
	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	/* Now refile inode to proper lists */
	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				&jinode->i_transaction->t_inode_list);
		} else {
			jinode->i_transaction = NULL;
		}
	}
	spin_unlock(&journal->j_list_lock);

	return ret;
}

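/*
 * Fold the contents of a buffer into the running CRC32 that is stored in
 * the commit block when JBD2_FEATURE_COMPAT_CHECKSUM is enabled.
 */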
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	char *addr;
	__u32 checksum;

	addr = kmap_atomic(page, KM_USER0);
	checksum = crc32_be(crc32_sum,
		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
	kunmap_atomic(addr, KM_USER0);

	return checksum;
}

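/*
 * Write a journal block number into an on-disk block tag. The low 32 bits
 * always go into t_blocknr; the high bits are stored in t_blocknr_high
 * only when the journal uses 64-bit block tags.
 */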
static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
				   unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
	if (tag_bytes > JBD2_TAG_SIZE32)
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}

/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log. This
 * function is called by the journal thread to begin a complete commit.
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh, *new_jh, *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	char *tagp = NULL;
	journal_header_t *header;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL; /* For transactional checksums */
	__u32 crc32_sum = ~0;

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

#ifdef COMMIT_STATS
	spin_lock(&journal->j_list_lock);
	summarise_journal_usage(journal);
	spin_unlock(&journal->j_list_lock);
#endif

	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		jbd2_journal_update_superblock(journal, 1);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);

	jbd_debug(1, "JBD: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_LOCKED;

	stats.u.run.rs_wait = commit_transaction->t_max_wait;
	stats.u.run.rs_locked = jiffies;
	stats.u.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
						stats.u.run.rs_locked);

	spin_lock(&commit_transaction->t_handle_lock);
	while (commit_transaction->t_updates) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (commit_transaction->t_updates) {
			spin_unlock(&commit_transaction->t_handle_lock);
			spin_unlock(&journal->j_state_lock);
			schedule();
			spin_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);

	J_ASSERT (commit_transaction->t_outstanding_credits <=
			journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers. Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a jbd2_journal_restart() while
	 * there are still BJ_Reserved buffers outstanding. These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved. This is consistent with the existing behaviour
	 * that multiple jbd2_journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}

	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists. We do this *before* commit because it potentially
	 * frees some memory
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal);
	spin_unlock(&journal->j_list_lock);

	jbd_debug (3, "JBD: commit phase 1\n");

	/*
	 * Switch to a new revoke table.
	 */
	jbd2_journal_switch_revoke_table(journal);

	stats.u.run.rs_flushing = jiffies;
	stats.u.run.rs_locked = jbd2_time_diff(stats.u.run.rs_locked,
					       stats.u.run.rs_flushing);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	spin_unlock(&journal->j_state_lock);

	jbd_debug (3, "JBD: commit phase 2\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists. Data blocks go first.
	 */
	err = 0;
	journal_submit_data_buffers(journal, commit_transaction);
	err = journal_submit_inode_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

	/*
	 * Wait for all previously submitted IO to complete if commit
	 * record is to be written synchronously.
	 */
	spin_lock(&journal->j_list_lock);
	if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
		JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
		err = journal_wait_on_locked_list(journal,
						commit_transaction);

	spin_unlock(&journal->j_list_lock);

	if (err)
		jbd2_journal_abort(journal, err);

	jbd2_journal_write_revoke_records(journal, commit_transaction);

	jbd_debug(3, "JBD: commit phase 2\n");

	/*
	 * If we found any dirty or locked buffers, then we should have
	 * looped back up to the write_out_data label. If there weren't
	 * any then journal_clean_data_list should have wiped the list
	 * clean by now, so check that it is in fact empty.
	 */
	J_ASSERT (commit_transaction->t_sync_datalist == NULL);

	jbd_debug (3, "JBD: commit phase 3\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction! Now comes the tricky part: we need to write out
	 * metadata. Loop over the transaction's entire buffer list:
	 */
	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	spin_unlock(&journal->j_state_lock);

	stats.u.run.rs_logging = jiffies;
	stats.u.run.rs_flushing = jbd2_time_diff(stats.u.run.rs_flushing,
						 stats.u.run.rs_logging);
	stats.u.run.rs_blocks = commit_transaction->t_outstanding_credits;
	stats.u.run.rs_blocks_logged = 0;

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 commit_transaction->t_outstanding_credits);

	descriptor = NULL;
	bufs = 0;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it for background writing. */

		if (is_journal_aborted(journal)) {
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			   any descriptor buffers which may have been
			   already allocated, even if we are now
			   aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			struct buffer_head *bh;

			J_ASSERT (bufs == 0);

			jbd_debug(4, "JBD: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(journal);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			bh = jh2bh(descriptor);
			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
				(unsigned long long)bh->b_blocknr, bh->b_data);
			header = (journal_header_t *)&bh->b_data[0];
			header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
			header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
			header->h_sequence = cpu_to_be32(commit_transaction->t_tid);

			tagp = &bh->b_data[sizeof(journal_header_t)];
			space_left = bh->b_size - sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(bh);
			set_buffer_dirty(bh);
			wbuf[bufs++] = bh;

			/* Record it so that we can wait for IO
			   completion later */
			BUFFER_TRACE(bh, "ph3: file as descriptor");
			jbd2_journal_file_buffer(descriptor, commit_transaction,
					BJ_LogCtl);
		}

		/* Where is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by jbd2_journal_next_log_block() also.
		 */
		commit_transaction->t_outstanding_credits--;

		/* Bump b_count to prevent truncate from stumbling over
		   the shadowed buffer!  @@@ This can go if we ever get
		   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);

		/* Make a temporary IO buffer with which to write it out
		   (this will requeue both the metadata buffer and the
		   temporary IO buffer). new_bh goes on BJ_IO*/

		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		/*
		 * akpm: jbd2_journal_write_metadata_buffer() sets
		 * new_bh->b_transaction to commit_transaction.
		 * We need to clean this up before we release new_bh
		 * (which is of type BJ_IO)
		 */
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
						      jh, &new_jh, blocknr);
		set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
		wbuf[bufs++] = jh2bh(new_jh);

		/* Record the new block's tag in the current descriptor
		   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be32(tag_flag);
		tagp += tag_bytes;
		space_left -= tag_bytes;

		if (first_tag) {
			memcpy (tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16) {

			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
			   submitting the IOs.  "tag" still points to
			   the last tag we set up. */

			tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);

start_journal_io:
			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				/*
				 * Compute checksum.
				 */
				if (JBD2_HAS_COMPAT_FEATURE(journal,
					JBD2_FEATURE_COMPAT_CHECKSUM)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(WRITE, bh);
			}
			cond_resched();
			stats.u.run.rs_blocks_logged += bufs;

			/* Force a new descriptor to be generated next
			   time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}

	/* Done it all: now write the commit record asynchronously. */

	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
		JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						 &cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);

		spin_lock(&journal->j_list_lock);
		err = journal_wait_on_locked_list(journal,
						commit_transaction);
		spin_unlock(&journal->j_list_lock);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}

	/*
	 * This is the right place to wait for data buffers both for ASYNC
	 * and !ASYNC commit. If commit is ASYNC, we need to wait only after
	 * the commit block went to disk (which happens above). If commit is
	 * SYNC, we need to wait for data buffers before we start writing
	 * commit block, which happens below in such setting.
	 */
	err = journal_finish_inode_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

	/* Lo and behold: we have just managed to send a transaction to
	   the log.  Before we can commit it, wait for the IO so far to
	   complete.  Control buffers being written are on the
	   transaction's t_log_list queue, and metadata buffers are on
	   the t_iobuf_list queue.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	 */

	jbd_debug(3, "JBD: commit phase 4\n");

	/*
	 * akpm: these are BJ_IO, and j_list_lock is not needed.
	 * See __journal_try_to_free_buffer.
	 */
wait_for_iobuf:
	while (commit_transaction->t_iobuf_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_iobuf_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_iobuf;
		}
		if (cond_resched())
			goto wait_for_iobuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		clear_buffer_jwrite(bh);

		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
		jbd2_journal_unfile_buffer(journal, jh);

		/*
		 * ->t_iobuf_list should contain only dummy buffer_heads
		 * which were created by jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to unlock and free the corresponding
		   shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_bit(BH_JWrite, &bh->b_state);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));

		/* The metadata is now released for reuse, but we need
		   to remember it against this transaction so that when
		   we finally commit, we can do any checkpointing
		   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		/* Wake up any transactions which were waiting for this
		   IO to complete */
		wake_up_bit(&bh->b_state, BH_Unshadow);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT (commit_transaction->t_shadow_list == NULL);

	jbd_debug(3, "JBD: commit phase 5\n");

	/* Here we wait for the revoke record and descriptor record buffers */
wait_for_ctlbuf:
	while (commit_transaction->t_log_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_log_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_ctlbuf;
		}
		if (cond_resched())
			goto wait_for_ctlbuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_journal_unfile_buffer(journal, jh);
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	jbd_debug(3, "JBD: commit phase 6\n");

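	/*
	 * Journals without the async-commit feature submit the commit
	 * record only now, after every descriptor and metadata block has
	 * been written and waited on, and then wait for it below.
	 */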
	if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
		JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						&cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}
	if (!err && !is_journal_aborted(journal))
		err = journal_wait_on_commit_record(cbh);

	if (err)
		jbd2_journal_abort(journal, err);

	/* End of a transaction!  Finally, we can do checkpoint
	   processing: any buffers committed as a result of this
	   transaction can be removed from any checkpoint list it was on
	   before. */

	jbd_debug(3, "JBD: commit phase 7\n");

	J_ASSERT(commit_transaction->t_sync_datalist == NULL);
	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh, jh->b_transaction == commit_transaction ||
			jh->b_transaction == journal->j_running_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by jbd2_journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/* A buffer which has been freed while still being
		 * journaled by a previous transaction may end up still
		 * being dirty here, but we want to avoid writing back
		 * that buffer in the future now that the last use has
		 * been committed.  That's not only a performance gain,
		 * it also stops aliasing problems if the buffer is left
		 * behind for writeback and gets reallocated for another
		 * use in a different page. */
		if (buffer_freed(bh)) {
			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			JBUFFER_TRACE(jh, "refile for checkpoint writeback");
			__jbd2_journal_refile_buffer(jh);
			jbd_unlock_bh_state(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/* The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed. *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list. */
			JBUFFER_TRACE(jh, "refile or unfile freed buffer");
			__jbd2_journal_refile_buffer(jh);
			if (!jh->b_transaction) {
				jbd_unlock_bh_state(bh);
				/* needs a brelse */
				jbd2_journal_remove_journal_head(bh);
				release_buffer_page(bh);
			} else
				jbd_unlock_bh_state(bh);
		}
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	spin_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		spin_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/* Done with this transaction! */

	jbd_debug(3, "JBD: commit phase 8\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT);

	commit_transaction->t_start = jiffies;
	stats.u.run.rs_logging = jbd2_time_diff(stats.u.run.rs_logging,
						commit_transaction->t_start);

	/*
	 * File the transaction for history
	 */
	stats.ts_type = JBD2_STATS_RUN;
	stats.ts_tid = commit_transaction->t_tid;
	stats.u.run.rs_handle_count = commit_transaction->t_handle_count;
	spin_lock(&journal->j_history_lock);
	memcpy(journal->j_history + journal->j_history_cur, &stats,
			sizeof(stats));
	if (++journal->j_history_cur == journal->j_history_max)
		journal->j_history_cur = 0;

	/*
	 * Calculate overall stats
	 */
	journal->j_stats.ts_tid++;
	journal->j_stats.u.run.rs_wait += stats.u.run.rs_wait;
	journal->j_stats.u.run.rs_running += stats.u.run.rs_running;
	journal->j_stats.u.run.rs_locked += stats.u.run.rs_locked;
	journal->j_stats.u.run.rs_flushing += stats.u.run.rs_flushing;
	journal->j_stats.u.run.rs_logging += stats.u.run.rs_logging;
	journal->j_stats.u.run.rs_handle_count += stats.u.run.rs_handle_count;
	journal->j_stats.u.run.rs_blocks += stats.u.run.rs_blocks;
	journal->j_stats.u.run.rs_blocks_logged += stats.u.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);

	commit_transaction->t_state = T_FINISHED;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	spin_unlock(&journal->j_state_lock);

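	/*
	 * If the transaction still has buffers that need checkpointing,
	 * link it into the journal's circular list of checkpoint
	 * transactions; otherwise it can be dropped straight away.
	 */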
	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
	} else {
		if (journal->j_checkpoint_transactions == NULL) {
			journal->j_checkpoint_transactions = commit_transaction;
			commit_transaction->t_cpnext = commit_transaction;
			commit_transaction->t_cpprev = commit_transaction;
		} else {
			commit_transaction->t_cpnext =
				journal->j_checkpoint_transactions;
			commit_transaction->t_cpprev =
				commit_transaction->t_cpnext->t_cpprev;
			commit_transaction->t_cpnext->t_cpprev =
				commit_transaction;
			commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
		}
	}
	spin_unlock(&journal->j_list_lock);

	jbd_debug(1, "JBD: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);

	wake_up(&journal->j_wait_done_commit);
}