/*
 * linux/fs/jbd/journal.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Generic filesystem journal-writing code; part of the ext2fs
 * journaling system.
 *
 * This file manages journals: areas of disk reserved for logging
 * transactional updates. This includes the kernel journaling thread
 * which is responsible for scheduling updates to the log.
 *
 * We do not actually manage the physical storage of the journal in this
 * file: that is left to a per-journal policy function, which allows us
 * to store the journal within a filesystem-specified area for ext2
 * journaling (ext2 can use a reserved inode for storing the log).
 */

#include <linux/module.h>
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/poison.h>
#include <linux/proc_fs.h>

#include <asm/uaccess.h>
#include <asm/page.h>

EXPORT_SYMBOL(journal_start);
EXPORT_SYMBOL(journal_restart);
EXPORT_SYMBOL(journal_extend);
EXPORT_SYMBOL(journal_stop);
EXPORT_SYMBOL(journal_lock_updates);
EXPORT_SYMBOL(journal_unlock_updates);
EXPORT_SYMBOL(journal_get_write_access);
EXPORT_SYMBOL(journal_get_create_access);
EXPORT_SYMBOL(journal_get_undo_access);
EXPORT_SYMBOL(journal_dirty_data);
EXPORT_SYMBOL(journal_dirty_metadata);
EXPORT_SYMBOL(journal_release_buffer);
EXPORT_SYMBOL(journal_forget);
#if 0
EXPORT_SYMBOL(journal_sync_buffer);
#endif
EXPORT_SYMBOL(journal_flush);
EXPORT_SYMBOL(journal_revoke);

EXPORT_SYMBOL(journal_init_dev);
EXPORT_SYMBOL(journal_init_inode);
EXPORT_SYMBOL(journal_update_format);
EXPORT_SYMBOL(journal_check_used_features);
EXPORT_SYMBOL(journal_check_available_features);
EXPORT_SYMBOL(journal_set_features);
EXPORT_SYMBOL(journal_create);
EXPORT_SYMBOL(journal_load);
EXPORT_SYMBOL(journal_destroy);
EXPORT_SYMBOL(journal_update_superblock);
EXPORT_SYMBOL(journal_abort);
EXPORT_SYMBOL(journal_errno);
EXPORT_SYMBOL(journal_ack_err);
EXPORT_SYMBOL(journal_clear_err);
EXPORT_SYMBOL(log_wait_commit);
EXPORT_SYMBOL(journal_start_commit);
EXPORT_SYMBOL(journal_force_commit_nested);
EXPORT_SYMBOL(journal_wipe);
EXPORT_SYMBOL(journal_blocks_per_page);
EXPORT_SYMBOL(journal_invalidatepage);
EXPORT_SYMBOL(journal_try_to_free_buffers);
EXPORT_SYMBOL(journal_force_commit);

static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
static void __journal_abort_soft (journal_t *journal, int errno);
static int journal_create_jbd_slab(size_t slab_size);

/*
 * Helper function used to manage commit timeouts
 */

static void commit_timeout(unsigned long __data)
{
        struct task_struct * p = (struct task_struct *) __data;

        wake_up_process(p);
}

/*
 * kjournald: The main thread function used to manage a logging device
 * journal.
 *
 * This kernel thread is responsible for two things:
 *
 * 1) COMMIT: Every so often we need to commit the current state of the
 *    filesystem to disk. The journal thread is responsible for writing
 *    all of the metadata buffers to disk.
 *
 * 2) CHECKPOINT: We cannot reuse a used section of the log file until all
 *    of the data in that part of the log has been rewritten elsewhere on
 *    the disk. Flushing these old buffers to reclaim space in the log is
 *    known as checkpointing, and this thread is responsible for that job.
 */

static int kjournald(void *arg)
{
        journal_t *journal = arg;
        transaction_t *transaction;

        /*
         * Set up an interval timer which can be used to trigger a commit wakeup
         * after the commit interval expires
         */
        setup_timer(&journal->j_commit_timer, commit_timeout,
                    (unsigned long)current);

        /* Record that the journal thread is running */
        journal->j_task = current;
        wake_up(&journal->j_wait_done_commit);

        printk(KERN_INFO "kjournald starting. Commit interval %ld seconds\n",
                        journal->j_commit_interval / HZ);

        /*
         * And now, wait forever for commit wakeup events.
         */
        spin_lock(&journal->j_state_lock);

loop:
        if (journal->j_flags & JFS_UNMOUNT)
                goto end_loop;

        jbd_debug(1, "commit_sequence=%d, commit_request=%d\n",
                journal->j_commit_sequence, journal->j_commit_request);

        if (journal->j_commit_sequence != journal->j_commit_request) {
                jbd_debug(1, "OK, requests differ\n");
                spin_unlock(&journal->j_state_lock);
                del_timer_sync(&journal->j_commit_timer);
                journal_commit_transaction(journal);
                spin_lock(&journal->j_state_lock);
                goto loop;
        }

        wake_up(&journal->j_wait_done_commit);
        if (freezing(current)) {
                /*
                 * The simpler the better. Flushing journal isn't a
                 * good idea, because that depends on threads that may
                 * be already stopped.
                 */
                jbd_debug(1, "Now suspending kjournald\n");
                spin_unlock(&journal->j_state_lock);
                refrigerator();
                spin_lock(&journal->j_state_lock);
        } else {
                /*
                 * We assume on resume that commits are already there,
                 * so we don't sleep
                 */
                DEFINE_WAIT(wait);
                int should_sleep = 1;

                prepare_to_wait(&journal->j_wait_commit, &wait,
                                TASK_INTERRUPTIBLE);
                if (journal->j_commit_sequence != journal->j_commit_request)
                        should_sleep = 0;
                transaction = journal->j_running_transaction;
                if (transaction && time_after_eq(jiffies,
                                                transaction->t_expires))
                        should_sleep = 0;
                if (journal->j_flags & JFS_UNMOUNT)
                        should_sleep = 0;
                if (should_sleep) {
                        spin_unlock(&journal->j_state_lock);
                        schedule();
                        spin_lock(&journal->j_state_lock);
                }
                finish_wait(&journal->j_wait_commit, &wait);
        }

        jbd_debug(1, "kjournald wakes\n");

        /*
         * Were we woken up by a commit wakeup event?
         */
        transaction = journal->j_running_transaction;
        if (transaction && time_after_eq(jiffies, transaction->t_expires)) {
                journal->j_commit_request = transaction->t_tid;
                jbd_debug(1, "woke because of timeout\n");
        }
        goto loop;

end_loop:
        spin_unlock(&journal->j_state_lock);
        del_timer_sync(&journal->j_commit_timer);
        journal->j_task = NULL;
        wake_up(&journal->j_wait_done_commit);
        jbd_debug(1, "Journal thread exiting.\n");
        return 0;
}

static void journal_start_thread(journal_t *journal)
{
        kthread_run(kjournald, journal, "kjournald");
        wait_event(journal->j_wait_done_commit, journal->j_task != 0);
}

static void journal_kill_thread(journal_t *journal)
{
        spin_lock(&journal->j_state_lock);
        journal->j_flags |= JFS_UNMOUNT;

        while (journal->j_task) {
                wake_up(&journal->j_wait_commit);
                spin_unlock(&journal->j_state_lock);
                wait_event(journal->j_wait_done_commit, journal->j_task == 0);
                spin_lock(&journal->j_state_lock);
        }
        spin_unlock(&journal->j_state_lock);
}

/*
 * journal_write_metadata_buffer: write a metadata buffer to the journal.
 *
 * Writes a metadata buffer to a given disk block. The actual IO is not
 * performed but a new buffer_head is constructed which labels the data
 * to be written with the correct destination disk block.
 *
 * Any magic-number escaping which needs to be done will cause a
 * copy-out here. If the buffer happens to start with the
 * JFS_MAGIC_NUMBER, then we can't write it to the log directly: the
 * magic number is only written to the log for descriptor blocks. In
 * this case, we copy the data and replace the first word with 0, and we
 * return a result code which indicates that this buffer needs to be
 * marked as an escaped buffer in the corresponding log descriptor
 * block. The missing word can then be restored when the block is read
 * during recovery.
 *
 * If the source buffer has already been modified by a new transaction
 * since we took the last commit snapshot, we use the frozen copy of
 * that data for IO. If we end up using the existing buffer_head's data
 * for the write, then we *have* to lock the buffer to prevent anyone
 * else from using and possibly modifying it while the IO is in
 * progress.
 *
 * The function returns a pointer to the buffer_head to be used for IO.
 *
 * We assume that the journal has already been locked in this function.
 *
 * Return value:
 *  <0: Error
 * >=0: Finished OK
 *
 * On success:
 * Bit 0 set == escape performed on the data
 * Bit 1 set == buffer copy-out performed (kfree the data after IO)
 */

int journal_write_metadata_buffer(transaction_t *transaction,
                                  struct journal_head *jh_in,
                                  struct journal_head **jh_out,
                                  unsigned long blocknr)
{
        int need_copy_out = 0;
        int done_copy_out = 0;
        int do_escape = 0;
        char *mapped_data;
        struct buffer_head *new_bh;
        struct journal_head *new_jh;
        struct page *new_page;
        unsigned int new_offset;
        struct buffer_head *bh_in = jh2bh(jh_in);

        /*
         * The buffer really shouldn't be locked: only the current committing
         * transaction is allowed to write it, so nobody else is allowed
         * to do any IO.
         *
         * akpm: except if we're journalling data, and write() output is
         * also part of a shared mapping, and another thread has
         * decided to launch a writepage() against this buffer.
         */
        J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));

        new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);

        /*
         * If a new transaction has already done a buffer copy-out, then
         * we use that version of the data for the commit.
         */
        jbd_lock_bh_state(bh_in);
repeat:
        if (jh_in->b_frozen_data) {
                done_copy_out = 1;
                new_page = virt_to_page(jh_in->b_frozen_data);
                new_offset = offset_in_page(jh_in->b_frozen_data);
        } else {
                new_page = jh2bh(jh_in)->b_page;
                new_offset = offset_in_page(jh2bh(jh_in)->b_data);
        }

        mapped_data = kmap_atomic(new_page, KM_USER0);
        /*
         * Check for escaping
         */
        if (*((__be32 *)(mapped_data + new_offset)) ==
                        cpu_to_be32(JFS_MAGIC_NUMBER)) {
                need_copy_out = 1;
                do_escape = 1;
        }
        kunmap_atomic(mapped_data, KM_USER0);

        /*
         * Do we need to do a data copy?
         */
        if (need_copy_out && !done_copy_out) {
                char *tmp;

                jbd_unlock_bh_state(bh_in);
                tmp = jbd_slab_alloc(bh_in->b_size, GFP_NOFS);
                jbd_lock_bh_state(bh_in);
                if (jh_in->b_frozen_data) {
                        jbd_slab_free(tmp, bh_in->b_size);
                        goto repeat;
                }

                jh_in->b_frozen_data = tmp;
                mapped_data = kmap_atomic(new_page, KM_USER0);
                memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size);
                kunmap_atomic(mapped_data, KM_USER0);

                new_page = virt_to_page(tmp);
                new_offset = offset_in_page(tmp);
                done_copy_out = 1;
        }

        /*
         * Did we need to do an escaping? Now we've done all the
         * copying, we can finally do so.
         */
        if (do_escape) {
                mapped_data = kmap_atomic(new_page, KM_USER0);
                *((unsigned int *)(mapped_data + new_offset)) = 0;
                kunmap_atomic(mapped_data, KM_USER0);
        }

        /* keep subsequent assertions sane */
        new_bh->b_state = 0;
        init_buffer(new_bh, NULL, NULL);
        atomic_set(&new_bh->b_count, 1);
        jbd_unlock_bh_state(bh_in);

        new_jh = journal_add_journal_head(new_bh);      /* This sleeps */

        set_bh_page(new_bh, new_page, new_offset);
        new_jh->b_transaction = NULL;
        new_bh->b_size = jh2bh(jh_in)->b_size;
        new_bh->b_bdev = transaction->t_journal->j_dev;
        new_bh->b_blocknr = blocknr;
        set_buffer_mapped(new_bh);
        set_buffer_dirty(new_bh);

        *jh_out = new_jh;

        /*
         * The to-be-written buffer needs to get moved to the io queue,
         * and the original buffer whose contents we are shadowing or
         * copying is moved to the transaction's shadow queue.
         */
        JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
        journal_file_buffer(jh_in, transaction, BJ_Shadow);
        JBUFFER_TRACE(new_jh, "file as BJ_IO");
        journal_file_buffer(new_jh, transaction, BJ_IO);

        return do_escape | (done_copy_out << 1);
}
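
/*
 * For illustration only: a sketch of how a caller decodes the two result
 * bits above. The variable names and error handling here are
 * hypothetical; the actual consumer of this return value is the commit
 * path, which records escaped blocks in the descriptor tags.
 */
#if 0
        int flags = journal_write_metadata_buffer(transaction, jh,
                                                  &new_jh, blocknr);
        if (flags & 1)
                tag_flag |= JFS_FLAG_ESCAPE;    /* restore first word on replay */
        if (flags & 2)
                done_copy_out = 1;      /* frozen copy used; free it after IO */
#endif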
389 | ||
390 | /* | |
391 | * Allocation code for the journal file. Manage the space left in the | |
392 | * journal, so that we can begin checkpointing when appropriate. | |
393 | */ | |
394 | ||
395 | /* | |
396 | * __log_space_left: Return the number of free blocks left in the journal. | |
397 | * | |
398 | * Called with the journal already locked. | |
399 | * | |
400 | * Called under j_state_lock | |
401 | */ | |
402 | ||
403 | int __log_space_left(journal_t *journal) | |
404 | { | |
405 | int left = journal->j_free; | |
406 | ||
407 | assert_spin_locked(&journal->j_state_lock); | |
408 | ||
409 | /* | |
410 | * Be pessimistic here about the number of those free blocks which | |
411 | * might be required for log descriptor control blocks. | |
412 | */ | |
413 | ||
414 | #define MIN_LOG_RESERVED_BLOCKS 32 /* Allow for rounding errors */ | |
415 | ||
416 | left -= MIN_LOG_RESERVED_BLOCKS; | |
417 | ||
418 | if (left <= 0) | |
419 | return 0; | |
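        /*
         * Scale the remainder down by a further 1/8th as slack: e.g. with
         * j_free == 1000 this reports (1000 - 32) - (968 >> 3) == 847.
         */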
        left -= (left >> 3);
        return left;
}

/*
 * Called under j_state_lock. Returns true if a transaction was started.
 */
int __log_start_commit(journal_t *journal, tid_t target)
{
        /*
         * Are we already doing a recent enough commit?
         */
        if (!tid_geq(journal->j_commit_request, target)) {
                /*
                 * We want a new commit: OK, mark the request and wake up the
                 * commit thread. We do _not_ do the commit ourselves.
                 */

                journal->j_commit_request = target;
                jbd_debug(1, "JBD: requesting commit %d/%d\n",
                          journal->j_commit_request,
                          journal->j_commit_sequence);
                wake_up(&journal->j_wait_commit);
                return 1;
        }
        return 0;
}
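
/*
 * Note: tid_geq()/tid_gt() (<linux/jbd.h>) compare sequence numbers by
 * signed subtraction, so the test above stays correct when the 32-bit
 * tid counter wraps: e.g. tid_geq(1, 0xffffffff) is true, because
 * (1 - 0xffffffff) is 2 when evaluated as a signed int.
 */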
447 | ||
448 | int log_start_commit(journal_t *journal, tid_t tid) | |
449 | { | |
450 | int ret; | |
451 | ||
452 | spin_lock(&journal->j_state_lock); | |
453 | ret = __log_start_commit(journal, tid); | |
454 | spin_unlock(&journal->j_state_lock); | |
455 | return ret; | |
456 | } | |
457 | ||
458 | /* | |
459 | * Force and wait upon a commit if the calling process is not within | |
 * a transaction. This is used for forcing out undo-protected data which
 * contains bitmaps, when the fs is running out of space.
 *
 * We can only force the running transaction if we don't have an active handle;
 * otherwise, we will deadlock.
 *
 * Returns true if a transaction was started.
 */
int journal_force_commit_nested(journal_t *journal)
{
        transaction_t *transaction = NULL;
        tid_t tid;

        spin_lock(&journal->j_state_lock);
        if (journal->j_running_transaction && !current->journal_info) {
                transaction = journal->j_running_transaction;
                __log_start_commit(journal, transaction->t_tid);
        } else if (journal->j_committing_transaction)
                transaction = journal->j_committing_transaction;

        if (!transaction) {
                spin_unlock(&journal->j_state_lock);
                return 0;       /* Nothing to retry */
        }

        tid = transaction->t_tid;
        spin_unlock(&journal->j_state_lock);
        log_wait_commit(journal, tid);
        return 1;
}

/*
 * Start a commit of the current running transaction (if any). Returns true
 * if a transaction was started, and fills its tid in at *ptid
 */
int journal_start_commit(journal_t *journal, tid_t *ptid)
{
        int ret = 0;

        spin_lock(&journal->j_state_lock);
        if (journal->j_running_transaction) {
                tid_t tid = journal->j_running_transaction->t_tid;

                ret = __log_start_commit(journal, tid);
                if (ret && ptid)
                        *ptid = tid;
        } else if (journal->j_committing_transaction && ptid) {
                /*
                 * If ext3_write_super() recently started a commit, then we
                 * have to wait for completion of that transaction
                 */
                *ptid = journal->j_committing_transaction->t_tid;
                ret = 1;
        }
        spin_unlock(&journal->j_state_lock);
        return ret;
}
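
/*
 * For illustration only: the usual "force and wait" pairing built from
 * journal_start_commit() and log_wait_commit() below. A sketch;
 * "journal" and the error handling are hypothetical.
 */
#if 0
        tid_t tid;
        int err = 0;

        if (journal_start_commit(journal, &tid))
                err = log_wait_commit(journal, tid);    /* -EIO if aborted */
#endif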
517 | ||
518 | /* | |
519 | * Wait for a specified commit to complete. | |
520 | * The caller may not hold the journal lock. | |
521 | */ | |
522 | int log_wait_commit(journal_t *journal, tid_t tid) | |
523 | { | |
524 | int err = 0; | |
525 | ||
526 | #ifdef CONFIG_JBD_DEBUG | |
527 | spin_lock(&journal->j_state_lock); | |
528 | if (!tid_geq(journal->j_commit_request, tid)) { | |
529 | printk(KERN_EMERG | |
530 | "%s: error: j_commit_request=%d, tid=%d\n", | |
531 | __FUNCTION__, journal->j_commit_request, tid); | |
532 | } | |
533 | spin_unlock(&journal->j_state_lock); | |
534 | #endif | |
535 | spin_lock(&journal->j_state_lock); | |
536 | while (tid_gt(tid, journal->j_commit_sequence)) { | |
537 | jbd_debug(1, "JBD: want %d, j_commit_sequence=%d\n", | |
538 | tid, journal->j_commit_sequence); | |
539 | wake_up(&journal->j_wait_commit); | |
540 | spin_unlock(&journal->j_state_lock); | |
541 | wait_event(journal->j_wait_done_commit, | |
542 | !tid_gt(tid, journal->j_commit_sequence)); | |
543 | spin_lock(&journal->j_state_lock); | |
544 | } | |
545 | spin_unlock(&journal->j_state_lock); | |
546 | ||
547 | if (unlikely(is_journal_aborted(journal))) { | |
548 | printk(KERN_EMERG "journal commit I/O error\n"); | |
549 | err = -EIO; | |
550 | } | |
551 | return err; | |
552 | } | |
553 | ||
554 | /* | |
555 | * Log buffer allocation routines: | |
556 | */ | |
557 | ||
558 | int journal_next_log_block(journal_t *journal, unsigned long *retp) | |
559 | { | |
560 | unsigned long blocknr; | |
561 | ||
562 | spin_lock(&journal->j_state_lock); | |
563 | J_ASSERT(journal->j_free > 1); | |
564 | ||
565 | blocknr = journal->j_head; | |
566 | journal->j_head++; | |
567 | journal->j_free--; | |
568 | if (journal->j_head == journal->j_last) | |
569 | journal->j_head = journal->j_first; | |
570 | spin_unlock(&journal->j_state_lock); | |
571 | return journal_bmap(journal, blocknr, retp); | |
572 | } | |
573 | ||
574 | /* | |
575 | * Conversion of logical to physical block numbers for the journal | |
576 | * | |
577 | * On external journals the journal blocks are identity-mapped, so | |
578 | * this is a no-op. If needed, we can use j_blk_offset - everything is | |
579 | * ready. | |
580 | */ | |
581 | int journal_bmap(journal_t *journal, unsigned long blocknr, | |
582 | unsigned long *retp) | |
583 | { | |
584 | int err = 0; | |
585 | unsigned long ret; | |
586 | ||
587 | if (journal->j_inode) { | |
588 | ret = bmap(journal->j_inode, blocknr); | |
589 | if (ret) | |
590 | *retp = ret; | |
591 | else { | |
592 | char b[BDEVNAME_SIZE]; | |
593 | ||
594 | printk(KERN_ALERT "%s: journal block not found " | |
595 | "at offset %lu on %s\n", | |
596 | __FUNCTION__, | |
597 | blocknr, | |
598 | bdevname(journal->j_dev, b)); | |
599 | err = -EIO; | |
600 | __journal_abort_soft(journal, err); | |
601 | } | |
602 | } else { | |
603 | *retp = blocknr; /* +journal->j_blk_offset */ | |
604 | } | |
605 | return err; | |
606 | } | |
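
/*
 * For illustration only: turning a logical journal block into an
 * on-disk buffer, as journal_init_inode() below does for block 0 (the
 * journal superblock). A sketch; "journal" is assumed initialised.
 */
#if 0
        unsigned long phys;
        struct buffer_head *bh = NULL;

        if (journal_bmap(journal, 0, &phys) == 0)
                bh = __getblk(journal->j_dev, phys, journal->j_blocksize);
#endif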
607 | ||
608 | /* | |
609 | * We play buffer_head aliasing tricks to write data/metadata blocks to | |
610 | * the journal without copying their contents, but for journal | |
611 | * descriptor blocks we do need to generate bona fide buffers. | |
612 | * | |
613 | * After the caller of journal_get_descriptor_buffer() has finished modifying | |
614 | * the buffer's contents they really should run flush_dcache_page(bh->b_page). | |
615 | * But we don't bother doing that, so there will be coherency problems with | |
616 | * mmaps of blockdevs which hold live JBD-controlled filesystems. | |
617 | */ | |
618 | struct journal_head *journal_get_descriptor_buffer(journal_t *journal) | |
619 | { | |
620 | struct buffer_head *bh; | |
621 | unsigned long blocknr; | |
622 | int err; | |
623 | ||
624 | err = journal_next_log_block(journal, &blocknr); | |
625 | ||
626 | if (err) | |
627 | return NULL; | |
628 | ||
629 | bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize); | |
630 | lock_buffer(bh); | |
631 | memset(bh->b_data, 0, journal->j_blocksize); | |
632 | set_buffer_uptodate(bh); | |
633 | unlock_buffer(bh); | |
634 | BUFFER_TRACE(bh, "return this buffer"); | |
635 | return journal_add_journal_head(bh); | |
636 | } | |
637 | ||
638 | /* | |
639 | * Management for journal control blocks: functions to create and | |
640 | * destroy journal_t structures, and to initialise and read existing | |
641 | * journal blocks from disk. */ | |
642 | ||
643 | /* First: create and setup a journal_t object in memory. We initialise | |
644 | * very few fields yet: that has to wait until we have created the | |
 * journal structures from scratch, or loaded them from disk. */
646 | ||
647 | static journal_t * journal_init_common (void) | |
648 | { | |
649 | journal_t *journal; | |
650 | int err; | |
651 | ||
652 | journal = jbd_kmalloc(sizeof(*journal), GFP_KERNEL); | |
653 | if (!journal) | |
654 | goto fail; | |
655 | memset(journal, 0, sizeof(*journal)); | |
656 | ||
657 | init_waitqueue_head(&journal->j_wait_transaction_locked); | |
658 | init_waitqueue_head(&journal->j_wait_logspace); | |
659 | init_waitqueue_head(&journal->j_wait_done_commit); | |
660 | init_waitqueue_head(&journal->j_wait_checkpoint); | |
661 | init_waitqueue_head(&journal->j_wait_commit); | |
662 | init_waitqueue_head(&journal->j_wait_updates); | |
663 | mutex_init(&journal->j_barrier); | |
664 | mutex_init(&journal->j_checkpoint_mutex); | |
665 | spin_lock_init(&journal->j_revoke_lock); | |
666 | spin_lock_init(&journal->j_list_lock); | |
667 | spin_lock_init(&journal->j_state_lock); | |
668 | ||
669 | journal->j_commit_interval = (HZ * JBD_DEFAULT_MAX_COMMIT_AGE); | |
670 | ||
671 | /* The journal is marked for error until we succeed with recovery! */ | |
672 | journal->j_flags = JFS_ABORT; | |
673 | ||
674 | /* Set up a default-sized revoke table for the new mount. */ | |
675 | err = journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH); | |
676 | if (err) { | |
677 | kfree(journal); | |
678 | goto fail; | |
679 | } | |
680 | return journal; | |
681 | fail: | |
682 | return NULL; | |
683 | } | |
684 | ||
685 | /* journal_init_dev and journal_init_inode: | |
686 | * | |
687 | * Create a journal structure assigned some fixed set of disk blocks to | |
688 | * the journal. We don't actually touch those disk blocks yet, but we | |
689 | * need to set up all of the mapping information to tell the journaling | |
690 | * system where the journal blocks are. | |
691 | * | |
692 | */ | |
693 | ||
694 | /** | |
 * journal_t * journal_init_dev() - creates and initialises a journal structure
 * @bdev: Block device on which to create the journal
 * @fs_dev: Device which holds the journalled filesystem for this journal.
 * @start: Block nr at which the journal starts.
 * @len: Length of the journal in blocks.
 * @blocksize: blocksize of journalling device
 * @returns: a newly created journal_t *
 *
 * journal_init_dev creates a journal which maps a fixed contiguous
 * range of blocks on an arbitrary block device.
 *
 */
journal_t * journal_init_dev(struct block_device *bdev,
                        struct block_device *fs_dev,
                        int start, int len, int blocksize)
{
        journal_t *journal = journal_init_common();
        struct buffer_head *bh;
        int n;

        if (!journal)
                return NULL;

        /* journal descriptor can store up to n blocks -bzzz */
        journal->j_blocksize = blocksize;
        n = journal->j_blocksize / sizeof(journal_block_tag_t);
        journal->j_wbufsize = n;
        journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
        if (!journal->j_wbuf) {
                printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
                        __FUNCTION__);
                kfree(journal);
                return NULL;    /* don't fall through and dereference NULL */
        }
        journal->j_dev = bdev;
        journal->j_fs_dev = fs_dev;
        journal->j_blk_offset = start;
        journal->j_maxlen = len;

        bh = __getblk(journal->j_dev, start, journal->j_blocksize);
        J_ASSERT(bh != NULL);
        journal->j_sb_buffer = bh;
        journal->j_superblock = (journal_superblock_t *)bh->b_data;

        return journal;
}
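
/*
 * For illustration only: creating an external journal, roughly as a
 * filesystem mount path might. "journal_bdev", "sb", "start", "len"
 * and "blocksize" are hypothetical caller-supplied values.
 */
#if 0
        journal_t *j = journal_init_dev(journal_bdev, sb->s_bdev,
                                        start, len, blocksize);
        if (j && journal_load(j) == 0)
                /* journal is ready for use */;
#endif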
741 | ||
742 | /** | |
 * journal_t * journal_init_inode () - creates a journal which maps to an inode.
 * @inode: An inode to create the journal in
 *
 * journal_init_inode creates a journal which maps an on-disk inode as
 * the journal. The inode must exist already, must support bmap() and
 * must have all data blocks preallocated.
 */
journal_t * journal_init_inode (struct inode *inode)
{
        struct buffer_head *bh;
        journal_t *journal = journal_init_common();
        int err;
        int n;
        unsigned long blocknr;

        if (!journal)
                return NULL;

        journal->j_dev = journal->j_fs_dev = inode->i_sb->s_bdev;
        journal->j_inode = inode;
        jbd_debug(1,
                  "journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n",
                  journal, inode->i_sb->s_id, inode->i_ino,
                  (long long) inode->i_size,
                  inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);

        journal->j_maxlen = inode->i_size >> inode->i_sb->s_blocksize_bits;
        journal->j_blocksize = inode->i_sb->s_blocksize;

        /* journal descriptor can store up to n blocks -bzzz */
        n = journal->j_blocksize / sizeof(journal_block_tag_t);
        journal->j_wbufsize = n;
        journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
        if (!journal->j_wbuf) {
                printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
                        __FUNCTION__);
                kfree(journal);
                return NULL;
        }

        err = journal_bmap(journal, 0, &blocknr);
        /* If that failed, give up */
        if (err) {
                printk(KERN_ERR "%s: Cannot locate journal superblock\n",
                        __FUNCTION__);
                kfree(journal);
                return NULL;
        }

        bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
        J_ASSERT(bh != NULL);
        journal->j_sb_buffer = bh;
        journal->j_superblock = (journal_superblock_t *)bh->b_data;

        return journal;
}
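
/*
 * For illustration only: the inode-journal path as ext3-style callers
 * use it. "journal_inode" and "sb" are hypothetical; the filesystem
 * conventionally stashes its super_block in j_private.
 */
#if 0
        journal_t *j = journal_init_inode(journal_inode);
        if (j)
                j->j_private = sb;
#endif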
799 | ||
800 | /* | |
801 | * If the journal init or create aborts, we need to mark the journal | |
802 | * superblock as being NULL to prevent the journal destroy from writing | |
803 | * back a bogus superblock. | |
804 | */ | |
805 | static void journal_fail_superblock (journal_t *journal) | |
806 | { | |
807 | struct buffer_head *bh = journal->j_sb_buffer; | |
808 | brelse(bh); | |
809 | journal->j_sb_buffer = NULL; | |
810 | } | |
811 | ||
812 | /* | |
813 | * Given a journal_t structure, initialise the various fields for | |
814 | * startup of a new journaling session. We use this both when creating | |
815 | * a journal, and after recovering an old journal to reset it for | |
816 | * subsequent use. | |
817 | */ | |
818 | ||
819 | static int journal_reset(journal_t *journal) | |
820 | { | |
821 | journal_superblock_t *sb = journal->j_superblock; | |
822 | unsigned long first, last; | |
823 | ||
824 | first = be32_to_cpu(sb->s_first); | |
825 | last = be32_to_cpu(sb->s_maxlen); | |
826 | ||
827 | journal->j_first = first; | |
828 | journal->j_last = last; | |
829 | ||
830 | journal->j_head = first; | |
831 | journal->j_tail = first; | |
832 | journal->j_free = last - first; | |
833 | ||
834 | journal->j_tail_sequence = journal->j_transaction_sequence; | |
835 | journal->j_commit_sequence = journal->j_transaction_sequence - 1; | |
836 | journal->j_commit_request = journal->j_commit_sequence; | |
837 | ||
838 | journal->j_max_transaction_buffers = journal->j_maxlen / 4; | |
839 | ||
840 | /* Add the dynamic fields and write it to disk. */ | |
841 | journal_update_superblock(journal, 1); | |
842 | journal_start_thread(journal); | |
843 | return 0; | |
844 | } | |
845 | ||
846 | /** | |
847 | * int journal_create() - Initialise the new journal file | |
848 | * @journal: Journal to create. This structure must have been initialised | |
849 | * | |
850 | * Given a journal_t structure which tells us which disk blocks we can | |
851 | * use, create a new journal superblock and initialise all of the | |
852 | * journal fields from scratch. | |
853 | **/ | |
854 | int journal_create(journal_t *journal) | |
855 | { | |
856 | unsigned long blocknr; | |
857 | struct buffer_head *bh; | |
858 | journal_superblock_t *sb; | |
859 | int i, err; | |
860 | ||
861 | if (journal->j_maxlen < JFS_MIN_JOURNAL_BLOCKS) { | |
862 | printk (KERN_ERR "Journal length (%d blocks) too short.\n", | |
863 | journal->j_maxlen); | |
864 | journal_fail_superblock(journal); | |
865 | return -EINVAL; | |
866 | } | |
867 | ||
868 | if (journal->j_inode == NULL) { | |
869 | /* | |
870 | * We don't know what block to start at! | |
871 | */ | |
872 | printk(KERN_EMERG | |
873 | "%s: creation of journal on external device!\n", | |
874 | __FUNCTION__); | |
875 | BUG(); | |
876 | } | |
877 | ||
878 | /* Zero out the entire journal on disk. We cannot afford to | |
879 | have any blocks on disk beginning with JFS_MAGIC_NUMBER. */ | |
880 | jbd_debug(1, "JBD: Zeroing out journal blocks...\n"); | |
881 | for (i = 0; i < journal->j_maxlen; i++) { | |
882 | err = journal_bmap(journal, i, &blocknr); | |
883 | if (err) | |
884 | return err; | |
885 | bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize); | |
886 | lock_buffer(bh); | |
887 | memset (bh->b_data, 0, journal->j_blocksize); | |
888 | BUFFER_TRACE(bh, "marking dirty"); | |
889 | mark_buffer_dirty(bh); | |
890 | BUFFER_TRACE(bh, "marking uptodate"); | |
891 | set_buffer_uptodate(bh); | |
892 | unlock_buffer(bh); | |
893 | __brelse(bh); | |
894 | } | |
895 | ||
896 | sync_blockdev(journal->j_dev); | |
897 | jbd_debug(1, "JBD: journal cleared.\n"); | |
898 | ||
899 | /* OK, fill in the initial static fields in the new superblock */ | |
900 | sb = journal->j_superblock; | |
901 | ||
902 | sb->s_header.h_magic = cpu_to_be32(JFS_MAGIC_NUMBER); | |
903 | sb->s_header.h_blocktype = cpu_to_be32(JFS_SUPERBLOCK_V2); | |
904 | ||
905 | sb->s_blocksize = cpu_to_be32(journal->j_blocksize); | |
906 | sb->s_maxlen = cpu_to_be32(journal->j_maxlen); | |
907 | sb->s_first = cpu_to_be32(1); | |
908 | ||
909 | journal->j_transaction_sequence = 1; | |
910 | ||
911 | journal->j_flags &= ~JFS_ABORT; | |
912 | journal->j_format_version = 2; | |
913 | ||
914 | return journal_reset(journal); | |
915 | } | |
916 | ||
917 | /** | |
918 | * void journal_update_superblock() - Update journal sb on disk. | |
919 | * @journal: The journal to update. | |
920 | * @wait: Set to '0' if you don't want to wait for IO completion. | |
921 | * | |
922 | * Update a journal's dynamic superblock fields and write it to disk, | |
923 | * optionally waiting for the IO to complete. | |
924 | */ | |
925 | void journal_update_superblock(journal_t *journal, int wait) | |
926 | { | |
927 | journal_superblock_t *sb = journal->j_superblock; | |
928 | struct buffer_head *bh = journal->j_sb_buffer; | |
929 | ||
930 | /* | |
931 | * As a special case, if the on-disk copy is already marked as needing | |
932 | * no recovery (s_start == 0) and there are no outstanding transactions | |
933 | * in the filesystem, then we can safely defer the superblock update | |
934 | * until the next commit by setting JFS_FLUSHED. This avoids | |
         * attempting a write to a potentially read-only device.
         */
        if (sb->s_start == 0 && journal->j_tail_sequence ==
                                journal->j_transaction_sequence) {
                jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
                        "(start %ld, seq %d, errno %d)\n",
                        journal->j_tail, journal->j_tail_sequence,
                        journal->j_errno);
                goto out;
        }

        spin_lock(&journal->j_state_lock);
        jbd_debug(1,"JBD: updating superblock (start %ld, seq %d, errno %d)\n",
                  journal->j_tail, journal->j_tail_sequence, journal->j_errno);

        sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
        sb->s_start = cpu_to_be32(journal->j_tail);
        sb->s_errno = cpu_to_be32(journal->j_errno);
        spin_unlock(&journal->j_state_lock);

        BUFFER_TRACE(bh, "marking dirty");
        mark_buffer_dirty(bh);
        if (wait)
                sync_dirty_buffer(bh);
        else
                ll_rw_block(SWRITE, 1, &bh);

out:
        /* If we have just flushed the log (by marking s_start==0), then
         * any future commit will have to be careful to update the
         * superblock again to re-record the true start of the log. */

        spin_lock(&journal->j_state_lock);
        if (sb->s_start)
                journal->j_flags &= ~JFS_FLUSHED;
        else
                journal->j_flags |= JFS_FLUSHED;
        spin_unlock(&journal->j_state_lock);
}

/*
 * Read the superblock for a given journal, performing initial
 * validation of the format.
 */

static int journal_get_superblock(journal_t *journal)
{
        struct buffer_head *bh;
        journal_superblock_t *sb;
        int err = -EIO;

        bh = journal->j_sb_buffer;

        J_ASSERT(bh != NULL);
        if (!buffer_uptodate(bh)) {
                ll_rw_block(READ, 1, &bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh)) {
                        printk (KERN_ERR
                                "JBD: IO error reading journal superblock\n");
                        goto out;
                }
        }

        sb = journal->j_superblock;

        err = -EINVAL;

        if (sb->s_header.h_magic != cpu_to_be32(JFS_MAGIC_NUMBER) ||
            sb->s_blocksize != cpu_to_be32(journal->j_blocksize)) {
                printk(KERN_WARNING "JBD: no valid journal superblock found\n");
                goto out;
        }

        switch(be32_to_cpu(sb->s_header.h_blocktype)) {
        case JFS_SUPERBLOCK_V1:
                journal->j_format_version = 1;
                break;
        case JFS_SUPERBLOCK_V2:
                journal->j_format_version = 2;
                break;
        default:
                printk(KERN_WARNING "JBD: unrecognised superblock format ID\n");
                goto out;
        }

        if (be32_to_cpu(sb->s_maxlen) < journal->j_maxlen)
                journal->j_maxlen = be32_to_cpu(sb->s_maxlen);
        else if (be32_to_cpu(sb->s_maxlen) > journal->j_maxlen) {
                printk (KERN_WARNING "JBD: journal file too short\n");
                goto out;
        }

        return 0;

out:
        journal_fail_superblock(journal);
        return err;
}

/*
 * Load the on-disk journal superblock and read the key fields into the
 * journal_t.
 */

static int load_superblock(journal_t *journal)
{
        int err;
        journal_superblock_t *sb;

        err = journal_get_superblock(journal);
        if (err)
                return err;

        sb = journal->j_superblock;

        journal->j_tail_sequence = be32_to_cpu(sb->s_sequence);
        journal->j_tail = be32_to_cpu(sb->s_start);
        journal->j_first = be32_to_cpu(sb->s_first);
        journal->j_last = be32_to_cpu(sb->s_maxlen);
        journal->j_errno = be32_to_cpu(sb->s_errno);

        return 0;
}


/**
 * int journal_load() - Read journal from disk.
 * @journal: Journal to act on.
 *
 * Given a journal_t structure which tells us which disk blocks contain
 * a journal, read the journal from disk to initialise the in-memory
 * structures.
 */
int journal_load(journal_t *journal)
{
        int err;
        journal_superblock_t *sb;

        err = load_superblock(journal);
        if (err)
                return err;

        sb = journal->j_superblock;
        /* If this is a V2 superblock, then we have to check the
         * feature flags on it. */
1081 | ||
1082 | if (journal->j_format_version >= 2) { | |
1083 | if ((sb->s_feature_ro_compat & | |
1084 | ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) || | |
1085 | (sb->s_feature_incompat & | |
1086 | ~cpu_to_be32(JFS_KNOWN_INCOMPAT_FEATURES))) { | |
1087 | printk (KERN_WARNING | |
1088 | "JBD: Unrecognised features on journal\n"); | |
1089 | return -EINVAL; | |
1090 | } | |
1091 | } | |
1092 | ||
1093 | /* | |
1094 | * Create a slab for this blocksize | |
1095 | */ | |
1096 | err = journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize)); | |
1097 | if (err) | |
1098 | return err; | |
1099 | ||
1100 | /* Let the recovery code check whether it needs to recover any | |
1101 | * data from the journal. */ | |
1102 | if (journal_recover(journal)) | |
1103 | goto recovery_error; | |
1104 | ||
1105 | /* OK, we've finished with the dynamic journal bits: | |
1106 | * reinitialise the dynamic contents of the superblock in memory | |
1107 | * and reset them on disk. */ | |
1108 | if (journal_reset(journal)) | |
1109 | goto recovery_error; | |
1110 | ||
1111 | journal->j_flags &= ~JFS_ABORT; | |
1112 | journal->j_flags |= JFS_LOADED; | |
1113 | return 0; | |
1114 | ||
1115 | recovery_error: | |
1116 | printk (KERN_WARNING "JBD: recovery failed\n"); | |
1117 | return -EIO; | |
1118 | } | |
1119 | ||
1120 | /** | |
1121 | * void journal_destroy() - Release a journal_t structure. | |
1122 | * @journal: Journal to act on. | |
1123 | * | |
1124 | * Release a journal_t structure once it is no longer in use by the | |
1125 | * journaled object. | |
1126 | */ | |
1127 | void journal_destroy(journal_t *journal) | |
1128 | { | |
1129 | /* Wait for the commit thread to wake up and die. */ | |
1130 | journal_kill_thread(journal); | |
1131 | ||
1132 | /* Force a final log commit */ | |
1133 | if (journal->j_running_transaction) | |
1134 | journal_commit_transaction(journal); | |
1135 | ||
1136 | /* Force any old transactions to disk */ | |
1137 | ||
1138 | /* Totally anal locking here... */ | |
1139 | spin_lock(&journal->j_list_lock); | |
1140 | while (journal->j_checkpoint_transactions != NULL) { | |
1141 | spin_unlock(&journal->j_list_lock); | |
1142 | log_do_checkpoint(journal); | |
1143 | spin_lock(&journal->j_list_lock); | |
1144 | } | |
1145 | ||
1146 | J_ASSERT(journal->j_running_transaction == NULL); | |
1147 | J_ASSERT(journal->j_committing_transaction == NULL); | |
1148 | J_ASSERT(journal->j_checkpoint_transactions == NULL); | |
1149 | spin_unlock(&journal->j_list_lock); | |
1150 | ||
1151 | /* We can now mark the journal as empty. */ | |
1152 | journal->j_tail = 0; | |
1153 | journal->j_tail_sequence = ++journal->j_transaction_sequence; | |
1154 | if (journal->j_sb_buffer) { | |
1155 | journal_update_superblock(journal, 1); | |
1156 | brelse(journal->j_sb_buffer); | |
1157 | } | |
1158 | ||
1159 | if (journal->j_inode) | |
1160 | iput(journal->j_inode); | |
1161 | if (journal->j_revoke) | |
1162 | journal_destroy_revoke(journal); | |
1163 | kfree(journal->j_wbuf); | |
1164 | kfree(journal); | |
1165 | } | |
1166 | ||
1167 | ||
1168 | /** | |
1169 | *int journal_check_used_features () - Check if features specified are used. | |
1170 | * @journal: Journal to check. | |
1171 | * @compat: bitmask of compatible features | |
1172 | * @ro: bitmask of features that force read-only mount | |
1173 | * @incompat: bitmask of incompatible features | |
1174 | * | |
1175 | * Check whether the journal uses all of a given set of | |
1176 | * features. Return true (non-zero) if it does. | |
1177 | **/ | |
1178 | ||
1179 | int journal_check_used_features (journal_t *journal, unsigned long compat, | |
1180 | unsigned long ro, unsigned long incompat) | |
1181 | { | |
1182 | journal_superblock_t *sb; | |
1183 | ||
1184 | if (!compat && !ro && !incompat) | |
1185 | return 1; | |
1186 | if (journal->j_format_version == 1) | |
1187 | return 0; | |
1188 | ||
1189 | sb = journal->j_superblock; | |
1190 | ||
1191 | if (((be32_to_cpu(sb->s_feature_compat) & compat) == compat) && | |
1192 | ((be32_to_cpu(sb->s_feature_ro_compat) & ro) == ro) && | |
1193 | ((be32_to_cpu(sb->s_feature_incompat) & incompat) == incompat)) | |
1194 | return 1; | |
1195 | ||
1196 | return 0; | |
1197 | } | |
1198 | ||
1199 | /** | |
1200 | * int journal_check_available_features() - Check feature set in journalling layer | |
1201 | * @journal: Journal to check. | |
1202 | * @compat: bitmask of compatible features | |
1203 | * @ro: bitmask of features that force read-only mount | |
1204 | * @incompat: bitmask of incompatible features | |
1205 | * | |
1206 | * Check whether the journaling code supports the use of | |
1207 | * all of a given set of features on this journal. Return true | |
1208 | * (non-zero) if it can. */ | |
1209 | ||
1210 | int journal_check_available_features (journal_t *journal, unsigned long compat, | |
1211 | unsigned long ro, unsigned long incompat) | |
1212 | { | |
1213 | journal_superblock_t *sb; | |
1214 | ||
1215 | if (!compat && !ro && !incompat) | |
1216 | return 1; | |
1217 | ||
1218 | sb = journal->j_superblock; | |
1219 | ||
1220 | /* We can support any known requested features iff the | |
1221 | * superblock is in version 2. Otherwise we fail to support any | |
1222 | * extended sb features. */ | |
1223 | ||
1224 | if (journal->j_format_version != 2) | |
1225 | return 0; | |
1226 | ||
1227 | if ((compat & JFS_KNOWN_COMPAT_FEATURES) == compat && | |
1228 | (ro & JFS_KNOWN_ROCOMPAT_FEATURES) == ro && | |
1229 | (incompat & JFS_KNOWN_INCOMPAT_FEATURES) == incompat) | |
1230 | return 1; | |
1231 | ||
1232 | return 0; | |
1233 | } | |
1234 | ||
1235 | /** | |
1236 | * int journal_set_features () - Mark a given journal feature in the superblock | |
1237 | * @journal: Journal to act on. | |
1238 | * @compat: bitmask of compatible features | |
1239 | * @ro: bitmask of features that force read-only mount | |
1240 | * @incompat: bitmask of incompatible features | |
1241 | * | |
1242 | * Mark a given journal feature as present on the | |
1243 | * superblock. Returns true if the requested features could be set. | |
1244 | * | |
1245 | */ | |
1246 | ||
1247 | int journal_set_features (journal_t *journal, unsigned long compat, | |
1248 | unsigned long ro, unsigned long incompat) | |
1249 | { | |
1250 | journal_superblock_t *sb; | |
1251 | ||
1252 | if (journal_check_used_features(journal, compat, ro, incompat)) | |
1253 | return 1; | |
1254 | ||
1255 | if (!journal_check_available_features(journal, compat, ro, incompat)) | |
1256 | return 0; | |
1257 | ||
1258 | jbd_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n", | |
1259 | compat, ro, incompat); | |
1260 | ||
1261 | sb = journal->j_superblock; | |
1262 | ||
1263 | sb->s_feature_compat |= cpu_to_be32(compat); | |
1264 | sb->s_feature_ro_compat |= cpu_to_be32(ro); | |
1265 | sb->s_feature_incompat |= cpu_to_be32(incompat); | |
1266 | ||
1267 | return 1; | |
1268 | } | |
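
/*
 * For illustration only: a sketch of recording the revoke feature, the
 * incompatible feature covered by JFS_KNOWN_INCOMPAT_FEATURES in this
 * version of JBD. Whether and where to call this is up to the caller.
 */
#if 0
        if (!journal_set_features(journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE))
                printk(KERN_WARNING "journal cannot record revoke records\n");
#endif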
1269 | ||
1270 | ||
1271 | /** | |
1272 | * int journal_update_format () - Update on-disk journal structure. | |
1273 | * @journal: Journal to act on. | |
1274 | * | |
1275 | * Given an initialised but unloaded journal struct, poke about in the | |
1276 | * on-disk structure to update it to the most recent supported version. | |
1277 | */ | |
1278 | int journal_update_format (journal_t *journal) | |
1279 | { | |
1280 | journal_superblock_t *sb; | |
1281 | int err; | |
1282 | ||
1283 | err = journal_get_superblock(journal); | |
1284 | if (err) | |
1285 | return err; | |
1286 | ||
1287 | sb = journal->j_superblock; | |
1288 | ||
1289 | switch (be32_to_cpu(sb->s_header.h_blocktype)) { | |
1290 | case JFS_SUPERBLOCK_V2: | |
1291 | return 0; | |
1292 | case JFS_SUPERBLOCK_V1: | |
1293 | return journal_convert_superblock_v1(journal, sb); | |
1294 | default: | |
1295 | break; | |
1296 | } | |
1297 | return -EINVAL; | |
1298 | } | |
1299 | ||
1300 | static int journal_convert_superblock_v1(journal_t *journal, | |
1301 | journal_superblock_t *sb) | |
1302 | { | |
1303 | int offset, blocksize; | |
1304 | struct buffer_head *bh; | |
1305 | ||
1306 | printk(KERN_WARNING | |
1307 | "JBD: Converting superblock from version 1 to 2.\n"); | |
1308 | ||
1309 | /* Pre-initialise new fields to zero */ | |
1310 | offset = ((char *) &(sb->s_feature_compat)) - ((char *) sb); | |
1311 | blocksize = be32_to_cpu(sb->s_blocksize); | |
1312 | memset(&sb->s_feature_compat, 0, blocksize-offset); | |
1313 | ||
1314 | sb->s_nr_users = cpu_to_be32(1); | |
1315 | sb->s_header.h_blocktype = cpu_to_be32(JFS_SUPERBLOCK_V2); | |
1316 | journal->j_format_version = 2; | |
1317 | ||
1318 | bh = journal->j_sb_buffer; | |
1319 | BUFFER_TRACE(bh, "marking dirty"); | |
1320 | mark_buffer_dirty(bh); | |
1321 | sync_dirty_buffer(bh); | |
1322 | return 0; | |
1323 | } | |
1324 | ||
1325 | ||
1326 | /** | |
1327 | * int journal_flush () - Flush journal | |
1328 | * @journal: Journal to act on. | |
1329 | * | |
1330 | * Flush all data for a given journal to disk and empty the journal. | |
1331 | * Filesystems can use this when remounting readonly to ensure that | |
1332 | * recovery does not need to happen on remount. | |
1333 | */ | |
1334 | ||
1335 | int journal_flush(journal_t *journal) | |
1336 | { | |
1337 | int err = 0; | |
1338 | transaction_t *transaction = NULL; | |
1339 | unsigned long old_tail; | |
1340 | ||
1341 | spin_lock(&journal->j_state_lock); | |
1342 | ||
1343 | /* Force everything buffered to the log... */ | |
1344 | if (journal->j_running_transaction) { | |
1345 | transaction = journal->j_running_transaction; | |
1346 | __log_start_commit(journal, transaction->t_tid); | |
1347 | } else if (journal->j_committing_transaction) | |
1348 | transaction = journal->j_committing_transaction; | |
1349 | ||
1350 | /* Wait for the log commit to complete... */ | |
1351 | if (transaction) { | |
1352 | tid_t tid = transaction->t_tid; | |
1353 | ||
1354 | spin_unlock(&journal->j_state_lock); | |
1355 | log_wait_commit(journal, tid); | |
1356 | } else { | |
1357 | spin_unlock(&journal->j_state_lock); | |
1358 | } | |
1359 | ||
1360 | /* ...and flush everything in the log out to disk. */ | |
1361 | spin_lock(&journal->j_list_lock); | |
1362 | while (!err && journal->j_checkpoint_transactions != NULL) { | |
1363 | spin_unlock(&journal->j_list_lock); | |
1364 | err = log_do_checkpoint(journal); | |
1365 | spin_lock(&journal->j_list_lock); | |
1366 | } | |
1367 | spin_unlock(&journal->j_list_lock); | |
1368 | cleanup_journal_tail(journal); | |
1369 | ||
1370 | /* Finally, mark the journal as really needing no recovery. | |
1371 | * This sets s_start==0 in the underlying superblock, which is | |
1372 | * the magic code for a fully-recovered superblock. Any future | |
1373 | * commits of data to the journal will restore the current | |
1374 | * s_start value. */ | |
1375 | spin_lock(&journal->j_state_lock); | |
1376 | old_tail = journal->j_tail; | |
1377 | journal->j_tail = 0; | |
1378 | spin_unlock(&journal->j_state_lock); | |
1379 | journal_update_superblock(journal, 1); | |
1380 | spin_lock(&journal->j_state_lock); | |
1381 | journal->j_tail = old_tail; | |
1382 | ||
1383 | J_ASSERT(!journal->j_running_transaction); | |
1384 | J_ASSERT(!journal->j_committing_transaction); | |
1385 | J_ASSERT(!journal->j_checkpoint_transactions); | |
1386 | J_ASSERT(journal->j_head == journal->j_tail); | |
1387 | J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence); | |
1388 | spin_unlock(&journal->j_state_lock); | |
1389 | return err; | |
1390 | } | |
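
/*
 * For illustration only: the remount-read-only pattern journal_flush()
 * is designed for, roughly as ext3 uses it. Updates must be quiesced
 * around the flush; a sketch, with error handling omitted.
 */
#if 0
        journal_lock_updates(journal);          /* block new handles */
        journal_flush(journal);                 /* commit and checkpoint all */
        journal_unlock_updates(journal);
#endif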
1391 | ||
1392 | /** | |
1393 | * int journal_wipe() - Wipe journal contents | |
1394 | * @journal: Journal to act on. | |
1395 | * @write: flag (see below) | |
1396 | * | |
1397 | * Wipe out all of the contents of a journal, safely. This will produce | |
1398 | * a warning if the journal contains any valid recovery information. | |
1399 | * Must be called between journal_init_*() and journal_load(). | |
1400 | * | |
1401 | * If 'write' is non-zero, then we wipe out the journal on disk; otherwise | |
1402 | * we merely suppress recovery. | |
1403 | */ | |
1404 | ||
1405 | int journal_wipe(journal_t *journal, int write) | |
1406 | { | |
1407 | journal_superblock_t *sb; | |
1408 | int err = 0; | |
1409 | ||
1410 | J_ASSERT (!(journal->j_flags & JFS_LOADED)); | |
1411 | ||
1412 | err = load_superblock(journal); | |
1413 | if (err) | |
1414 | return err; | |
1415 | ||
1416 | sb = journal->j_superblock; | |
1417 | ||
1418 | if (!journal->j_tail) | |
1419 | goto no_recovery; | |
1420 | ||
1421 | printk (KERN_WARNING "JBD: %s recovery information on journal\n", | |
1422 | write ? "Clearing" : "Ignoring"); | |
1423 | ||
1424 | err = journal_skip_recovery(journal); | |
1425 | if (write) | |
1426 | journal_update_superblock(journal, 1); | |
1427 | ||
1428 | no_recovery: | |
1429 | return err; | |
1430 | } | |
1431 | ||
1432 | /* | |
1433 | * journal_dev_name: format a character string to describe on what | |
1434 | * device this journal is present. | |
1435 | */ | |
1436 | ||
1437 | static const char *journal_dev_name(journal_t *journal, char *buffer) | |
1438 | { | |
1439 | struct block_device *bdev; | |
1440 | ||
1441 | if (journal->j_inode) | |
1442 | bdev = journal->j_inode->i_sb->s_bdev; | |
1443 | else | |
1444 | bdev = journal->j_dev; | |
1445 | ||
1446 | return bdevname(bdev, buffer); | |
1447 | } | |
1448 | ||
/*
 * Journal abort has very specific semantics; these are described in the
 * kernel-doc for journal_abort() below.
 *
 * Two internal functions, which provide abort to the jbd layer itself,
 * are here.
 */
1456 | ||
1457 | /* | |
1458 | * Quick version for internal journal use (doesn't lock the journal). | |
1459 | * Aborts hard --- we mark the abort as occurred, but do _nothing_ else, | |
1460 | * and don't attempt to make any other journal updates. | |
1461 | */ | |
1462 | void __journal_abort_hard(journal_t *journal) | |
1463 | { | |
1464 | transaction_t *transaction; | |
1465 | char b[BDEVNAME_SIZE]; | |
1466 | ||
1467 | if (journal->j_flags & JFS_ABORT) | |
1468 | return; | |
1469 | ||
1470 | printk(KERN_ERR "Aborting journal on device %s.\n", | |
1471 | journal_dev_name(journal, b)); | |
1472 | ||
1473 | spin_lock(&journal->j_state_lock); | |
1474 | journal->j_flags |= JFS_ABORT; | |
1475 | transaction = journal->j_running_transaction; | |
1476 | if (transaction) | |
1477 | __log_start_commit(journal, transaction->t_tid); | |
1478 | spin_unlock(&journal->j_state_lock); | |
1479 | } | |
1480 | ||
1481 | /* Soft abort: record the abort error status in the journal superblock, | |
1482 | * but don't do any other IO. */ | |
1483 | static void __journal_abort_soft (journal_t *journal, int errno) | |
1484 | { | |
1485 | if (journal->j_flags & JFS_ABORT) | |
1486 | return; | |
1487 | ||
1488 | if (!journal->j_errno) | |
1489 | journal->j_errno = errno; | |
1490 | ||
1491 | __journal_abort_hard(journal); | |
1492 | ||
1493 | if (errno) | |
1494 | journal_update_superblock(journal, 1); | |
1495 | } | |
1496 | ||
1497 | /** | |
1498 | * void journal_abort () - Shutdown the journal immediately. | |
1499 | * @journal: the journal to shutdown. | |
1500 | * @errno: an error number to record in the journal indicating | |
1501 | * the reason for the shutdown. | |
1502 | * | |
1503 | * Perform a complete, immediate shutdown of the ENTIRE | |
1504 | * journal (not of a single transaction). This operation cannot be | |
1505 | * undone without closing and reopening the journal. | |
1506 | * | |
1507 | * The journal_abort function is intended to support higher level error | |
1508 | * recovery mechanisms such as the ext2/ext3 remount-readonly error | |
1509 | * mode. | |
1510 | * | |
1511 | * Journal abort has very specific semantics. Any existing dirty, | |
1512 | * unjournaled buffers in the main filesystem will still be written to | |
1513 | * disk by bdflush, but the journaling mechanism will be suspended | |
1514 | * immediately and no further transaction commits will be honoured. | |
1515 | * | |
1516 | * Any dirty, journaled buffers will be written back to disk without | |
1517 | * hitting the journal. Atomicity cannot be guaranteed on an aborted | |
1518 | * filesystem, but we _do_ attempt to leave as much data as possible | |
1519 | * behind for fsck to use for cleanup. | |
1520 | * | |
1521 | * Any attempt to get a new transaction handle on a journal which is in | |
1522 | * ABORT state will just result in an -EROFS error return. A | |
1523 | * journal_stop on an existing handle will return -EIO if we have | |
1524 | * entered abort state during the update. | |
1525 | * | |
1526 | * Recursive transactions are not disturbed by journal abort until the | |
1527 | * final journal_stop, which will receive the -EIO error. | |
1528 | * | |
1529 | * Finally, the journal_abort call allows the caller to supply an errno | |
1530 | * which will be recorded (if possible) in the journal superblock. This | |
1531 | * allows a client to record failure conditions in the middle of a | |
1532 | * transaction without having to complete the transaction to record the | |
1533 | * failure to disk. ext3_error, for example, now uses this | |
1534 | * functionality. | |
1535 | * | |
1536 | * Errors which originate from within the journaling layer will NOT | |
1537 | * supply an errno; a null errno implies that absolutely no further | |
1538 | * writes are done to the journal (unless there are any already in | |
1539 | * progress). | |
1540 | * | |
1541 | */ | |
1542 | ||
1543 | void journal_abort(journal_t *journal, int errno) | |
1544 | { | |
1545 | __journal_abort_soft(journal, errno); | |
1546 | } | |
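/*
 * Illustrative sketch (editorial, not part of the original file): how a
 * client filesystem's error handler might drive journal_abort(), loosely
 * modeled on the ext3-style remount-readonly mode described above.  The
 * function name and the way the journal pointer arrives are hypothetical.
 */
#if 0
static void example_handle_fs_error(struct super_block *sb,
				    journal_t *journal, int errno)
{
	/* Record the failure and stop all further commits at once. */
	journal_abort(journal, errno);

	/* Any new journal_start() now fails with -EROFS, so degrade
	 * the filesystem to read-only rather than crashing. */
	sb->s_flags |= MS_RDONLY;
}
#endif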
1547 | ||
1548 | /** | |
1549 | * int journal_errno () - returns the journal's error state. | |
1550 | * @journal: journal to examine. | |
1551 | * | |
1552 | * This is the errno number set with journal_abort(), the last | |
1553 | * time the journal was mounted - if the journal was stopped | |
1554 | * without calling abort this will be 0. | |
1555 | * | |
1556 | * If the journal has been aborted during this mount, -EROFS will | |
1557 | * be returned. | |
1558 | */ | |
1559 | int journal_errno(journal_t *journal) | |
1560 | { | |
1561 | int err; | |
1562 | ||
1563 | spin_lock(&journal->j_state_lock); | |
1564 | if (journal->j_flags & JFS_ABORT) | |
1565 | err = -EROFS; | |
1566 | else | |
1567 | err = journal->j_errno; | |
1568 | spin_unlock(&journal->j_state_lock); | |
1569 | return err; | |
1570 | } | |
1571 | ||
1572 | /** | |
1573 | * int journal_clear_err () - clears the journal's error state | |
1574 | * @journal: journal to act on. | |
1575 | * | |
1576 | * An error must be cleared or acked to take the filesystem out of | |
1577 | * readonly mode. | |
1578 | */ | |
1579 | int journal_clear_err(journal_t *journal) | |
1580 | { | |
1581 | int err = 0; | |
1582 | ||
1583 | spin_lock(&journal->j_state_lock); | |
1584 | if (journal->j_flags & JFS_ABORT) | |
1585 | err = -EROFS; | |
1586 | else | |
1587 | journal->j_errno = 0; | |
1588 | spin_unlock(&journal->j_state_lock); | |
1589 | return err; | |
1590 | } | |
1591 | ||
1592 | /** | |
1593 | * void journal_ack_err() - Ack journal err. | |
1594 | * @journal: journal to act on. | |
1595 | * | |
1596 | * An error must be cleared or acked to take the filesystem out of | |
1597 | * readonly mode. | |
1598 | */ | |
1599 | void journal_ack_err(journal_t *journal) | |
1600 | { | |
1601 | spin_lock(&journal->j_state_lock); | |
1602 | if (journal->j_errno) | |
1603 | journal->j_flags |= JFS_ACK_ERR; | |
1604 | spin_unlock(&journal->j_state_lock); | |
1605 | } | |
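/*
 * Illustrative sketch (editorial): the sequence a filesystem might run at
 * remount time to leave readonly mode after an abort, using the three
 * helpers above.  The function name is hypothetical.
 */
#if 0
static int example_clear_journal_err(journal_t *journal)
{
	int err = journal_errno(journal);

	if (err) {
		/* The error must be cleared (or acked) before the
		 * filesystem may leave readonly mode. */
		journal_clear_err(journal);
		journal_ack_err(journal);
	}
	return err;
}
#endif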
1606 | ||
1607 | int journal_blocks_per_page(struct inode *inode) | |
1608 | { | |
1609 | return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); | |
1610 | } | |
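/*
 * Worked example (editorial): with 4K pages (PAGE_CACHE_SHIFT == 12) and
 * a 1K filesystem block size (s_blocksize_bits == 10), the function above
 * returns 1 << (12 - 10) == 4 journal blocks per page.
 */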
1611 | ||
1612 | /* | |
1613 | * Simple support for retrying memory allocations. Introduced to help to | |
1614 | * debug different VM deadlock avoidance strategies. | |
1615 | */ | |
1616 | void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry) | |
1617 | { | |
1618 | return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0)); | |
1619 | } | |
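/*
 * Editorial note: with `retry' set, __GFP_NOFAIL makes the page allocator
 * retry internally instead of returning NULL, so callers that pass retry
 * never need an out-of-memory path for this allocation.
 */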
1620 | ||
1621 | /* | |
1622 | * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed | |
1623 | * and allocate frozen and commit buffers from these slabs. | |
1624 | * | |
1625 | * The reason for doing this is to avoid SLAB_DEBUG, whose debug | |
1626 | * padding could cause a bh to cross a page boundary. | |
1627 | */ | |
1628 | ||
1629 | #define JBD_MAX_SLABS 5 | |
1630 | #define JBD_SLAB_INDEX(size) ((size) >> 11) | |
1631 | ||
1632 | static kmem_cache_t *jbd_slab[JBD_MAX_SLABS]; | |
1633 | static const char *jbd_slab_names[JBD_MAX_SLABS] = { | |
1634 | "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k" | |
1635 | }; | |
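/*
 * Editorial note: JBD_SLAB_INDEX maps the power-of-two buffer sizes as
 * 1k -> 0, 2k -> 1, 4k -> 2, 8k -> 4.  Index 3 is only reachable for a
 * size between 6k and 8k-1, which no block size produces, hence the NULL
 * entry in the name table above.
 */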
1636 | ||
1637 | static void journal_destroy_jbd_slabs(void) | |
1638 | { | |
1639 | int i; | |
1640 | ||
1641 | for (i = 0; i < JBD_MAX_SLABS; i++) { | |
1642 | if (jbd_slab[i]) | |
1643 | kmem_cache_destroy(jbd_slab[i]); | |
1644 | jbd_slab[i] = NULL; | |
1645 | } | |
1646 | } | |
1647 | ||
1648 | static int journal_create_jbd_slab(size_t slab_size) | |
1649 | { | |
1650 | int i = JBD_SLAB_INDEX(slab_size); | |
1651 | ||
1652 | BUG_ON(i >= JBD_MAX_SLABS); | |
1653 | ||
1654 | /* | |
1655 | * Check if we already have a slab created for this size | |
1656 | */ | |
1657 | if (jbd_slab[i]) | |
1658 | return 0; | |
1659 | ||
1660 | /* | |
1661 | * Create a slab and force alignment to be same as slabsize - | |
1662 | * this will make sure that allocations won't cross the page | |
1663 | * boundary. | |
1664 | */ | |
1665 | jbd_slab[i] = kmem_cache_create(jbd_slab_names[i], | |
1666 | slab_size, slab_size, 0, NULL, NULL); | |
1667 | if (!jbd_slab[i]) { | |
1668 | printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n"); | |
1669 | return -ENOMEM; | |
1670 | } | |
1671 | return 0; | |
1672 | } | |
1673 | ||
1674 | void * jbd_slab_alloc(size_t size, gfp_t flags) | |
1675 | { | |
1676 | int idx; | |
1677 | ||
1678 | idx = JBD_SLAB_INDEX(size); | |
1679 | BUG_ON(jbd_slab[idx] == NULL); | |
1680 | return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL); | |
1681 | } | |
1682 | ||
1683 | void jbd_slab_free(void *ptr, size_t size) | |
1684 | { | |
1685 | int idx; | |
1686 | ||
1687 | idx = JBD_SLAB_INDEX(size); | |
1688 | BUG_ON(jbd_slab[idx] == NULL); | |
1689 | kmem_cache_free(jbd_slab[idx], ptr); | |
1690 | } | |
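/*
 * Illustrative sketch (editorial): the intended pairing of the slab
 * helpers above.  The slab for a block size is created once, then
 * frozen/commit copies are carved out of it; the copies never cross a
 * page boundary.  The function and variable names are hypothetical.
 */
#if 0
static int example_freeze_buffer(struct buffer_head *bh)
{
	char *frozen;
	int err;

	err = journal_create_jbd_slab(bh->b_size);	/* once per size */
	if (err)
		return err;

	frozen = jbd_slab_alloc(bh->b_size, GFP_NOFS);	/* __GFP_NOFAIL */
	memcpy(frozen, bh->b_data, bh->b_size);
	/* ... hand the stable copy to the commit code ... */
	jbd_slab_free(frozen, bh->b_size);
	return 0;
}
#endif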
1691 | ||
1692 | /* | |
1693 | * Journal_head storage management | |
1694 | */ | |
1695 | static kmem_cache_t *journal_head_cache; | |
1696 | #ifdef CONFIG_JBD_DEBUG | |
1697 | static atomic_t nr_journal_heads = ATOMIC_INIT(0); | |
1698 | #endif | |
1699 | ||
1700 | static int journal_init_journal_head_cache(void) | |
1701 | { | |
1702 | int retval; | |
1703 | ||
1704 | J_ASSERT(journal_head_cache == NULL); | |
1705 | journal_head_cache = kmem_cache_create("journal_head", | |
1706 | sizeof(struct journal_head), | |
1707 | 0, /* offset */ | |
1708 | 0, /* flags */ | |
1709 | NULL, /* ctor */ | |
1710 | NULL); /* dtor */ | |
1711 | retval = 0; | |
1712 | if (journal_head_cache == NULL) { | |
1713 | retval = -ENOMEM; | |
1714 | printk(KERN_EMERG "JBD: no memory for journal_head cache\n"); | |
1715 | } | |
1716 | return retval; | |
1717 | } | |
1718 | ||
1719 | static void journal_destroy_journal_head_cache(void) | |
1720 | { | |
1721 | J_ASSERT(journal_head_cache != NULL); | |
1722 | kmem_cache_destroy(journal_head_cache); | |
1723 | journal_head_cache = NULL; | |
1724 | } | |
1725 | ||
1726 | /* | |
1727 | * journal_head splicing and dicing | |
1728 | */ | |
1729 | static struct journal_head *journal_alloc_journal_head(void) | |
1730 | { | |
1731 | struct journal_head *ret; | |
1732 | static unsigned long last_warning; | |
1733 | ||
1734 | #ifdef CONFIG_JBD_DEBUG | |
1735 | atomic_inc(&nr_journal_heads); | |
1736 | #endif | |
1737 | ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS); | |
1738 | if (ret == NULL) { | |
1739 | jbd_debug(1, "out of memory for journal_head\n"); | |
1740 | if (time_after(jiffies, last_warning + 5*HZ)) { | |
1741 | printk(KERN_NOTICE "ENOMEM in %s, retrying.\n", | |
1742 | __FUNCTION__); | |
1743 | last_warning = jiffies; | |
1744 | } | |
1745 | while (ret == NULL) { | |
1746 | yield(); | |
1747 | ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS); | |
1748 | } | |
1749 | } | |
1750 | return ret; | |
1751 | } | |
1752 | ||
1753 | static void journal_free_journal_head(struct journal_head *jh) | |
1754 | { | |
1755 | #ifdef CONFIG_JBD_DEBUG | |
1756 | atomic_dec(&nr_journal_heads); | |
1757 | memset(jh, JBD_POISON_FREE, sizeof(*jh)); | |
1758 | #endif | |
1759 | kmem_cache_free(journal_head_cache, jh); | |
1760 | } | |
1761 | ||
1762 | /* | |
1763 | * A journal_head is attached to a buffer_head whenever JBD has an | |
1764 | * interest in the buffer. | |
1765 | * | |
1766 | * Whenever a buffer has an attached journal_head, its ->b_state:BH_JBD bit | |
1767 | * is set. This bit is tested in core kernel code where we need to take | |
1768 | * JBD-specific actions. Testing ->b_private for NULL is not reliable | |
1769 | * there. | |
1770 | * | |
1771 | * When a buffer has its BH_JBD bit set, its ->b_count is elevated by one. | |
1772 | * | |
1773 | * When a buffer has its BH_JBD bit set it is immune from being released by | |
1774 | * core kernel code, mainly via ->b_count. | |
1775 | * | |
1776 | * A journal_head may be detached from its buffer_head when the journal_head's | |
1777 | * b_transaction, b_cp_transaction and b_next_transaction pointers are NULL. | |
1778 | * Various places in JBD call journal_remove_journal_head() to indicate that the | |
1779 | * journal_head can be dropped if needed. | |
1780 | * | |
1781 | * Various places in the kernel want to attach a journal_head to a buffer_head | |
1782 | * _before_ attaching the journal_head to a transaction. To protect the | |
1783 | * journal_head in this situation, journal_add_journal_head elevates the | |
1784 | * journal_head's b_jcount refcount by one. The caller must call | |
1785 | * journal_put_journal_head() to undo this. | |
1786 | * | |
1787 | * So the typical usage would be: | |
1788 | * | |
1789 | * (Attach a journal_head if needed. Increments b_jcount) | |
1790 | * struct journal_head *jh = journal_add_journal_head(bh); | |
1791 | * ... | |
1792 | * jh->b_transaction = xxx; | |
1793 | * journal_put_journal_head(jh); | |
1794 | * | |
1795 | * Now, the journal_head's b_jcount is zero, but it is safe from being released | |
1796 | * because it has a non-zero b_transaction. | |
1797 | */ | |
1798 | ||
1799 | /* | |
1800 | * Give a buffer_head a journal_head. | |
1801 | * | |
1802 | * Doesn't need the journal lock. | |
1803 | * May sleep. | |
1804 | */ | |
1805 | struct journal_head *journal_add_journal_head(struct buffer_head *bh) | |
1806 | { | |
1807 | struct journal_head *jh; | |
1808 | struct journal_head *new_jh = NULL; | |
1809 | ||
1810 | repeat: | |
1811 | if (!buffer_jbd(bh)) { | |
1812 | new_jh = journal_alloc_journal_head(); | |
1813 | memset(new_jh, 0, sizeof(*new_jh)); | |
1814 | } | |
1815 | ||
1816 | jbd_lock_bh_journal_head(bh); | |
1817 | if (buffer_jbd(bh)) { | |
1818 | jh = bh2jh(bh); | |
1819 | } else { | |
1820 | J_ASSERT_BH(bh, | |
1821 | (atomic_read(&bh->b_count) > 0) || | |
1822 | (bh->b_page && bh->b_page->mapping)); | |
1823 | ||
1824 | if (!new_jh) { | |
1825 | jbd_unlock_bh_journal_head(bh); | |
1826 | goto repeat; | |
1827 | } | |
1828 | ||
1829 | jh = new_jh; | |
1830 | new_jh = NULL; /* We consumed it */ | |
1831 | set_buffer_jbd(bh); | |
1832 | bh->b_private = jh; | |
1833 | jh->b_bh = bh; | |
1834 | get_bh(bh); | |
1835 | BUFFER_TRACE(bh, "added journal_head"); | |
1836 | } | |
1837 | jh->b_jcount++; | |
1838 | jbd_unlock_bh_journal_head(bh); | |
1839 | if (new_jh) | |
1840 | journal_free_journal_head(new_jh); | |
1841 | return bh->b_private; | |
1842 | } | |
1843 | ||
1844 | /* | |
1845 | * Grab a ref against this buffer_head's journal_head. If it ended up not | |
1846 | * having a journal_head, return NULL | |
1847 | */ | |
1848 | struct journal_head *journal_grab_journal_head(struct buffer_head *bh) | |
1849 | { | |
1850 | struct journal_head *jh = NULL; | |
1851 | ||
1852 | jbd_lock_bh_journal_head(bh); | |
1853 | if (buffer_jbd(bh)) { | |
1854 | jh = bh2jh(bh); | |
1855 | jh->b_jcount++; | |
1856 | } | |
1857 | jbd_unlock_bh_journal_head(bh); | |
1858 | return jh; | |
1859 | } | |
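/*
 * Illustrative sketch (editorial): the usual grab/put pattern around a
 * buffer that may or may not be under jbd control.  The function name is
 * hypothetical.
 */
#if 0
static void example_inspect_buffer(struct buffer_head *bh)
{
	struct journal_head *jh = journal_grab_journal_head(bh);

	if (jh) {
		/* b_jcount is elevated, so the journal_head cannot be
		 * released while we look at it. */
		/* ... inspect or update jh ... */
		journal_put_journal_head(jh);
	}
}
#endif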
1860 | ||
1861 | static void __journal_remove_journal_head(struct buffer_head *bh) | |
1862 | { | |
1863 | struct journal_head *jh = bh2jh(bh); | |
1864 | ||
1865 | J_ASSERT_JH(jh, jh->b_jcount >= 0); | |
1866 | ||
1867 | get_bh(bh); | |
1868 | if (jh->b_jcount == 0) { | |
1869 | if (jh->b_transaction == NULL && | |
1870 | jh->b_next_transaction == NULL && | |
1871 | jh->b_cp_transaction == NULL) { | |
1872 | J_ASSERT_JH(jh, jh->b_jlist == BJ_None); | |
1873 | J_ASSERT_BH(bh, buffer_jbd(bh)); | |
1874 | J_ASSERT_BH(bh, jh2bh(jh) == bh); | |
1875 | BUFFER_TRACE(bh, "remove journal_head"); | |
1876 | if (jh->b_frozen_data) { | |
1877 | printk(KERN_WARNING "%s: freeing " | |
1878 | "b_frozen_data\n", | |
1879 | __FUNCTION__); | |
1880 | jbd_slab_free(jh->b_frozen_data, bh->b_size); | |
1881 | } | |
1882 | if (jh->b_committed_data) { | |
1883 | printk(KERN_WARNING "%s: freeing " | |
1884 | "b_committed_data\n", | |
1885 | __FUNCTION__); | |
1886 | jbd_slab_free(jh->b_committed_data, bh->b_size); | |
1887 | } | |
1888 | bh->b_private = NULL; | |
1889 | jh->b_bh = NULL; /* debug, really */ | |
1890 | clear_buffer_jbd(bh); | |
1891 | __brelse(bh); | |
1892 | journal_free_journal_head(jh); | |
1893 | } else { | |
1894 | BUFFER_TRACE(bh, "journal_head was locked"); | |
1895 | } | |
1896 | } | |
1897 | } | |
1898 | ||
1899 | /* | |
1900 | * journal_remove_journal_head(): if the buffer isn't attached to a transaction | |
1901 | * and has a zero b_jcount then remove and release its journal_head. If we did | |
1902 | * see that the buffer is not used by any transaction we also "logically" | |
1903 | * decrement ->b_count. | |
1904 | * | |
1905 | * We in fact take an additional increment on ->b_count as a convenience, | |
1906 | * because the caller usually wants to do additional things with the bh | |
1907 | * after calling here. | |
1908 | * The caller of journal_remove_journal_head() *must* run __brelse(bh) at some | |
1909 | * time. Once the caller has run __brelse(), the buffer is eligible for | |
1910 | * reaping by try_to_free_buffers(). | |
1911 | */ | |
1912 | void journal_remove_journal_head(struct buffer_head *bh) | |
1913 | { | |
1914 | jbd_lock_bh_journal_head(bh); | |
1915 | __journal_remove_journal_head(bh); | |
1916 | jbd_unlock_bh_journal_head(bh); | |
1917 | } | |
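/*
 * Illustrative sketch (editorial): the calling convention documented
 * above.  journal_remove_journal_head() leaves the caller holding a
 * surplus ->b_count reference which must eventually be dropped.
 */
#if 0
static void example_drop_journal_head(struct buffer_head *bh)
{
	journal_remove_journal_head(bh);	/* takes a surplus b_count ref */
	/* ... any final work on bh ... */
	__brelse(bh);	/* mandatory: bh becomes reapable again */
}
#endif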
1918 | ||
1919 | /* | |
1920 | * Drop a reference on the passed journal_head. If it fell to zero then try to | |
1921 | * release the journal_head from the buffer_head. | |
1922 | */ | |
1923 | void journal_put_journal_head(struct journal_head *jh) | |
1924 | { | |
1925 | struct buffer_head *bh = jh2bh(jh); | |
1926 | ||
1927 | jbd_lock_bh_journal_head(bh); | |
1928 | J_ASSERT_JH(jh, jh->b_jcount > 0); | |
1929 | --jh->b_jcount; | |
1930 | if (!jh->b_jcount && !jh->b_transaction) { | |
1931 | __journal_remove_journal_head(bh); | |
1932 | __brelse(bh); | |
1933 | } | |
1934 | jbd_unlock_bh_journal_head(bh); | |
1935 | } | |
1936 | ||
1937 | /* | |
1938 | * /proc tunables | |
1939 | */ | |
1940 | #if defined(CONFIG_JBD_DEBUG) | |
1941 | int journal_enable_debug; | |
1942 | EXPORT_SYMBOL(journal_enable_debug); | |
1943 | #endif | |
1944 | ||
1945 | #if defined(CONFIG_JBD_DEBUG) && defined(CONFIG_PROC_FS) | |
1946 | ||
1947 | static struct proc_dir_entry *proc_jbd_debug; | |
1948 | ||
1949 | static int read_jbd_debug(char *page, char **start, off_t off, | |
1950 | int count, int *eof, void *data) | |
1951 | { | |
1952 | int ret; | |
1953 | ||
1954 | ret = sprintf(page + off, "%d\n", journal_enable_debug); | |
1955 | *eof = 1; | |
1956 | return ret; | |
1957 | } | |
1958 | ||
1959 | static int write_jbd_debug(struct file *file, const char __user *buffer, | |
1960 | unsigned long count, void *data) | |
1961 | { | |
1962 | char buf[32]; | |
1963 | ||
1964 | if (count > ARRAY_SIZE(buf) - 1) | |
1965 | count = ARRAY_SIZE(buf) - 1; | |
1966 | if (copy_from_user(buf, buffer, count)) | |
1967 | return -EFAULT; | |
1968 | buf[count] = '\0'; /* terminate right after the copied bytes */ | |
1969 | journal_enable_debug = simple_strtoul(buf, NULL, 10); | |
1970 | return count; | |
1971 | } | |
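/*
 * Usage note (editorial): with CONFIG_JBD_DEBUG and procfs enabled, the
 * debug level is driven from userspace via the entry created below,
 * e.g. "echo 5 > /proc/sys/fs/jbd-debug" to raise it and
 * "cat /proc/sys/fs/jbd-debug" to read it back.
 */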
1972 | ||
1973 | #define JBD_PROC_NAME "sys/fs/jbd-debug" | |
1974 | ||
1975 | static void __init create_jbd_proc_entry(void) | |
1976 | { | |
1977 | proc_jbd_debug = create_proc_entry(JBD_PROC_NAME, 0644, NULL); | |
1978 | if (proc_jbd_debug) { | |
1979 | /* Why is this so hard? */ | |
1980 | proc_jbd_debug->read_proc = read_jbd_debug; | |
1981 | proc_jbd_debug->write_proc = write_jbd_debug; | |
1982 | } | |
1983 | } | |
1984 | ||
1985 | static void __exit remove_jbd_proc_entry(void) | |
1986 | { | |
1987 | if (proc_jbd_debug) | |
1988 | remove_proc_entry(JBD_PROC_NAME, NULL); | |
1989 | } | |
1990 | ||
1991 | #else | |
1992 | ||
1993 | #define create_jbd_proc_entry() do {} while (0) | |
1994 | #define remove_jbd_proc_entry() do {} while (0) | |
1995 | ||
1996 | #endif | |
1997 | ||
1998 | kmem_cache_t *jbd_handle_cache; | |
1999 | ||
2000 | static int __init journal_init_handle_cache(void) | |
2001 | { | |
2002 | jbd_handle_cache = kmem_cache_create("journal_handle", | |
2003 | sizeof(handle_t), | |
2004 | 0, /* offset */ | |
2005 | 0, /* flags */ | |
2006 | NULL, /* ctor */ | |
2007 | NULL); /* dtor */ | |
2008 | if (jbd_handle_cache == NULL) { | |
2009 | printk(KERN_EMERG "JBD: failed to create handle cache\n"); | |
2010 | return -ENOMEM; | |
2011 | } | |
2012 | return 0; | |
2013 | } | |
2014 | ||
2015 | static void journal_destroy_handle_cache(void) | |
2016 | { | |
2017 | if (jbd_handle_cache) | |
2018 | kmem_cache_destroy(jbd_handle_cache); | |
2019 | } | |
2020 | ||
2021 | /* | |
2022 | * Module startup and shutdown | |
2023 | */ | |
2024 | ||
2025 | static int __init journal_init_caches(void) | |
2026 | { | |
2027 | int ret; | |
2028 | ||
2029 | ret = journal_init_revoke_caches(); | |
2030 | if (ret == 0) | |
2031 | ret = journal_init_journal_head_cache(); | |
2032 | if (ret == 0) | |
2033 | ret = journal_init_handle_cache(); | |
2034 | return ret; | |
2035 | } | |
2036 | ||
2037 | static void journal_destroy_caches(void) | |
2038 | { | |
2039 | journal_destroy_revoke_caches(); | |
2040 | journal_destroy_journal_head_cache(); | |
2041 | journal_destroy_handle_cache(); | |
2042 | journal_destroy_jbd_slabs(); | |
2043 | } | |
2044 | ||
2045 | static int __init journal_init(void) | |
2046 | { | |
2047 | int ret; | |
2048 | ||
2049 | BUILD_BUG_ON(sizeof(struct journal_superblock_s) != 1024); | |
2050 | ||
2051 | ret = journal_init_caches(); | |
2052 | if (ret != 0) | |
2053 | journal_destroy_caches(); | |
2054 | create_jbd_proc_entry(); | |
2055 | return ret; | |
2056 | } | |
2057 | ||
2058 | static void __exit journal_exit(void) | |
2059 | { | |
2060 | #ifdef CONFIG_JBD_DEBUG | |
2061 | int n = atomic_read(&nr_journal_heads); | |
2062 | if (n) | |
2063 | printk(KERN_EMERG "JBD: leaked %d journal_heads!\n", n); | |
2064 | #endif | |
2065 | remove_jbd_proc_entry(); | |
2066 | journal_destroy_caches(); | |
2067 | } | |
2068 | ||
2069 | MODULE_LICENSE("GPL"); | |
2070 | module_init(journal_init); | |
2071 | module_exit(journal_exit); | |
2072 |