1 /*
2 * linux/fs/buffer.c
3 *
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
6
7 /*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9 *
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12 *
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
15 *
16  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17 *
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19 */
20
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/syscalls.h>
24 #include <linux/fs.h>
25 #include <linux/mm.h>
26 #include <linux/percpu.h>
27 #include <linux/slab.h>
28 #include <linux/smp_lock.h>
29 #include <linux/capability.h>
30 #include <linux/blkdev.h>
31 #include <linux/file.h>
32 #include <linux/quotaops.h>
33 #include <linux/highmem.h>
34 #include <linux/module.h>
35 #include <linux/writeback.h>
36 #include <linux/hash.h>
37 #include <linux/suspend.h>
38 #include <linux/buffer_head.h>
39 #include <linux/bio.h>
40 #include <linux/notifier.h>
41 #include <linux/cpu.h>
42 #include <linux/bitops.h>
43 #include <linux/mpage.h>
44 #include <linux/bit_spinlock.h>
45
46 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47 static void invalidate_bh_lrus(void);
48
49 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
50
51 inline void
52 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
53 {
54 bh->b_end_io = handler;
55 bh->b_private = private;
56 }
57
58 static int sync_buffer(void *word)
59 {
60 struct block_device *bd;
61 struct buffer_head *bh
62 = container_of(word, struct buffer_head, b_state);
63
64 smp_mb();
65 bd = bh->b_bdev;
66 if (bd)
67 blk_run_address_space(bd->bd_inode->i_mapping);
68 io_schedule();
69 return 0;
70 }
71
72 void fastcall __lock_buffer(struct buffer_head *bh)
73 {
74 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
75 TASK_UNINTERRUPTIBLE);
76 }
77 EXPORT_SYMBOL(__lock_buffer);
78
79 void fastcall unlock_buffer(struct buffer_head *bh)
80 {
81 clear_buffer_locked(bh);
82 smp_mb__after_clear_bit();
83 wake_up_bit(&bh->b_state, BH_Lock);
84 }
85
86 /*
87 * Block until a buffer comes unlocked. This doesn't stop it
88 * from becoming locked again - you have to lock it yourself
89 * if you want to preserve its state.
90 */
91 void __wait_on_buffer(struct buffer_head * bh)
92 {
93 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
94 }
95
96 static void
97 __clear_page_buffers(struct page *page)
98 {
99 ClearPagePrivate(page);
100 set_page_private(page, 0);
101 page_cache_release(page);
102 }
103
104 static void buffer_io_error(struct buffer_head *bh)
105 {
106 char b[BDEVNAME_SIZE];
107
108 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
109 bdevname(bh->b_bdev, b),
110 (unsigned long long)bh->b_blocknr);
111 }
112
113 /*
114 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
115 * unlock the buffer. This is what ll_rw_block uses too.
116 */
117 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
118 {
119 if (uptodate) {
120 set_buffer_uptodate(bh);
121 } else {
122 /* This happens, due to failed READA attempts. */
123 clear_buffer_uptodate(bh);
124 }
125 unlock_buffer(bh);
126 put_bh(bh);
127 }
128
129 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
130 {
131 char b[BDEVNAME_SIZE];
132
133 if (uptodate) {
134 set_buffer_uptodate(bh);
135 } else {
136 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
137 buffer_io_error(bh);
138 printk(KERN_WARNING "lost page write due to "
139 "I/O error on %s\n",
140 bdevname(bh->b_bdev, b));
141 }
142 set_buffer_write_io_error(bh);
143 clear_buffer_uptodate(bh);
144 }
145 unlock_buffer(bh);
146 put_bh(bh);
147 }
148
149 /*
150 * Write out and wait upon all the dirty data associated with a block
151 * device via its mapping. Does not take the superblock lock.
152 */
153 int sync_blockdev(struct block_device *bdev)
154 {
155 int ret = 0;
156
157 if (bdev)
158 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
159 return ret;
160 }
161 EXPORT_SYMBOL(sync_blockdev);
162
163 static void __fsync_super(struct super_block *sb)
164 {
165 sync_inodes_sb(sb, 0);
166 DQUOT_SYNC(sb);
167 lock_super(sb);
168 if (sb->s_dirt && sb->s_op->write_super)
169 sb->s_op->write_super(sb);
170 unlock_super(sb);
171 if (sb->s_op->sync_fs)
172 sb->s_op->sync_fs(sb, 1);
173 sync_blockdev(sb->s_bdev);
174 sync_inodes_sb(sb, 1);
175 }
176
177 /*
178 * Write out and wait upon all dirty data associated with this
179 * superblock. Filesystem data as well as the underlying block
180 * device. Takes the superblock lock.
181 */
182 int fsync_super(struct super_block *sb)
183 {
184 __fsync_super(sb);
185 return sync_blockdev(sb->s_bdev);
186 }
187
188 /*
189 * Write out and wait upon all dirty data associated with this
190 * device. Filesystem data as well as the underlying block
191 * device. Takes the superblock lock.
192 */
193 int fsync_bdev(struct block_device *bdev)
194 {
195 struct super_block *sb = get_super(bdev);
196 if (sb) {
197 int res = fsync_super(sb);
198 drop_super(sb);
199 return res;
200 }
201 return sync_blockdev(bdev);
202 }
203
204 /**
205 * freeze_bdev -- lock a filesystem and force it into a consistent state
206 * @bdev: blockdevice to lock
207 *
208 * This takes the block device bd_mount_mutex to make sure no new mounts
209 * happen on bdev until thaw_bdev() is called.
210 * If a superblock is found on this device, we take the s_umount semaphore
211 * on it to make sure nobody unmounts until the snapshot creation is done.
212 */
213 struct super_block *freeze_bdev(struct block_device *bdev)
214 {
215 struct super_block *sb;
216
217 mutex_lock(&bdev->bd_mount_mutex);
218 sb = get_super(bdev);
219 if (sb && !(sb->s_flags & MS_RDONLY)) {
220 sb->s_frozen = SB_FREEZE_WRITE;
221 smp_wmb();
222
223 __fsync_super(sb);
224
225 sb->s_frozen = SB_FREEZE_TRANS;
226 smp_wmb();
227
228 sync_blockdev(sb->s_bdev);
229
230 if (sb->s_op->write_super_lockfs)
231 sb->s_op->write_super_lockfs(sb);
232 }
233
234 sync_blockdev(bdev);
235         return sb;      /* thaw_bdev releases s->s_umount and bd_mount_mutex */
236 }
237 EXPORT_SYMBOL(freeze_bdev);
238
239 /**
240 * thaw_bdev -- unlock filesystem
241 * @bdev: blockdevice to unlock
242 * @sb: associated superblock
243 *
244 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
245 */
246 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
247 {
248 if (sb) {
249 BUG_ON(sb->s_bdev != bdev);
250
251 if (sb->s_op->unlockfs)
252 sb->s_op->unlockfs(sb);
253 sb->s_frozen = SB_UNFROZEN;
254 smp_wmb();
255 wake_up(&sb->s_wait_unfrozen);
256 drop_super(sb);
257 }
258
259 mutex_unlock(&bdev->bd_mount_mutex);
260 }
261 EXPORT_SYMBOL(thaw_bdev);
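/*
 * Editorial note: a minimal sketch of the intended freeze/thaw pairing, as a
 * snapshot or backup tool running in the kernel might use it.
 * my_take_snapshot() is a hypothetical placeholder, not a kernel function.
 *
 *	struct super_block *sb;
 *
 *	sb = freeze_bdev(bdev);		(blocks new writes and syncs the fs)
 *	my_take_snapshot(bdev);		(device is now in a consistent state)
 *	thaw_bdev(bdev, sb);		(re-enables writes, drops bd_mount_mutex)
 *
 * freeze_bdev() returns NULL when no superblock is found on the device;
 * thaw_bdev() copes with a NULL sb, so the pair can be used unconditionally.
 */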
262
263 /*
264 * sync everything. Start out by waking pdflush, because that writes back
265 * all queues in parallel.
266 */
267 static void do_sync(unsigned long wait)
268 {
269 wakeup_pdflush(0);
270 sync_inodes(0); /* All mappings, inodes and their blockdevs */
271 DQUOT_SYNC(NULL);
272 sync_supers(); /* Write the superblocks */
273 sync_filesystems(0); /* Start syncing the filesystems */
274 sync_filesystems(wait); /* Waitingly sync the filesystems */
275 sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */
276 if (!wait)
277 printk("Emergency Sync complete\n");
278 if (unlikely(laptop_mode))
279 laptop_sync_completion();
280 }
281
282 asmlinkage long sys_sync(void)
283 {
284 do_sync(1);
285 return 0;
286 }
287
288 void emergency_sync(void)
289 {
290 pdflush_operation(do_sync, 0);
291 }
292
293 /*
294 * Generic function to fsync a file.
295 *
296 * filp may be NULL if called via the msync of a vma.
297 */
298
299 int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
300 {
301 struct inode * inode = dentry->d_inode;
302 struct super_block * sb;
303 int ret, err;
304
305 /* sync the inode to buffers */
306 ret = write_inode_now(inode, 0);
307
308 /* sync the superblock to buffers */
309 sb = inode->i_sb;
310 lock_super(sb);
311 if (sb->s_op->write_super)
312 sb->s_op->write_super(sb);
313 unlock_super(sb);
314
315 /* .. finally sync the buffers to disk */
316 err = sync_blockdev(sb->s_bdev);
317 if (!ret)
318 ret = err;
319 return ret;
320 }
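/*
 * Editorial note: file_fsync() above is the stock ->fsync implementation for
 * simple filesystems that keep all their metadata in the buffer cache. A
 * sketch of how such a filesystem might wire it up ("myfs" is hypothetical,
 * and the other methods shown are only plausible choices for this era):
 *
 *	struct file_operations myfs_file_operations = {
 *		.read	= generic_file_read,
 *		.write	= generic_file_write,
 *		.fsync	= file_fsync,
 *	};
 *
 * do_fsync() below then invokes it with the inode's i_mutex held, after
 * kicking off writeback of the file's dirty pages.
 */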
321
322 long do_fsync(struct file *file, int datasync)
323 {
324 int ret;
325 int err;
326 struct address_space *mapping = file->f_mapping;
327
328 if (!file->f_op || !file->f_op->fsync) {
329 /* Why? We can still call filemap_fdatawrite */
330 ret = -EINVAL;
331 goto out;
332 }
333
334 current->flags |= PF_SYNCWRITE;
335 ret = filemap_fdatawrite(mapping);
336
337 /*
338 * We need to protect against concurrent writers, which could cause
339 * livelocks in fsync_buffers_list().
340 */
341 mutex_lock(&mapping->host->i_mutex);
342 err = file->f_op->fsync(file, file->f_dentry, datasync);
343 if (!ret)
344 ret = err;
345 mutex_unlock(&mapping->host->i_mutex);
346 err = filemap_fdatawait(mapping);
347 if (!ret)
348 ret = err;
349 current->flags &= ~PF_SYNCWRITE;
350 out:
351 return ret;
352 }
353
354 static long __do_fsync(unsigned int fd, int datasync)
355 {
356 struct file *file;
357 int ret = -EBADF;
358
359 file = fget(fd);
360 if (file) {
361 ret = do_fsync(file, datasync);
362 fput(file);
363 }
364 return ret;
365 }
366
367 asmlinkage long sys_fsync(unsigned int fd)
368 {
369 return __do_fsync(fd, 0);
370 }
371
372 asmlinkage long sys_fdatasync(unsigned int fd)
373 {
374 return __do_fsync(fd, 1);
375 }
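/*
 * Editorial note: seen from userspace, the only difference between the two
 * syscalls above is the datasync flag handed to the filesystem's ->fsync
 * method; a sketch:
 *
 *	write(fd, buf, len);
 *	fsync(fd);		(datasync == 0: flush data and all metadata)
 *	fdatasync(fd);		(datasync == 1: flush metadata only if it is
 *				 needed to retrieve the data, e.g. i_size)
 */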
376
377 /*
378 * Various filesystems appear to want __find_get_block to be non-blocking.
379 * But it's the page lock which protects the buffers. To get around this,
380 * we get exclusion from try_to_free_buffers with the blockdev mapping's
381 * private_lock.
382 *
383 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
384 * may be quite high. This code could TryLock the page, and if that
385 * succeeds, there is no need to take private_lock. (But if
386 * private_lock is contended then so is mapping->tree_lock).
387 */
388 static struct buffer_head *
389 __find_get_block_slow(struct block_device *bdev, sector_t block)
390 {
391 struct inode *bd_inode = bdev->bd_inode;
392 struct address_space *bd_mapping = bd_inode->i_mapping;
393 struct buffer_head *ret = NULL;
394 pgoff_t index;
395 struct buffer_head *bh;
396 struct buffer_head *head;
397 struct page *page;
398 int all_mapped = 1;
399
400 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
401 page = find_get_page(bd_mapping, index);
402 if (!page)
403 goto out;
404
405 spin_lock(&bd_mapping->private_lock);
406 if (!page_has_buffers(page))
407 goto out_unlock;
408 head = page_buffers(page);
409 bh = head;
410 do {
411 if (bh->b_blocknr == block) {
412 ret = bh;
413 get_bh(bh);
414 goto out_unlock;
415 }
416 if (!buffer_mapped(bh))
417 all_mapped = 0;
418 bh = bh->b_this_page;
419 } while (bh != head);
420
421 /* we might be here because some of the buffers on this page are
422 * not mapped. This is due to various races between
423 * file io on the block device and getblk. It gets dealt with
424 * elsewhere, don't buffer_error if we had some unmapped buffers
425 */
426 if (all_mapped) {
427 printk("__find_get_block_slow() failed. "
428 "block=%llu, b_blocknr=%llu\n",
429 (unsigned long long)block,
430 (unsigned long long)bh->b_blocknr);
431 printk("b_state=0x%08lx, b_size=%zu\n",
432 bh->b_state, bh->b_size);
433 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
434 }
435 out_unlock:
436 spin_unlock(&bd_mapping->private_lock);
437 page_cache_release(page);
438 out:
439 return ret;
440 }
441
442 /* If invalidate_buffers() will trash dirty buffers, it means some kind
443    of fs corruption is going on. Trashing dirty data always implies losing
444 information that was supposed to be just stored on the physical layer
445 by the user.
446
447    Thus invalidate_buffers in general usage is not allowed to trash
448    dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
449 be preserved. These buffers are simply skipped.
450
451 We also skip buffers which are still in use. For example this can
452 happen if a userspace program is reading the block device.
453
454    NOTE: In the case where the user removed a removable-media disk even if
455    there's still dirty data not synced to disk (due to a bug in the device
456    driver or to a user error), by not destroying the dirty buffers we could
457    also generate corruption on the next media inserted, thus a parameter is
458    necessary to handle this case in the safest way possible (trying
459    not to also corrupt the newly inserted disk with the data belonging to
460    the old, now corrupted disk). Also for the ramdisk the natural thing
461    to do in order to release the ramdisk memory is to destroy dirty buffers.
462
463    These are two special cases. Normal usage implies that the device driver
464    issues a sync on the device (without waiting for I/O completion) and
465 then an invalidate_buffers call that doesn't trash dirty buffers.
466
467    For handling cache coherency with the blkdev pagecache the 'update' case
468    has been introduced. It is needed to re-read from disk any pinned
469 buffer. NOTE: re-reading from disk is destructive so we can do it only
470 when we assume nobody is changing the buffercache under our I/O and when
471 we think the disk contains more recent information than the buffercache.
472 The update == 1 pass marks the buffers we need to update, the update == 2
473 pass does the actual I/O. */
474 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
475 {
476 invalidate_bh_lrus();
477 /*
478 * FIXME: what about destroy_dirty_buffers?
479 * We really want to use invalidate_inode_pages2() for
480 * that, but not until that's cleaned up.
481 */
482 invalidate_inode_pages(bdev->bd_inode->i_mapping);
483 }
484
485 /*
486 * Kick pdflush then try to free up some ZONE_NORMAL memory.
487 */
488 static void free_more_memory(void)
489 {
490 struct zone **zones;
491 pg_data_t *pgdat;
492
493 wakeup_pdflush(1024);
494 yield();
495
496 for_each_pgdat(pgdat) {
497 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
498 if (*zones)
499 try_to_free_pages(zones, GFP_NOFS);
500 }
501 }
502
503 /*
504 * I/O completion handler for block_read_full_page() - pages
505 * which come unlocked at the end of I/O.
506 */
507 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
508 {
509 unsigned long flags;
510 struct buffer_head *first;
511 struct buffer_head *tmp;
512 struct page *page;
513 int page_uptodate = 1;
514
515 BUG_ON(!buffer_async_read(bh));
516
517 page = bh->b_page;
518 if (uptodate) {
519 set_buffer_uptodate(bh);
520 } else {
521 clear_buffer_uptodate(bh);
522 if (printk_ratelimit())
523 buffer_io_error(bh);
524 SetPageError(page);
525 }
526
527 /*
528 * Be _very_ careful from here on. Bad things can happen if
529 * two buffer heads end IO at almost the same time and both
530 * decide that the page is now completely done.
531 */
532 first = page_buffers(page);
533 local_irq_save(flags);
534 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
535 clear_buffer_async_read(bh);
536 unlock_buffer(bh);
537 tmp = bh;
538 do {
539 if (!buffer_uptodate(tmp))
540 page_uptodate = 0;
541 if (buffer_async_read(tmp)) {
542 BUG_ON(!buffer_locked(tmp));
543 goto still_busy;
544 }
545 tmp = tmp->b_this_page;
546 } while (tmp != bh);
547 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
548 local_irq_restore(flags);
549
550 /*
551 * If none of the buffers had errors and they are all
552 * uptodate then we can set the page uptodate.
553 */
554 if (page_uptodate && !PageError(page))
555 SetPageUptodate(page);
556 unlock_page(page);
557 return;
558
559 still_busy:
560 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
561 local_irq_restore(flags);
562 return;
563 }
564
565 /*
566 * Completion handler for block_write_full_page() - pages which are unlocked
567 * during I/O, and which have PageWriteback cleared upon I/O completion.
568 */
569 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
570 {
571 char b[BDEVNAME_SIZE];
572 unsigned long flags;
573 struct buffer_head *first;
574 struct buffer_head *tmp;
575 struct page *page;
576
577 BUG_ON(!buffer_async_write(bh));
578
579 page = bh->b_page;
580 if (uptodate) {
581 set_buffer_uptodate(bh);
582 } else {
583 if (printk_ratelimit()) {
584 buffer_io_error(bh);
585 printk(KERN_WARNING "lost page write due to "
586 "I/O error on %s\n",
587 bdevname(bh->b_bdev, b));
588 }
589 set_bit(AS_EIO, &page->mapping->flags);
590 clear_buffer_uptodate(bh);
591 SetPageError(page);
592 }
593
594 first = page_buffers(page);
595 local_irq_save(flags);
596 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
597
598 clear_buffer_async_write(bh);
599 unlock_buffer(bh);
600 tmp = bh->b_this_page;
601 while (tmp != bh) {
602 if (buffer_async_write(tmp)) {
603 BUG_ON(!buffer_locked(tmp));
604 goto still_busy;
605 }
606 tmp = tmp->b_this_page;
607 }
608 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
609 local_irq_restore(flags);
610 end_page_writeback(page);
611 return;
612
613 still_busy:
614 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
615 local_irq_restore(flags);
616 return;
617 }
618
619 /*
620  * If a page's buffers are under async read (end_buffer_async_read
621 * completion) then there is a possibility that another thread of
622 * control could lock one of the buffers after it has completed
623 * but while some of the other buffers have not completed. This
624 * locked buffer would confuse end_buffer_async_read() into not unlocking
625 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
626 * that this buffer is not under async I/O.
627 *
628 * The page comes unlocked when it has no locked buffer_async buffers
629 * left.
630 *
631 * PageLocked prevents anyone starting new async I/O reads any of
632 * the buffers.
633 *
634 * PageWriteback is used to prevent simultaneous writeout of the same
635 * page.
636 *
637 * PageLocked prevents anyone from starting writeback of a page which is
638 * under read I/O (PageWriteback is only ever set against a locked page).
639 */
640 static void mark_buffer_async_read(struct buffer_head *bh)
641 {
642 bh->b_end_io = end_buffer_async_read;
643 set_buffer_async_read(bh);
644 }
645
646 void mark_buffer_async_write(struct buffer_head *bh)
647 {
648 bh->b_end_io = end_buffer_async_write;
649 set_buffer_async_write(bh);
650 }
651 EXPORT_SYMBOL(mark_buffer_async_write);
652
653
654 /*
655 * fs/buffer.c contains helper functions for buffer-backed address space's
656 * fsync functions. A common requirement for buffer-based filesystems is
657 * that certain data from the backing blockdev needs to be written out for
658 * a successful fsync(). For example, ext2 indirect blocks need to be
659 * written back and waited upon before fsync() returns.
660 *
661 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
662 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
663 * management of a list of dependent buffers at ->i_mapping->private_list.
664 *
665 * Locking is a little subtle: try_to_free_buffers() will remove buffers
666 * from their controlling inode's queue when they are being freed. But
667 * try_to_free_buffers() will be operating against the *blockdev* mapping
668 * at the time, not against the S_ISREG file which depends on those buffers.
669 * So the locking for private_list is via the private_lock in the address_space
670 * which backs the buffers. Which is different from the address_space
671 * against which the buffers are listed. So for a particular address_space,
672 * mapping->private_lock does *not* protect mapping->private_list! In fact,
673 * mapping->private_list will always be protected by the backing blockdev's
674 * ->private_lock.
675 *
676 * Which introduces a requirement: all buffers on an address_space's
677 * ->private_list must be from the same address_space: the blockdev's.
678 *
679 * address_spaces which do not place buffers at ->private_list via these
680 * utility functions are free to use private_lock and private_list for
681 * whatever they want. The only requirement is that list_empty(private_list)
682 * be true at clear_inode() time.
683 *
684 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
685 * filesystems should do that. invalidate_inode_buffers() should just go
686 * BUG_ON(!list_empty).
687 *
688 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
689 * take an address_space, not an inode. And it should be called
690 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
691 * queued up.
692 *
693 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
694 * list if it is already on a list. Because if the buffer is on a list,
695 * it *must* already be on the right one. If not, the filesystem is being
696 * silly. This will save a ton of locking. But first we have to ensure
697 * that buffers are taken *off* the old inode's list when they are freed
698 * (presumably in truncate). That requires careful auditing of all
699 * filesystems (do it inside bforget()). It could also be done by bringing
700 * b_inode back.
701 */
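/*
 * Editorial note: an illustrative sketch of how a filesystem is expected to
 * use these helpers (ext2 does roughly this for indirect blocks; myfs_fsync
 * and indirect_bh are hypothetical names):
 *
 *	(while building the block mapping, tie the metadata bh to the file)
 *	mark_buffer_dirty_inode(indirect_bh, inode);
 *
 *	(in ->fsync, write and wait on those buffers as well as the data)
 *	int myfs_fsync(struct file *file, struct dentry *dentry, int datasync)
 *	{
 *		return sync_mapping_buffers(dentry->d_inode->i_mapping);
 *	}
 *
 * The buffers end up on the file's mapping->private_list, protected by the
 * backing blockdev mapping's private_lock, exactly as described above.
 */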
702
703 /*
704 * The buffer's backing address_space's private_lock must be held
705 */
706 static inline void __remove_assoc_queue(struct buffer_head *bh)
707 {
708 list_del_init(&bh->b_assoc_buffers);
709 }
710
711 int inode_has_buffers(struct inode *inode)
712 {
713 return !list_empty(&inode->i_data.private_list);
714 }
715
716 /*
717 * osync is designed to support O_SYNC io. It waits synchronously for
718 * all already-submitted IO to complete, but does not queue any new
719 * writes to the disk.
720 *
721 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
722 * you dirty the buffers, and then use osync_inode_buffers to wait for
723 * completion. Any other dirty buffers which are not yet queued for
724 * write will not be flushed to disk by the osync.
725 */
726 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
727 {
728 struct buffer_head *bh;
729 struct list_head *p;
730 int err = 0;
731
732 spin_lock(lock);
733 repeat:
734 list_for_each_prev(p, list) {
735 bh = BH_ENTRY(p);
736 if (buffer_locked(bh)) {
737 get_bh(bh);
738 spin_unlock(lock);
739 wait_on_buffer(bh);
740 if (!buffer_uptodate(bh))
741 err = -EIO;
742 brelse(bh);
743 spin_lock(lock);
744 goto repeat;
745 }
746 }
747 spin_unlock(lock);
748 return err;
749 }
750
751 /**
752 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
753 * buffers
754 * @mapping: the mapping which wants those buffers written
755 *
756 * Starts I/O against the buffers at mapping->private_list, and waits upon
757 * that I/O.
758 *
759 * Basically, this is a convenience function for fsync().
760 * @mapping is a file or directory which needs those buffers to be written for
761 * a successful fsync().
762 */
763 int sync_mapping_buffers(struct address_space *mapping)
764 {
765 struct address_space *buffer_mapping = mapping->assoc_mapping;
766
767 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
768 return 0;
769
770 return fsync_buffers_list(&buffer_mapping->private_lock,
771 &mapping->private_list);
772 }
773 EXPORT_SYMBOL(sync_mapping_buffers);
774
775 /*
776 * Called when we've recently written block `bblock', and it is known that
777 * `bblock' was for a buffer_boundary() buffer. This means that the block at
778 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
779 * dirty, schedule it for IO. So that indirects merge nicely with their data.
780 */
781 void write_boundary_block(struct block_device *bdev,
782 sector_t bblock, unsigned blocksize)
783 {
784 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
785 if (bh) {
786 if (buffer_dirty(bh))
787 ll_rw_block(WRITE, 1, &bh);
788 put_bh(bh);
789 }
790 }
791
792 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
793 {
794 struct address_space *mapping = inode->i_mapping;
795 struct address_space *buffer_mapping = bh->b_page->mapping;
796
797 mark_buffer_dirty(bh);
798 if (!mapping->assoc_mapping) {
799 mapping->assoc_mapping = buffer_mapping;
800 } else {
801 if (mapping->assoc_mapping != buffer_mapping)
802 BUG();
803 }
804 if (list_empty(&bh->b_assoc_buffers)) {
805 spin_lock(&buffer_mapping->private_lock);
806 list_move_tail(&bh->b_assoc_buffers,
807 &mapping->private_list);
808 spin_unlock(&buffer_mapping->private_lock);
809 }
810 }
811 EXPORT_SYMBOL(mark_buffer_dirty_inode);
812
813 /*
814 * Add a page to the dirty page list.
815 *
816 * It is a sad fact of life that this function is called from several places
817 * deeply under spinlocking. It may not sleep.
818 *
819 * If the page has buffers, the uptodate buffers are set dirty, to preserve
820  * dirty-state coherency between the page and the buffers. If the page does
821 * not have buffers then when they are later attached they will all be set
822 * dirty.
823 *
824 * The buffers are dirtied before the page is dirtied. There's a small race
825 * window in which a writepage caller may see the page cleanness but not the
826 * buffer dirtiness. That's fine. If this code were to set the page dirty
827 * before the buffers, a concurrent writepage caller could clear the page dirty
828 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
829 * page on the dirty page list.
830 *
831 * We use private_lock to lock against try_to_free_buffers while using the
832 * page's buffer list. Also use this to protect against clean buffers being
833 * added to the page after it was set dirty.
834 *
835 * FIXME: may need to call ->reservepage here as well. That's rather up to the
836 * address_space though.
837 */
838 int __set_page_dirty_buffers(struct page *page)
839 {
840 struct address_space * const mapping = page->mapping;
841
842 spin_lock(&mapping->private_lock);
843 if (page_has_buffers(page)) {
844 struct buffer_head *head = page_buffers(page);
845 struct buffer_head *bh = head;
846
847 do {
848 set_buffer_dirty(bh);
849 bh = bh->b_this_page;
850 } while (bh != head);
851 }
852 spin_unlock(&mapping->private_lock);
853
854 if (!TestSetPageDirty(page)) {
855 write_lock_irq(&mapping->tree_lock);
856 if (page->mapping) { /* Race with truncate? */
857 if (mapping_cap_account_dirty(mapping))
858 inc_page_state(nr_dirty);
859 radix_tree_tag_set(&mapping->page_tree,
860 page_index(page),
861 PAGECACHE_TAG_DIRTY);
862 }
863 write_unlock_irq(&mapping->tree_lock);
864 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
865 return 1;
866 }
867 return 0;
868 }
869 EXPORT_SYMBOL(__set_page_dirty_buffers);
870
871 /*
872 * Write out and wait upon a list of buffers.
873 *
874 * We have conflicting pressures: we want to make sure that all
875 * initially dirty buffers get waited on, but that any subsequently
876 * dirtied buffers don't. After all, we don't want fsync to last
877 * forever if somebody is actively writing to the file.
878 *
879 * Do this in two main stages: first we copy dirty buffers to a
880 * temporary inode list, queueing the writes as we go. Then we clean
881 * up, waiting for those writes to complete.
882 *
883 * During this second stage, any subsequent updates to the file may end
884 * up refiling the buffer on the original inode's dirty list again, so
885 * there is a chance we will end up with a buffer queued for write but
886 * not yet completed on that list. So, as a final cleanup we go through
887 * the osync code to catch these locked, dirty buffers without requeuing
888 * any newly dirty buffers for write.
889 */
890 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
891 {
892 struct buffer_head *bh;
893 struct list_head tmp;
894 int err = 0, err2;
895
896 INIT_LIST_HEAD(&tmp);
897
898 spin_lock(lock);
899 while (!list_empty(list)) {
900 bh = BH_ENTRY(list->next);
901 list_del_init(&bh->b_assoc_buffers);
902 if (buffer_dirty(bh) || buffer_locked(bh)) {
903 list_add(&bh->b_assoc_buffers, &tmp);
904 if (buffer_dirty(bh)) {
905 get_bh(bh);
906 spin_unlock(lock);
907 /*
908 * Ensure any pending I/O completes so that
909 * ll_rw_block() actually writes the current
910 * contents - it is a noop if I/O is still in
911 * flight on potentially older contents.
912 */
913 ll_rw_block(SWRITE, 1, &bh);
914 brelse(bh);
915 spin_lock(lock);
916 }
917 }
918 }
919
920 while (!list_empty(&tmp)) {
921 bh = BH_ENTRY(tmp.prev);
922 __remove_assoc_queue(bh);
923 get_bh(bh);
924 spin_unlock(lock);
925 wait_on_buffer(bh);
926 if (!buffer_uptodate(bh))
927 err = -EIO;
928 brelse(bh);
929 spin_lock(lock);
930 }
931
932 spin_unlock(lock);
933 err2 = osync_buffers_list(lock, list);
934 if (err)
935 return err;
936 else
937 return err2;
938 }
939
940 /*
941 * Invalidate any and all dirty buffers on a given inode. We are
942 * probably unmounting the fs, but that doesn't mean we have already
943 * done a sync(). Just drop the buffers from the inode list.
944 *
945 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
946 * assumes that all the buffers are against the blockdev. Not true
947 * for reiserfs.
948 */
949 void invalidate_inode_buffers(struct inode *inode)
950 {
951 if (inode_has_buffers(inode)) {
952 struct address_space *mapping = &inode->i_data;
953 struct list_head *list = &mapping->private_list;
954 struct address_space *buffer_mapping = mapping->assoc_mapping;
955
956 spin_lock(&buffer_mapping->private_lock);
957 while (!list_empty(list))
958 __remove_assoc_queue(BH_ENTRY(list->next));
959 spin_unlock(&buffer_mapping->private_lock);
960 }
961 }
962
963 /*
964 * Remove any clean buffers from the inode's buffer list. This is called
965 * when we're trying to free the inode itself. Those buffers can pin it.
966 *
967 * Returns true if all buffers were removed.
968 */
969 int remove_inode_buffers(struct inode *inode)
970 {
971 int ret = 1;
972
973 if (inode_has_buffers(inode)) {
974 struct address_space *mapping = &inode->i_data;
975 struct list_head *list = &mapping->private_list;
976 struct address_space *buffer_mapping = mapping->assoc_mapping;
977
978 spin_lock(&buffer_mapping->private_lock);
979 while (!list_empty(list)) {
980 struct buffer_head *bh = BH_ENTRY(list->next);
981 if (buffer_dirty(bh)) {
982 ret = 0;
983 break;
984 }
985 __remove_assoc_queue(bh);
986 }
987 spin_unlock(&buffer_mapping->private_lock);
988 }
989 return ret;
990 }
991
992 /*
993  * Create the appropriate buffers when given a page for the data area and
994  * the size of each buffer. Use the bh->b_this_page linked list to
995 * follow the buffers created. Return NULL if unable to create more
996 * buffers.
997 *
998 * The retry flag is used to differentiate async IO (paging, swapping)
999 * which may not fail from ordinary buffer allocations.
1000 */
1001 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
1002 int retry)
1003 {
1004 struct buffer_head *bh, *head;
1005 long offset;
1006
1007 try_again:
1008 head = NULL;
1009 offset = PAGE_SIZE;
1010 while ((offset -= size) >= 0) {
1011 bh = alloc_buffer_head(GFP_NOFS);
1012 if (!bh)
1013 goto no_grow;
1014
1015 bh->b_bdev = NULL;
1016 bh->b_this_page = head;
1017 bh->b_blocknr = -1;
1018 head = bh;
1019
1020 bh->b_state = 0;
1021 atomic_set(&bh->b_count, 0);
1022 bh->b_private = NULL;
1023 bh->b_size = size;
1024
1025 /* Link the buffer to its page */
1026 set_bh_page(bh, page, offset);
1027
1028 init_buffer(bh, NULL, NULL);
1029 }
1030 return head;
1031 /*
1032 * In case anything failed, we just free everything we got.
1033 */
1034 no_grow:
1035 if (head) {
1036 do {
1037 bh = head;
1038 head = head->b_this_page;
1039 free_buffer_head(bh);
1040 } while (head);
1041 }
1042
1043 /*
1044 * Return failure for non-async IO requests. Async IO requests
1045 * are not allowed to fail, so we have to wait until buffer heads
1046 * become available. But we don't want tasks sleeping with
1047 * partially complete buffers, so all were released above.
1048 */
1049 if (!retry)
1050 return NULL;
1051
1052 /* We're _really_ low on memory. Now we just
1053 * wait for old buffer heads to become free due to
1054 * finishing IO. Since this is an async request and
1055 * the reserve list is empty, we're sure there are
1056 * async buffer heads in use.
1057 */
1058 free_more_memory();
1059 goto try_again;
1060 }
1061 EXPORT_SYMBOL_GPL(alloc_page_buffers);
1062
1063 static inline void
1064 link_dev_buffers(struct page *page, struct buffer_head *head)
1065 {
1066 struct buffer_head *bh, *tail;
1067
1068 bh = head;
1069 do {
1070 tail = bh;
1071 bh = bh->b_this_page;
1072 } while (bh);
1073 tail->b_this_page = head;
1074 attach_page_buffers(page, head);
1075 }
1076
1077 /*
1078 * Initialise the state of a blockdev page's buffers.
1079 */
1080 static void
1081 init_page_buffers(struct page *page, struct block_device *bdev,
1082 sector_t block, int size)
1083 {
1084 struct buffer_head *head = page_buffers(page);
1085 struct buffer_head *bh = head;
1086 int uptodate = PageUptodate(page);
1087
1088 do {
1089 if (!buffer_mapped(bh)) {
1090 init_buffer(bh, NULL, NULL);
1091 bh->b_bdev = bdev;
1092 bh->b_blocknr = block;
1093 if (uptodate)
1094 set_buffer_uptodate(bh);
1095 set_buffer_mapped(bh);
1096 }
1097 block++;
1098 bh = bh->b_this_page;
1099 } while (bh != head);
1100 }
1101
1102 /*
1103 * Create the page-cache page that contains the requested block.
1104 *
1105  * This is used purely for blockdev mappings.
1106 */
1107 static struct page *
1108 grow_dev_page(struct block_device *bdev, sector_t block,
1109 pgoff_t index, int size)
1110 {
1111 struct inode *inode = bdev->bd_inode;
1112 struct page *page;
1113 struct buffer_head *bh;
1114
1115 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1116 if (!page)
1117 return NULL;
1118
1119 if (!PageLocked(page))
1120 BUG();
1121
1122 if (page_has_buffers(page)) {
1123 bh = page_buffers(page);
1124 if (bh->b_size == size) {
1125 init_page_buffers(page, bdev, block, size);
1126 return page;
1127 }
1128 if (!try_to_free_buffers(page))
1129 goto failed;
1130 }
1131
1132 /*
1133 * Allocate some buffers for this page
1134 */
1135 bh = alloc_page_buffers(page, size, 0);
1136 if (!bh)
1137 goto failed;
1138
1139 /*
1140 * Link the page to the buffers and initialise them. Take the
1141 * lock to be atomic wrt __find_get_block(), which does not
1142 * run under the page lock.
1143 */
1144 spin_lock(&inode->i_mapping->private_lock);
1145 link_dev_buffers(page, bh);
1146 init_page_buffers(page, bdev, block, size);
1147 spin_unlock(&inode->i_mapping->private_lock);
1148 return page;
1149
1150 failed:
1151 BUG();
1152 unlock_page(page);
1153 page_cache_release(page);
1154 return NULL;
1155 }
1156
1157 /*
1158 * Create buffers for the specified block device block's page. If
1159 * that page was dirty, the buffers are set dirty also.
1160 *
1161 * Except that's a bug. Attaching dirty buffers to a dirty
1162 * blockdev's page can result in filesystem corruption, because
1163 * some of those buffers may be aliases of filesystem data.
1164 * grow_dev_page() will go BUG() if this happens.
1165 */
1166 static int
1167 grow_buffers(struct block_device *bdev, sector_t block, int size)
1168 {
1169 struct page *page;
1170 pgoff_t index;
1171 int sizebits;
1172
1173 sizebits = -1;
1174 do {
1175 sizebits++;
1176 } while ((size << sizebits) < PAGE_SIZE);
1177
1178 index = block >> sizebits;
1179 block = index << sizebits;
1180
1181 /* Create a page with the proper size buffers.. */
1182 page = grow_dev_page(bdev, block, index, size);
1183 if (!page)
1184 return 0;
1185 unlock_page(page);
1186 page_cache_release(page);
1187 return 1;
1188 }
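/*
 * Editorial note: a worked example of the arithmetic above, assuming 4K
 * pages. For size = 1024 there are four buffers per page, so the loop leaves
 * sizebits == 2: a request for block 1001 gives index = 1001 >> 2 = 250 and
 * block is rounded down to 250 << 2 = 1000, the first block backed by that
 * page. grow_dev_page() then creates buffers covering blocks 1000..1003.
 */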
1189
1190 static struct buffer_head *
1191 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1192 {
1193 /* Size must be multiple of hard sectorsize */
1194 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1195 (size < 512 || size > PAGE_SIZE))) {
1196 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1197 size);
1198 printk(KERN_ERR "hardsect size: %d\n",
1199 bdev_hardsect_size(bdev));
1200
1201 dump_stack();
1202 return NULL;
1203 }
1204
1205 for (;;) {
1206 struct buffer_head * bh;
1207
1208 bh = __find_get_block(bdev, block, size);
1209 if (bh)
1210 return bh;
1211
1212 if (!grow_buffers(bdev, block, size))
1213 free_more_memory();
1214 }
1215 }
1216
1217 /*
1218 * The relationship between dirty buffers and dirty pages:
1219 *
1220 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1221 * the page is tagged dirty in its radix tree.
1222 *
1223 * At all times, the dirtiness of the buffers represents the dirtiness of
1224 * subsections of the page. If the page has buffers, the page dirty bit is
1225 * merely a hint about the true dirty state.
1226 *
1227 * When a page is set dirty in its entirety, all its buffers are marked dirty
1228 * (if the page has buffers).
1229 *
1230 * When a buffer is marked dirty, its page is dirtied, but the page's other
1231 * buffers are not.
1232 *
1233 * Also. When blockdev buffers are explicitly read with bread(), they
1234 * individually become uptodate. But their backing page remains not
1235 * uptodate - even if all of its buffers are uptodate. A subsequent
1236 * block_read_full_page() against that page will discover all the uptodate
1237 * buffers, will set the page uptodate and will perform no I/O.
1238 */
1239
1240 /**
1241 * mark_buffer_dirty - mark a buffer_head as needing writeout
1242 * @bh: the buffer_head to mark dirty
1243 *
1244 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1245 * backing page dirty, then tag the page as dirty in its address_space's radix
1246 * tree and then attach the address_space's inode to its superblock's dirty
1247 * inode list.
1248 *
1249 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1250 * mapping->tree_lock and the global inode_lock.
1251 */
1252 void fastcall mark_buffer_dirty(struct buffer_head *bh)
1253 {
1254 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1255 __set_page_dirty_nobuffers(bh->b_page);
1256 }
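/*
 * Editorial note: a sketch of how a freshly allocated metadata block is
 * typically dirtied ("blocknr" is assumed to have just been allocated by the
 * filesystem; sb_getblk() is the superblock wrapper around __getblk()):
 *
 *	struct buffer_head *bh = sb_getblk(sb, blocknr);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	mark_buffer_dirty(bh);		(buffer, page and inode all dirtied)
 *	brelse(bh);
 *
 * Writeback then happens later via pdflush or sync; callers that need the
 * block on disk immediately use a helper such as sync_dirty_buffer().
 */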
1257
1258 /*
1259 * Decrement a buffer_head's reference count. If all buffers against a page
1260 * have zero reference count, are clean and unlocked, and if the page is clean
1261 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1262 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1263 * a page but it ends up not being freed, and buffers may later be reattached).
1264 */
1265 void __brelse(struct buffer_head * buf)
1266 {
1267 if (atomic_read(&buf->b_count)) {
1268 put_bh(buf);
1269 return;
1270 }
1271 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1272 WARN_ON(1);
1273 }
1274
1275 /*
1276 * bforget() is like brelse(), except it discards any
1277 * potentially dirty data.
1278 */
1279 void __bforget(struct buffer_head *bh)
1280 {
1281 clear_buffer_dirty(bh);
1282 if (!list_empty(&bh->b_assoc_buffers)) {
1283 struct address_space *buffer_mapping = bh->b_page->mapping;
1284
1285 spin_lock(&buffer_mapping->private_lock);
1286 list_del_init(&bh->b_assoc_buffers);
1287 spin_unlock(&buffer_mapping->private_lock);
1288 }
1289 __brelse(bh);
1290 }
1291
1292 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1293 {
1294 lock_buffer(bh);
1295 if (buffer_uptodate(bh)) {
1296 unlock_buffer(bh);
1297 return bh;
1298 } else {
1299 get_bh(bh);
1300 bh->b_end_io = end_buffer_read_sync;
1301 submit_bh(READ, bh);
1302 wait_on_buffer(bh);
1303 if (buffer_uptodate(bh))
1304 return bh;
1305 }
1306 brelse(bh);
1307 return NULL;
1308 }
1309
1310 /*
1311 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1312 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1313 * refcount elevated by one when they're in an LRU. A buffer can only appear
1314 * once in a particular CPU's LRU. A single buffer can be present in multiple
1315 * CPU's LRUs at the same time.
1316 *
1317 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1318 * sb_find_get_block().
1319 *
1320 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1321 * a local interrupt disable for that.
1322 */
1323
1324 #define BH_LRU_SIZE 8
1325
1326 struct bh_lru {
1327 struct buffer_head *bhs[BH_LRU_SIZE];
1328 };
1329
1330 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1331
1332 #ifdef CONFIG_SMP
1333 #define bh_lru_lock() local_irq_disable()
1334 #define bh_lru_unlock() local_irq_enable()
1335 #else
1336 #define bh_lru_lock() preempt_disable()
1337 #define bh_lru_unlock() preempt_enable()
1338 #endif
1339
1340 static inline void check_irqs_on(void)
1341 {
1342 #ifdef irqs_disabled
1343 BUG_ON(irqs_disabled());
1344 #endif
1345 }
1346
1347 /*
1348 * The LRU management algorithm is dopey-but-simple. Sorry.
1349 */
1350 static void bh_lru_install(struct buffer_head *bh)
1351 {
1352 struct buffer_head *evictee = NULL;
1353 struct bh_lru *lru;
1354
1355 check_irqs_on();
1356 bh_lru_lock();
1357 lru = &__get_cpu_var(bh_lrus);
1358 if (lru->bhs[0] != bh) {
1359 struct buffer_head *bhs[BH_LRU_SIZE];
1360 int in;
1361 int out = 0;
1362
1363 get_bh(bh);
1364 bhs[out++] = bh;
1365 for (in = 0; in < BH_LRU_SIZE; in++) {
1366 struct buffer_head *bh2 = lru->bhs[in];
1367
1368 if (bh2 == bh) {
1369 __brelse(bh2);
1370 } else {
1371 if (out >= BH_LRU_SIZE) {
1372 BUG_ON(evictee != NULL);
1373 evictee = bh2;
1374 } else {
1375 bhs[out++] = bh2;
1376 }
1377 }
1378 }
1379 while (out < BH_LRU_SIZE)
1380 bhs[out++] = NULL;
1381 memcpy(lru->bhs, bhs, sizeof(bhs));
1382 }
1383 bh_lru_unlock();
1384
1385 if (evictee)
1386 __brelse(evictee);
1387 }
1388
1389 /*
1390 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1391 */
1392 static struct buffer_head *
1393 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1394 {
1395 struct buffer_head *ret = NULL;
1396 struct bh_lru *lru;
1397 int i;
1398
1399 check_irqs_on();
1400 bh_lru_lock();
1401 lru = &__get_cpu_var(bh_lrus);
1402 for (i = 0; i < BH_LRU_SIZE; i++) {
1403 struct buffer_head *bh = lru->bhs[i];
1404
1405 if (bh && bh->b_bdev == bdev &&
1406 bh->b_blocknr == block && bh->b_size == size) {
1407 if (i) {
1408 while (i) {
1409 lru->bhs[i] = lru->bhs[i - 1];
1410 i--;
1411 }
1412 lru->bhs[0] = bh;
1413 }
1414 get_bh(bh);
1415 ret = bh;
1416 break;
1417 }
1418 }
1419 bh_lru_unlock();
1420 return ret;
1421 }
1422
1423 /*
1424 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1425 * it in the LRU and mark it as accessed. If it is not present then return
1426 * NULL
1427 */
1428 struct buffer_head *
1429 __find_get_block(struct block_device *bdev, sector_t block, int size)
1430 {
1431 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1432
1433 if (bh == NULL) {
1434 bh = __find_get_block_slow(bdev, block);
1435 if (bh)
1436 bh_lru_install(bh);
1437 }
1438 if (bh)
1439 touch_buffer(bh);
1440 return bh;
1441 }
1442 EXPORT_SYMBOL(__find_get_block);
1443
1444 /*
1445 * __getblk will locate (and, if necessary, create) the buffer_head
1446 * which corresponds to the passed block_device, block and size. The
1447 * returned buffer has its reference count incremented.
1448 *
1449 * __getblk() cannot fail - it just keeps trying. If you pass it an
1450 * illegal block number, __getblk() will happily return a buffer_head
1451 * which represents the non-existent block. Very weird.
1452 *
1453 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1454 * attempt is failing. FIXME, perhaps?
1455 */
1456 struct buffer_head *
1457 __getblk(struct block_device *bdev, sector_t block, int size)
1458 {
1459 struct buffer_head *bh = __find_get_block(bdev, block, size);
1460
1461 might_sleep();
1462 if (bh == NULL)
1463 bh = __getblk_slow(bdev, block, size);
1464 return bh;
1465 }
1466 EXPORT_SYMBOL(__getblk);
1467
1468 /*
1469 * Do async read-ahead on a buffer..
1470 */
1471 void __breadahead(struct block_device *bdev, sector_t block, int size)
1472 {
1473 struct buffer_head *bh = __getblk(bdev, block, size);
1474 if (likely(bh)) {
1475 ll_rw_block(READA, 1, &bh);
1476 brelse(bh);
1477 }
1478 }
1479 EXPORT_SYMBOL(__breadahead);
1480
1481 /**
1482 * __bread() - reads a specified block and returns the bh
1483 * @bdev: the block_device to read from
1484 * @block: number of block
1485 * @size: size (in bytes) to read
1486 *
1487 * Reads a specified block, and returns buffer head that contains it.
1488 * It returns NULL if the block was unreadable.
1489 */
1490 struct buffer_head *
1491 __bread(struct block_device *bdev, sector_t block, int size)
1492 {
1493 struct buffer_head *bh = __getblk(bdev, block, size);
1494
1495 if (likely(bh) && !buffer_uptodate(bh))
1496 bh = __bread_slow(bh);
1497 return bh;
1498 }
1499 EXPORT_SYMBOL(__bread);
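/*
 * Editorial note: an illustrative caller, assuming the sb_bread() wrapper
 * (which passes sb->s_bdev and sb->s_blocksize down to __bread()):
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *	if (!bh)
 *		return -EIO;	(the block could not be read)
 *	... bh->b_data now holds s_blocksize bytes of up-to-date data ...
 *	brelse(bh);
 *
 * Unlike __getblk(), __bread() can return NULL, so callers must check.
 */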
1500
1501 /*
1502 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1503 * This doesn't race because it runs in each cpu either in irq
1504 * or with preempt disabled.
1505 */
1506 static void invalidate_bh_lru(void *arg)
1507 {
1508 struct bh_lru *b = &get_cpu_var(bh_lrus);
1509 int i;
1510
1511 for (i = 0; i < BH_LRU_SIZE; i++) {
1512 brelse(b->bhs[i]);
1513 b->bhs[i] = NULL;
1514 }
1515 put_cpu_var(bh_lrus);
1516 }
1517
1518 static void invalidate_bh_lrus(void)
1519 {
1520 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1521 }
1522
1523 void set_bh_page(struct buffer_head *bh,
1524 struct page *page, unsigned long offset)
1525 {
1526 bh->b_page = page;
1527 if (offset >= PAGE_SIZE)
1528 BUG();
1529 if (PageHighMem(page))
1530 /*
1531 * This catches illegal uses and preserves the offset:
1532 */
1533 bh->b_data = (char *)(0 + offset);
1534 else
1535 bh->b_data = page_address(page) + offset;
1536 }
1537 EXPORT_SYMBOL(set_bh_page);
1538
1539 /*
1540 * Called when truncating a buffer on a page completely.
1541 */
1542 static void discard_buffer(struct buffer_head * bh)
1543 {
1544 lock_buffer(bh);
1545 clear_buffer_dirty(bh);
1546 bh->b_bdev = NULL;
1547 clear_buffer_mapped(bh);
1548 clear_buffer_req(bh);
1549 clear_buffer_new(bh);
1550 clear_buffer_delay(bh);
1551 unlock_buffer(bh);
1552 }
1553
1554 /**
1555 * try_to_release_page() - release old fs-specific metadata on a page
1556 *
1557 * @page: the page which the kernel is trying to free
1558 * @gfp_mask: memory allocation flags (and I/O mode)
1559 *
1560 * The address_space is to try to release any data against the page
1561 * (presumably at page->private). If the release was successful, return `1'.
1562 * Otherwise return zero.
1563 *
1564 * The @gfp_mask argument specifies whether I/O may be performed to release
1565 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1566 *
1567 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1568 */
1569 int try_to_release_page(struct page *page, gfp_t gfp_mask)
1570 {
1571 struct address_space * const mapping = page->mapping;
1572
1573 BUG_ON(!PageLocked(page));
1574 if (PageWriteback(page))
1575 return 0;
1576
1577 if (mapping && mapping->a_ops->releasepage)
1578 return mapping->a_ops->releasepage(page, gfp_mask);
1579 return try_to_free_buffers(page);
1580 }
1581 EXPORT_SYMBOL(try_to_release_page);
1582
1583 /**
1584  * block_invalidatepage - invalidate part or all of a buffer-backed page
1585 *
1586 * @page: the page which is affected
1587 * @offset: the index of the truncation point
1588 *
1589 * block_invalidatepage() is called when all or part of the page has become
1590  * invalidated by a truncate operation.
1591 *
1592 * block_invalidatepage() does not have to release all buffers, but it must
1593 * ensure that no dirty buffer is left outside @offset and that no I/O
1594 * is underway against any of the blocks which are outside the truncation
1595 * point. Because the caller is about to free (and possibly reuse) those
1596 * blocks on-disk.
1597 */
1598 void block_invalidatepage(struct page *page, unsigned long offset)
1599 {
1600 struct buffer_head *head, *bh, *next;
1601 unsigned int curr_off = 0;
1602
1603 BUG_ON(!PageLocked(page));
1604 if (!page_has_buffers(page))
1605 goto out;
1606
1607 head = page_buffers(page);
1608 bh = head;
1609 do {
1610 unsigned int next_off = curr_off + bh->b_size;
1611 next = bh->b_this_page;
1612
1613 /*
1614 * is this block fully invalidated?
1615 */
1616 if (offset <= curr_off)
1617 discard_buffer(bh);
1618 curr_off = next_off;
1619 bh = next;
1620 } while (bh != head);
1621
1622 /*
1623 * We release buffers only if the entire page is being invalidated.
1624 * The get_block cached value has been unconditionally invalidated,
1625 * so real IO is not possible anymore.
1626 */
1627 if (offset == 0)
1628 try_to_release_page(page, 0);
1629 out:
1630 return;
1631 }
1632 EXPORT_SYMBOL(block_invalidatepage);
1633
1634 void do_invalidatepage(struct page *page, unsigned long offset)
1635 {
1636 void (*invalidatepage)(struct page *, unsigned long);
1637 invalidatepage = page->mapping->a_ops->invalidatepage ? :
1638 block_invalidatepage;
1639 (*invalidatepage)(page, offset);
1640 }
1641
1642 /*
1643 * We attach and possibly dirty the buffers atomically wrt
1644 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1645 * is already excluded via the page lock.
1646 */
1647 void create_empty_buffers(struct page *page,
1648 unsigned long blocksize, unsigned long b_state)
1649 {
1650 struct buffer_head *bh, *head, *tail;
1651
1652 head = alloc_page_buffers(page, blocksize, 1);
1653 bh = head;
1654 do {
1655 bh->b_state |= b_state;
1656 tail = bh;
1657 bh = bh->b_this_page;
1658 } while (bh);
1659 tail->b_this_page = head;
1660
1661 spin_lock(&page->mapping->private_lock);
1662 if (PageUptodate(page) || PageDirty(page)) {
1663 bh = head;
1664 do {
1665 if (PageDirty(page))
1666 set_buffer_dirty(bh);
1667 if (PageUptodate(page))
1668 set_buffer_uptodate(bh);
1669 bh = bh->b_this_page;
1670 } while (bh != head);
1671 }
1672 attach_page_buffers(page, head);
1673 spin_unlock(&page->mapping->private_lock);
1674 }
1675 EXPORT_SYMBOL(create_empty_buffers);
1676
1677 /*
1678 * We are taking a block for data and we don't want any output from any
1679 * buffer-cache aliases starting from return from that function and
1680 * until the moment when something will explicitly mark the buffer
1681 * dirty (hopefully that will not happen until we will free that block ;-)
1682 * We don't even need to mark it not-uptodate - nobody can expect
1683  * anything from a newly allocated buffer anyway. We used to use
1684 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1685 * don't want to mark the alias unmapped, for example - it would confuse
1686 * anyone who might pick it with bread() afterwards...
1687 *
1688 * Also.. Note that bforget() doesn't lock the buffer. So there can
1689 * be writeout I/O going on against recently-freed buffers. We don't
1690 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1691 * only if we really need to. That happens here.
1692 */
1693 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1694 {
1695 struct buffer_head *old_bh;
1696
1697 might_sleep();
1698
1699 old_bh = __find_get_block_slow(bdev, block);
1700 if (old_bh) {
1701 clear_buffer_dirty(old_bh);
1702 wait_on_buffer(old_bh);
1703 clear_buffer_req(old_bh);
1704 __brelse(old_bh);
1705 }
1706 }
1707 EXPORT_SYMBOL(unmap_underlying_metadata);
1708
1709 /*
1710 * NOTE! All mapped/uptodate combinations are valid:
1711 *
1712 * Mapped Uptodate Meaning
1713 *
1714 * No No "unknown" - must do get_block()
1715 * No Yes "hole" - zero-filled
1716 * Yes No "allocated" - allocated on disk, not read in
1717 * Yes Yes "valid" - allocated and up-to-date in memory.
1718 *
1719 * "Dirty" is valid only with the last case (mapped+uptodate).
1720 */
1721
1722 /*
1723 * While block_write_full_page is writing back the dirty buffers under
1724 * the page lock, whoever dirtied the buffers may decide to clean them
1725 * again at any time. We handle that by only looking at the buffer
1726 * state inside lock_buffer().
1727 *
1728 * If block_write_full_page() is called for regular writeback
1729 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1730 * locked buffer. This only can happen if someone has written the buffer
1731 * directly, with submit_bh(). At the address_space level PageWriteback
1732 * prevents this contention from occurring.
1733 */
1734 static int __block_write_full_page(struct inode *inode, struct page *page,
1735 get_block_t *get_block, struct writeback_control *wbc)
1736 {
1737 int err;
1738 sector_t block;
1739 sector_t last_block;
1740 struct buffer_head *bh, *head;
1741 const unsigned blocksize = 1 << inode->i_blkbits;
1742 int nr_underway = 0;
1743
1744 BUG_ON(!PageLocked(page));
1745
1746 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1747
1748 if (!page_has_buffers(page)) {
1749 create_empty_buffers(page, blocksize,
1750 (1 << BH_Dirty)|(1 << BH_Uptodate));
1751 }
1752
1753 /*
1754 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1755 * here, and the (potentially unmapped) buffers may become dirty at
1756 * any time. If a buffer becomes dirty here after we've inspected it
1757 * then we just miss that fact, and the page stays dirty.
1758 *
1759 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1760 * handle that here by just cleaning them.
1761 */
1762
1763 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1764 head = page_buffers(page);
1765 bh = head;
1766
1767 /*
1768 * Get all the dirty buffers mapped to disk addresses and
1769 * handle any aliases from the underlying blockdev's mapping.
1770 */
1771 do {
1772 if (block > last_block) {
1773 /*
1774 * mapped buffers outside i_size will occur, because
1775 * this page can be outside i_size when there is a
1776 * truncate in progress.
1777 */
1778 /*
1779 * The buffer was zeroed by block_write_full_page()
1780 */
1781 clear_buffer_dirty(bh);
1782 set_buffer_uptodate(bh);
1783 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1784 WARN_ON(bh->b_size != blocksize);
1785 err = get_block(inode, block, bh, 1);
1786 if (err)
1787 goto recover;
1788 if (buffer_new(bh)) {
1789 /* blockdev mappings never come here */
1790 clear_buffer_new(bh);
1791 unmap_underlying_metadata(bh->b_bdev,
1792 bh->b_blocknr);
1793 }
1794 }
1795 bh = bh->b_this_page;
1796 block++;
1797 } while (bh != head);
1798
1799 do {
1800 if (!buffer_mapped(bh))
1801 continue;
1802 /*
1803 * If it's a fully non-blocking write attempt and we cannot
1804 * lock the buffer then redirty the page. Note that this can
1805 * potentially cause a busy-wait loop from pdflush and kswapd
1806 * activity, but those code paths have their own higher-level
1807 * throttling.
1808 */
1809 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1810 lock_buffer(bh);
1811 } else if (test_set_buffer_locked(bh)) {
1812 redirty_page_for_writepage(wbc, page);
1813 continue;
1814 }
1815 if (test_clear_buffer_dirty(bh)) {
1816 mark_buffer_async_write(bh);
1817 } else {
1818 unlock_buffer(bh);
1819 }
1820 } while ((bh = bh->b_this_page) != head);
1821
1822 /*
1823 * The page and its buffers are protected by PageWriteback(), so we can
1824 * drop the bh refcounts early.
1825 */
1826 BUG_ON(PageWriteback(page));
1827 set_page_writeback(page);
1828
1829 do {
1830 struct buffer_head *next = bh->b_this_page;
1831 if (buffer_async_write(bh)) {
1832 submit_bh(WRITE, bh);
1833 nr_underway++;
1834 }
1835 bh = next;
1836 } while (bh != head);
1837 unlock_page(page);
1838
1839 err = 0;
1840 done:
1841 if (nr_underway == 0) {
1842 /*
1843 * The page was marked dirty, but the buffers were
1844 * clean. Someone wrote them back by hand with
1845 * ll_rw_block/submit_bh. A rare case.
1846 */
1847 int uptodate = 1;
1848 do {
1849 if (!buffer_uptodate(bh)) {
1850 uptodate = 0;
1851 break;
1852 }
1853 bh = bh->b_this_page;
1854 } while (bh != head);
1855 if (uptodate)
1856 SetPageUptodate(page);
1857 end_page_writeback(page);
1858 /*
1859 * The page and buffer_heads can be released at any time from
1860 * here on.
1861 */
1862 wbc->pages_skipped++; /* We didn't write this page */
1863 }
1864 return err;
1865
1866 recover:
1867 /*
1868 * ENOSPC, or some other error. We may already have added some
1869 * blocks to the file, so we need to write these out to avoid
1870 * exposing stale data.
1871 * The page is currently locked and not marked for writeback
1872 */
1873 bh = head;
1874 /* Recovery: lock and submit the mapped buffers */
1875 do {
1876 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1877 lock_buffer(bh);
1878 mark_buffer_async_write(bh);
1879 } else {
1880 /*
1881 * The buffer may have been set dirty during
1882 * attachment to a dirty page.
1883 */
1884 clear_buffer_dirty(bh);
1885 }
1886 } while ((bh = bh->b_this_page) != head);
1887 SetPageError(page);
1888 BUG_ON(PageWriteback(page));
1889 set_page_writeback(page);
1890 unlock_page(page);
1891 do {
1892 struct buffer_head *next = bh->b_this_page;
1893 if (buffer_async_write(bh)) {
1894 clear_buffer_dirty(bh);
1895 submit_bh(WRITE, bh);
1896 nr_underway++;
1897 }
1898 bh = next;
1899 } while (bh != head);
1900 goto done;
1901 }
1902
1903 static int __block_prepare_write(struct inode *inode, struct page *page,
1904 unsigned from, unsigned to, get_block_t *get_block)
1905 {
1906 unsigned block_start, block_end;
1907 sector_t block;
1908 int err = 0;
1909 unsigned blocksize, bbits;
1910 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1911
1912 BUG_ON(!PageLocked(page));
1913 BUG_ON(from > PAGE_CACHE_SIZE);
1914 BUG_ON(to > PAGE_CACHE_SIZE);
1915 BUG_ON(from > to);
1916
1917 blocksize = 1 << inode->i_blkbits;
1918 if (!page_has_buffers(page))
1919 create_empty_buffers(page, blocksize, 0);
1920 head = page_buffers(page);
1921
1922 bbits = inode->i_blkbits;
1923 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1924
1925 for(bh = head, block_start = 0; bh != head || !block_start;
1926 block++, block_start=block_end, bh = bh->b_this_page) {
1927 block_end = block_start + blocksize;
1928 if (block_end <= from || block_start >= to) {
1929 if (PageUptodate(page)) {
1930 if (!buffer_uptodate(bh))
1931 set_buffer_uptodate(bh);
1932 }
1933 continue;
1934 }
1935 if (buffer_new(bh))
1936 clear_buffer_new(bh);
1937 if (!buffer_mapped(bh)) {
1938 WARN_ON(bh->b_size != blocksize);
1939 err = get_block(inode, block, bh, 1);
1940 if (err)
1941 break;
1942 if (buffer_new(bh)) {
1943 unmap_underlying_metadata(bh->b_bdev,
1944 bh->b_blocknr);
1945 if (PageUptodate(page)) {
1946 set_buffer_uptodate(bh);
1947 continue;
1948 }
1949 if (block_end > to || block_start < from) {
1950 void *kaddr;
1951
1952 kaddr = kmap_atomic(page, KM_USER0);
1953 if (block_end > to)
1954 memset(kaddr+to, 0,
1955 block_end-to);
1956 if (block_start < from)
1957 memset(kaddr+block_start,
1958 0, from-block_start);
1959 flush_dcache_page(page);
1960 kunmap_atomic(kaddr, KM_USER0);
1961 }
1962 continue;
1963 }
1964 }
1965 if (PageUptodate(page)) {
1966 if (!buffer_uptodate(bh))
1967 set_buffer_uptodate(bh);
1968 continue;
1969 }
1970 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1971 (block_start < from || block_end > to)) {
1972 ll_rw_block(READ, 1, &bh);
1973 *wait_bh++=bh;
1974 }
1975 }
1976 /*
1977 * If we issued read requests - let them complete.
1978 */
1979 while(wait_bh > wait) {
1980 wait_on_buffer(*--wait_bh);
1981 if (!buffer_uptodate(*wait_bh))
1982 err = -EIO;
1983 }
1984 if (!err) {
1985 bh = head;
1986 do {
1987 if (buffer_new(bh))
1988 clear_buffer_new(bh);
1989 } while ((bh = bh->b_this_page) != head);
1990 return 0;
1991 }
1992 /* Error case: */
1993 /*
1994 * Zero out any newly allocated blocks to avoid exposing stale
1995 * data. If BH_New is set, we know that the block was newly
1996 * allocated in the above loop.
1997 */
1998 bh = head;
1999 block_start = 0;
2000 do {
2001 block_end = block_start+blocksize;
2002 if (block_end <= from)
2003 goto next_bh;
2004 if (block_start >= to)
2005 break;
2006 if (buffer_new(bh)) {
2007 void *kaddr;
2008
2009 clear_buffer_new(bh);
2010 kaddr = kmap_atomic(page, KM_USER0);
2011 memset(kaddr+block_start, 0, bh->b_size);
2012 kunmap_atomic(kaddr, KM_USER0);
2013 set_buffer_uptodate(bh);
2014 mark_buffer_dirty(bh);
2015 }
2016 next_bh:
2017 block_start = block_end;
2018 bh = bh->b_this_page;
2019 } while (bh != head);
2020 return err;
2021 }
2022
2023 static int __block_commit_write(struct inode *inode, struct page *page,
2024 unsigned from, unsigned to)
2025 {
2026 unsigned block_start, block_end;
2027 int partial = 0;
2028 unsigned blocksize;
2029 struct buffer_head *bh, *head;
2030
2031 blocksize = 1 << inode->i_blkbits;
2032
2033 for(bh = head = page_buffers(page), block_start = 0;
2034 bh != head || !block_start;
2035 block_start=block_end, bh = bh->b_this_page) {
2036 block_end = block_start + blocksize;
2037 if (block_end <= from || block_start >= to) {
2038 if (!buffer_uptodate(bh))
2039 partial = 1;
2040 } else {
2041 set_buffer_uptodate(bh);
2042 mark_buffer_dirty(bh);
2043 }
2044 }
2045
2046 /*
2047 * If this is a partial write which happened to make all buffers
2048 * uptodate then we can optimize away a bogus readpage() for
2049 * the next read(). Here we 'discover' whether the page went
2050 * uptodate as a result of this (potentially partial) write.
2051 */
2052 if (!partial)
2053 SetPageUptodate(page);
2054 return 0;
2055 }
2056
2057 /*
2058 * Generic "read page" function for block devices that have the normal
2059 * get_block functionality. This is most of the block device filesystems.
2060 * Reads the page asynchronously --- the unlock_buffer() and
2061 * set/clear_buffer_uptodate() functions propagate buffer state into the
2062 * page struct once IO has completed.
2063 */
2064 int block_read_full_page(struct page *page, get_block_t *get_block)
2065 {
2066 struct inode *inode = page->mapping->host;
2067 sector_t iblock, lblock;
2068 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2069 unsigned int blocksize;
2070 int nr, i;
2071 int fully_mapped = 1;
2072
2073 BUG_ON(!PageLocked(page));
2074 blocksize = 1 << inode->i_blkbits;
2075 if (!page_has_buffers(page))
2076 create_empty_buffers(page, blocksize, 0);
2077 head = page_buffers(page);
2078
2079 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2080 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2081 bh = head;
2082 nr = 0;
2083 i = 0;
2084
2085 do {
2086 if (buffer_uptodate(bh))
2087 continue;
2088
2089 if (!buffer_mapped(bh)) {
2090 int err = 0;
2091
2092 fully_mapped = 0;
2093 if (iblock < lblock) {
2094 WARN_ON(bh->b_size != blocksize);
2095 err = get_block(inode, iblock, bh, 0);
2096 if (err)
2097 SetPageError(page);
2098 }
2099 if (!buffer_mapped(bh)) {
2100 void *kaddr = kmap_atomic(page, KM_USER0);
2101 memset(kaddr + i * blocksize, 0, blocksize);
2102 flush_dcache_page(page);
2103 kunmap_atomic(kaddr, KM_USER0);
2104 if (!err)
2105 set_buffer_uptodate(bh);
2106 continue;
2107 }
2108 /*
2109 * get_block() might have updated the buffer
2110 * synchronously
2111 */
2112 if (buffer_uptodate(bh))
2113 continue;
2114 }
2115 arr[nr++] = bh;
2116 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2117
2118 if (fully_mapped)
2119 SetPageMappedToDisk(page);
2120
2121 if (!nr) {
2122 /*
2123 * All buffers are uptodate - we can set the page uptodate
2124 * as well. But not if get_block() returned an error.
2125 */
2126 if (!PageError(page))
2127 SetPageUptodate(page);
2128 unlock_page(page);
2129 return 0;
2130 }
2131
2132 /* Stage two: lock the buffers */
2133 for (i = 0; i < nr; i++) {
2134 bh = arr[i];
2135 lock_buffer(bh);
2136 mark_buffer_async_read(bh);
2137 }
2138
2139 /*
2140 * Stage 3: start the IO. Check for uptodateness
2141 * inside the buffer lock in case another process reading
2142 * the underlying blockdev brought it uptodate (the sct fix).
2143 */
2144 for (i = 0; i < nr; i++) {
2145 bh = arr[i];
2146 if (buffer_uptodate(bh))
2147 end_buffer_async_read(bh, 1);
2148 else
2149 submit_bh(READ, bh);
2150 }
2151 return 0;
2152 }
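/*
 * Illustrative sketch (not part of this file): a filesystem normally
 * reaches block_read_full_page() through its ->readpage() method,
 * passing its own get_block callback.  "myfs" and myfs_get_block()
 * are hypothetical names used only for this example.
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, myfs_get_block);
 *	}
 *
 *	.readpage = myfs_readpage,	(in myfs's address_space_operations)
 */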
2153
2154 /* utility function for filesystems that need to do work on expanding
2155 * truncates. Uses prepare/commit_write to allow the filesystem to
2156 * deal with the hole.
2157 */
2158 static int __generic_cont_expand(struct inode *inode, loff_t size,
2159 pgoff_t index, unsigned int offset)
2160 {
2161 struct address_space *mapping = inode->i_mapping;
2162 struct page *page;
2163 unsigned long limit;
2164 int err;
2165
2166 err = -EFBIG;
2167 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2168 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2169 send_sig(SIGXFSZ, current, 0);
2170 goto out;
2171 }
2172 if (size > inode->i_sb->s_maxbytes)
2173 goto out;
2174
2175 err = -ENOMEM;
2176 page = grab_cache_page(mapping, index);
2177 if (!page)
2178 goto out;
2179 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2180 if (err) {
2181 /*
2182 * ->prepare_write() may have instantiated a few blocks
2183 * outside i_size. Trim these off again.
2184 */
2185 unlock_page(page);
2186 page_cache_release(page);
2187 vmtruncate(inode, inode->i_size);
2188 goto out;
2189 }
2190
2191 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2192
2193 unlock_page(page);
2194 page_cache_release(page);
2195 if (err > 0)
2196 err = 0;
2197 out:
2198 return err;
2199 }
2200
2201 int generic_cont_expand(struct inode *inode, loff_t size)
2202 {
2203 pgoff_t index;
2204 unsigned int offset;
2205
2206 offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2207
2208 /* ugh. in prepare/commit_write, if from==to==start of block, we
2209 ** skip the prepare. make sure we never send an offset for the start
2210 ** of a block
2211 */
2212 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2213 /* caller must handle this extra byte. */
2214 offset++;
2215 }
2216 index = size >> PAGE_CACHE_SHIFT;
2217
2218 return __generic_cont_expand(inode, size, index, offset);
2219 }
2220
2221 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2222 {
2223 loff_t pos = size - 1;
2224 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2225 unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2226
2227 /* prepare/commit_write can handle even if from==to==start of block. */
2228 return __generic_cont_expand(inode, size, index, offset);
2229 }
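/*
 * Illustrative sketch (not part of this file): a filesystem that cannot
 * leave holes may call generic_cont_expand_simple() from its setattr
 * path when the new size is beyond EOF, so the gap gets zero-filled via
 * prepare/commit_write.  The surrounding names are hypothetical.
 *
 *	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}
 */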
2230
2231 /*
2232 * For moronic filesystems that do not allow holes in files.
2233 * We may have to extend the file.
2234 */
2235
2236 int cont_prepare_write(struct page *page, unsigned offset,
2237 unsigned to, get_block_t *get_block, loff_t *bytes)
2238 {
2239 struct address_space *mapping = page->mapping;
2240 struct inode *inode = mapping->host;
2241 struct page *new_page;
2242 pgoff_t pgpos;
2243 long status;
2244 unsigned zerofrom;
2245 unsigned blocksize = 1 << inode->i_blkbits;
2246 void *kaddr;
2247
2248 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2249 status = -ENOMEM;
2250 new_page = grab_cache_page(mapping, pgpos);
2251 if (!new_page)
2252 goto out;
2253 /* we might sleep */
2254 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2255 unlock_page(new_page);
2256 page_cache_release(new_page);
2257 continue;
2258 }
2259 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2260 if (zerofrom & (blocksize-1)) {
2261 *bytes |= (blocksize-1);
2262 (*bytes)++;
2263 }
2264 status = __block_prepare_write(inode, new_page, zerofrom,
2265 PAGE_CACHE_SIZE, get_block);
2266 if (status)
2267 goto out_unmap;
2268 kaddr = kmap_atomic(new_page, KM_USER0);
2269 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2270 flush_dcache_page(new_page);
2271 kunmap_atomic(kaddr, KM_USER0);
2272 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2273 unlock_page(new_page);
2274 page_cache_release(new_page);
2275 }
2276
2277 if (page->index < pgpos) {
2278 /* completely inside the area */
2279 zerofrom = offset;
2280 } else {
2281 /* page covers the boundary, find the boundary offset */
2282 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2283
2284 /* if we will expand the thing last block will be filled */
2285 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2286 *bytes |= (blocksize-1);
2287 (*bytes)++;
2288 }
2289
2290 /* starting below the boundary? Nothing to zero out */
2291 if (offset <= zerofrom)
2292 zerofrom = offset;
2293 }
2294 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2295 if (status)
2296 goto out1;
2297 if (zerofrom < offset) {
2298 kaddr = kmap_atomic(page, KM_USER0);
2299 memset(kaddr+zerofrom, 0, offset-zerofrom);
2300 flush_dcache_page(page);
2301 kunmap_atomic(kaddr, KM_USER0);
2302 __block_commit_write(inode, page, zerofrom, offset);
2303 }
2304 return 0;
2305 out1:
2306 ClearPageUptodate(page);
2307 return status;
2308
2309 out_unmap:
2310 ClearPageUptodate(new_page);
2311 unlock_page(new_page);
2312 page_cache_release(new_page);
2313 out:
2314 return status;
2315 }
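/*
 * Illustrative sketch (not part of this file): a hole-less filesystem
 * typically wraps cont_prepare_write() in its ->prepare_write() method,
 * tracking the zeroed-out tail in a per-inode byte counter.  "myfs",
 * MYFS_I() and its mmu_private field are hypothetical names here.
 *
 *	static int myfs_prepare_write(struct file *file, struct page *page,
 *				      unsigned from, unsigned to)
 *	{
 *		return cont_prepare_write(page, from, to, myfs_get_block,
 *				&MYFS_I(page->mapping->host)->mmu_private);
 *	}
 */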
2316
2317 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2318 get_block_t *get_block)
2319 {
2320 struct inode *inode = page->mapping->host;
2321 int err = __block_prepare_write(inode, page, from, to, get_block);
2322 if (err)
2323 ClearPageUptodate(page);
2324 return err;
2325 }
2326
2327 int block_commit_write(struct page *page, unsigned from, unsigned to)
2328 {
2329 struct inode *inode = page->mapping->host;
2330 __block_commit_write(inode,page,from,to);
2331 return 0;
2332 }
2333
2334 int generic_commit_write(struct file *file, struct page *page,
2335 unsigned from, unsigned to)
2336 {
2337 struct inode *inode = page->mapping->host;
2338 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2339 __block_commit_write(inode,page,from,to);
2340 /*
2341 * No need to use i_size_read() here, the i_size
2342 * cannot change under us because we hold i_mutex.
2343 */
2344 if (pos > inode->i_size) {
2345 i_size_write(inode, pos);
2346 mark_inode_dirty(inode);
2347 }
2348 return 0;
2349 }
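/*
 * Illustrative sketch (not part of this file): the common pairing is
 * block_prepare_write() inside ->prepare_write() and generic_commit_write()
 * used directly as ->commit_write(), with the filesystem supplying only
 * its get_block callback.  "myfs" names are hypothetical.
 *
 *	static int myfs_prepare_write(struct file *file, struct page *page,
 *				      unsigned from, unsigned to)
 *	{
 *		return block_prepare_write(page, from, to, myfs_get_block);
 *	}
 *
 *	.prepare_write	= myfs_prepare_write,
 *	.commit_write	= generic_commit_write,
 */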
2350
2351
2352 /*
2353 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2354 * immediately, while under the page lock. So it needs a special end_io
2355 * handler which does not touch the bh after unlocking it.
2356 *
2357 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2358 * a race there is benign: unlock_buffer() only uses the bh's address for
2359 * hashing after unlocking the buffer, so it doesn't actually touch the bh
2360 * itself.
2361 */
2362 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2363 {
2364 if (uptodate) {
2365 set_buffer_uptodate(bh);
2366 } else {
2367 /* This happens, due to failed READA attempts. */
2368 clear_buffer_uptodate(bh);
2369 }
2370 unlock_buffer(bh);
2371 }
2372
2373 /*
2374 * On entry, the page is fully not uptodate.
2375 * On exit the page is fully uptodate in the areas outside (from,to)
2376 */
2377 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2378 get_block_t *get_block)
2379 {
2380 struct inode *inode = page->mapping->host;
2381 const unsigned blkbits = inode->i_blkbits;
2382 const unsigned blocksize = 1 << blkbits;
2383 struct buffer_head map_bh;
2384 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2385 unsigned block_in_page;
2386 unsigned block_start;
2387 sector_t block_in_file;
2388 char *kaddr;
2389 int nr_reads = 0;
2390 int i;
2391 int ret = 0;
2392 int is_mapped_to_disk = 1;
2393 int dirtied_it = 0;
2394
2395 if (PageMappedToDisk(page))
2396 return 0;
2397
2398 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2399 map_bh.b_page = page;
2400
2401 /*
2402 * We loop across all blocks in the page, whether or not they are
2403 * part of the affected region. This is so we can discover if the
2404 * page is fully mapped-to-disk.
2405 */
2406 for (block_start = 0, block_in_page = 0;
2407 block_start < PAGE_CACHE_SIZE;
2408 block_in_page++, block_start += blocksize) {
2409 unsigned block_end = block_start + blocksize;
2410 int create;
2411
2412 map_bh.b_state = 0;
2413 create = 1;
2414 if (block_start >= to)
2415 create = 0;
2416 map_bh.b_size = blocksize;
2417 ret = get_block(inode, block_in_file + block_in_page,
2418 &map_bh, create);
2419 if (ret)
2420 goto failed;
2421 if (!buffer_mapped(&map_bh))
2422 is_mapped_to_disk = 0;
2423 if (buffer_new(&map_bh))
2424 unmap_underlying_metadata(map_bh.b_bdev,
2425 map_bh.b_blocknr);
2426 if (PageUptodate(page))
2427 continue;
2428 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2429 kaddr = kmap_atomic(page, KM_USER0);
2430 if (block_start < from) {
2431 memset(kaddr+block_start, 0, from-block_start);
2432 dirtied_it = 1;
2433 }
2434 if (block_end > to) {
2435 memset(kaddr + to, 0, block_end - to);
2436 dirtied_it = 1;
2437 }
2438 flush_dcache_page(page);
2439 kunmap_atomic(kaddr, KM_USER0);
2440 continue;
2441 }
2442 if (buffer_uptodate(&map_bh))
2443 continue; /* reiserfs does this */
2444 if (block_start < from || block_end > to) {
2445 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2446
2447 if (!bh) {
2448 ret = -ENOMEM;
2449 goto failed;
2450 }
2451 bh->b_state = map_bh.b_state;
2452 atomic_set(&bh->b_count, 0);
2453 bh->b_this_page = NULL;
2454 bh->b_page = page;
2455 bh->b_blocknr = map_bh.b_blocknr;
2456 bh->b_size = blocksize;
2457 bh->b_data = (char *)(long)block_start;
2458 bh->b_bdev = map_bh.b_bdev;
2459 bh->b_private = NULL;
2460 read_bh[nr_reads++] = bh;
2461 }
2462 }
2463
2464 if (nr_reads) {
2465 struct buffer_head *bh;
2466
2467 /*
2468 * The page is locked, so these buffers are protected from
2469 * any VM or truncate activity. Hence we don't need to care
2470 * for the buffer_head refcounts.
2471 */
2472 for (i = 0; i < nr_reads; i++) {
2473 bh = read_bh[i];
2474 lock_buffer(bh);
2475 bh->b_end_io = end_buffer_read_nobh;
2476 submit_bh(READ, bh);
2477 }
2478 for (i = 0; i < nr_reads; i++) {
2479 bh = read_bh[i];
2480 wait_on_buffer(bh);
2481 if (!buffer_uptodate(bh))
2482 ret = -EIO;
2483 free_buffer_head(bh);
2484 read_bh[i] = NULL;
2485 }
2486 if (ret)
2487 goto failed;
2488 }
2489
2490 if (is_mapped_to_disk)
2491 SetPageMappedToDisk(page);
2492 SetPageUptodate(page);
2493
2494 /*
2495 * Setting the page dirty here isn't necessary for the prepare_write
2496 * function - commit_write will do that. But if/when this function is
2497 * used within the pagefault handler to ensure that all mmapped pages
2498 * have backing space in the filesystem, we will need to dirty the page
2499 * if its contents were altered.
2500 */
2501 if (dirtied_it)
2502 set_page_dirty(page);
2503
2504 return 0;
2505
2506 failed:
2507 for (i = 0; i < nr_reads; i++) {
2508 if (read_bh[i])
2509 free_buffer_head(read_bh[i]);
2510 }
2511
2512 /*
2513 * Error recovery is pretty slack. Clear the page and mark it dirty
2514 * so we'll later zero out any blocks which _were_ allocated.
2515 */
2516 kaddr = kmap_atomic(page, KM_USER0);
2517 memset(kaddr, 0, PAGE_CACHE_SIZE);
2518 kunmap_atomic(kaddr, KM_USER0);
2519 SetPageUptodate(page);
2520 set_page_dirty(page);
2521 return ret;
2522 }
2523 EXPORT_SYMBOL(nobh_prepare_write);
2524
2525 int nobh_commit_write(struct file *file, struct page *page,
2526 unsigned from, unsigned to)
2527 {
2528 struct inode *inode = page->mapping->host;
2529 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2530
2531 set_page_dirty(page);
2532 if (pos > inode->i_size) {
2533 i_size_write(inode, pos);
2534 mark_inode_dirty(inode);
2535 }
2536 return 0;
2537 }
2538 EXPORT_SYMBOL(nobh_commit_write);
2539
2540 /*
2541 * nobh_writepage() - based on block_write_full_page() except
2542 * that it tries to operate without attaching bufferheads to
2543 * the page.
2544 */
2545 int nobh_writepage(struct page *page, get_block_t *get_block,
2546 struct writeback_control *wbc)
2547 {
2548 struct inode * const inode = page->mapping->host;
2549 loff_t i_size = i_size_read(inode);
2550 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2551 unsigned offset;
2552 void *kaddr;
2553 int ret;
2554
2555 /* Is the page fully inside i_size? */
2556 if (page->index < end_index)
2557 goto out;
2558
2559 /* Is the page fully outside i_size? (truncate in progress) */
2560 offset = i_size & (PAGE_CACHE_SIZE-1);
2561 if (page->index >= end_index+1 || !offset) {
2562 /*
2563 * The page may have dirty, unmapped buffers. For example,
2564 * they may have been added in ext3_writepage(). Make them
2565 * freeable here, so the page does not leak.
2566 */
2567 #if 0
2568 /* Not really sure about this - do we need this? */
2569 if (page->mapping->a_ops->invalidatepage)
2570 page->mapping->a_ops->invalidatepage(page, offset);
2571 #endif
2572 unlock_page(page);
2573 return 0; /* don't care */
2574 }
2575
2576 /*
2577 * The page straddles i_size. It must be zeroed out on each and every
2578 * writepage invocation because it may be mmapped. "A file is mapped
2579 * in multiples of the page size. For a file that is not a multiple of
2580 * the page size, the remaining memory is zeroed when mapped, and
2581 * writes to that region are not written out to the file."
2582 */
2583 kaddr = kmap_atomic(page, KM_USER0);
2584 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2585 flush_dcache_page(page);
2586 kunmap_atomic(kaddr, KM_USER0);
2587 out:
2588 ret = mpage_writepage(page, get_block, wbc);
2589 if (ret == -EAGAIN)
2590 ret = __block_write_full_page(inode, page, get_block, wbc);
2591 return ret;
2592 }
2593 EXPORT_SYMBOL(nobh_writepage);
2594
2595 /*
2596 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2597 */
2598 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2599 {
2600 struct inode *inode = mapping->host;
2601 unsigned blocksize = 1 << inode->i_blkbits;
2602 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2603 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2604 unsigned to;
2605 struct page *page;
2606 struct address_space_operations *a_ops = mapping->a_ops;
2607 char *kaddr;
2608 int ret = 0;
2609
2610 if ((offset & (blocksize - 1)) == 0)
2611 goto out;
2612
2613 ret = -ENOMEM;
2614 page = grab_cache_page(mapping, index);
2615 if (!page)
2616 goto out;
2617
2618 to = (offset + blocksize) & ~(blocksize - 1);
2619 ret = a_ops->prepare_write(NULL, page, offset, to);
2620 if (ret == 0) {
2621 kaddr = kmap_atomic(page, KM_USER0);
2622 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2623 flush_dcache_page(page);
2624 kunmap_atomic(kaddr, KM_USER0);
2625 set_page_dirty(page);
2626 }
2627 unlock_page(page);
2628 page_cache_release(page);
2629 out:
2630 return ret;
2631 }
2632 EXPORT_SYMBOL(nobh_truncate_page);
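/*
 * Illustrative sketch (not part of this file): a filesystem opting out of
 * per-page buffer_heads wires the nobh helpers together - the prepare,
 * commit, writepage and truncate paths are meant to use the nobh variants
 * consistently.  "myfs" names are hypothetical.
 *
 *	static int myfs_nobh_prepare_write(struct file *file, struct page *page,
 *					   unsigned from, unsigned to)
 *	{
 *		return nobh_prepare_write(page, from, to, myfs_get_block);
 *	}
 *
 *	static int myfs_nobh_writepage(struct page *page,
 *				       struct writeback_control *wbc)
 *	{
 *		return nobh_writepage(page, myfs_get_block, wbc);
 *	}
 *
 *	.prepare_write	= myfs_nobh_prepare_write,
 *	.commit_write	= nobh_commit_write,
 *	.writepage	= myfs_nobh_writepage,
 *	(and nobh_truncate_page(mapping, offset) in the truncate path)
 */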
2633
2634 int block_truncate_page(struct address_space *mapping,
2635 loff_t from, get_block_t *get_block)
2636 {
2637 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2638 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2639 unsigned blocksize;
2640 sector_t iblock;
2641 unsigned length, pos;
2642 struct inode *inode = mapping->host;
2643 struct page *page;
2644 struct buffer_head *bh;
2645 void *kaddr;
2646 int err;
2647
2648 blocksize = 1 << inode->i_blkbits;
2649 length = offset & (blocksize - 1);
2650
2651 /* Block boundary? Nothing to do */
2652 if (!length)
2653 return 0;
2654
2655 length = blocksize - length;
2656 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2657
2658 page = grab_cache_page(mapping, index);
2659 err = -ENOMEM;
2660 if (!page)
2661 goto out;
2662
2663 if (!page_has_buffers(page))
2664 create_empty_buffers(page, blocksize, 0);
2665
2666 /* Find the buffer that contains "offset" */
2667 bh = page_buffers(page);
2668 pos = blocksize;
2669 while (offset >= pos) {
2670 bh = bh->b_this_page;
2671 iblock++;
2672 pos += blocksize;
2673 }
2674
2675 err = 0;
2676 if (!buffer_mapped(bh)) {
2677 WARN_ON(bh->b_size != blocksize);
2678 err = get_block(inode, iblock, bh, 0);
2679 if (err)
2680 goto unlock;
2681 /* unmapped? It's a hole - nothing to do */
2682 if (!buffer_mapped(bh))
2683 goto unlock;
2684 }
2685
2686 /* Ok, it's mapped. Make sure it's up-to-date */
2687 if (PageUptodate(page))
2688 set_buffer_uptodate(bh);
2689
2690 if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2691 err = -EIO;
2692 ll_rw_block(READ, 1, &bh);
2693 wait_on_buffer(bh);
2694 /* Uhhuh. Read error. Complain and punt. */
2695 if (!buffer_uptodate(bh))
2696 goto unlock;
2697 }
2698
2699 kaddr = kmap_atomic(page, KM_USER0);
2700 memset(kaddr + offset, 0, length);
2701 flush_dcache_page(page);
2702 kunmap_atomic(kaddr, KM_USER0);
2703
2704 mark_buffer_dirty(bh);
2705 err = 0;
2706
2707 unlock:
2708 unlock_page(page);
2709 page_cache_release(page);
2710 out:
2711 return err;
2712 }
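/*
 * Illustrative sketch (not part of this file): block_truncate_page() is
 * typically called from a filesystem's truncate path to zero the portion
 * of the final block beyond the new i_size, before the block mappings
 * themselves are trimmed.  "myfs" names are hypothetical.
 *
 *	static void myfs_truncate(struct inode *inode)
 *	{
 *		block_truncate_page(inode->i_mapping, inode->i_size,
 *				    myfs_get_block);
 *		... free the now-unused blocks ...
 *	}
 */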
2713
2714 /*
2715 * The generic ->writepage function for buffer-backed address_spaces
2716 */
2717 int block_write_full_page(struct page *page, get_block_t *get_block,
2718 struct writeback_control *wbc)
2719 {
2720 struct inode * const inode = page->mapping->host;
2721 loff_t i_size = i_size_read(inode);
2722 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2723 unsigned offset;
2724 void *kaddr;
2725
2726 /* Is the page fully inside i_size? */
2727 if (page->index < end_index)
2728 return __block_write_full_page(inode, page, get_block, wbc);
2729
2730 /* Is the page fully outside i_size? (truncate in progress) */
2731 offset = i_size & (PAGE_CACHE_SIZE-1);
2732 if (page->index >= end_index+1 || !offset) {
2733 /*
2734 * The page may have dirty, unmapped buffers. For example,
2735 * they may have been added in ext3_writepage(). Make them
2736 * freeable here, so the page does not leak.
2737 */
2738 do_invalidatepage(page, 0);
2739 unlock_page(page);
2740 return 0; /* don't care */
2741 }
2742
2743 /*
2744 * The page straddles i_size. It must be zeroed out on each and every
2745 * writepage invocation because it may be mmapped. "A file is mapped
2746 * in multiples of the page size. For a file that is not a multiple of
2747 * the page size, the remaining memory is zeroed when mapped, and
2748 * writes to that region are not written out to the file."
2749 */
2750 kaddr = kmap_atomic(page, KM_USER0);
2751 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2752 flush_dcache_page(page);
2753 kunmap_atomic(kaddr, KM_USER0);
2754 return __block_write_full_page(inode, page, get_block, wbc);
2755 }
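/*
 * Illustrative sketch (not part of this file): ->writepage() is usually
 * just a thin wrapper around block_write_full_page().  "myfs" names are
 * hypothetical.
 *
 *	static int myfs_writepage(struct page *page,
 *				  struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, myfs_get_block, wbc);
 *	}
 */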
2756
2757 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2758 get_block_t *get_block)
2759 {
2760 struct buffer_head tmp;
2761 struct inode *inode = mapping->host;
2762 tmp.b_state = 0;
2763 tmp.b_blocknr = 0;
2764 tmp.b_size = 1 << inode->i_blkbits;
2765 get_block(inode, block, &tmp, 0);
2766 return tmp.b_blocknr;
2767 }
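/*
 * Illustrative sketch (not part of this file): the FIBMAP-style ->bmap()
 * method is normally just generic_block_bmap() plus the filesystem's own
 * get_block.  "myfs" names are hypothetical.
 *
 *	static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */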
2768
2769 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2770 {
2771 struct buffer_head *bh = bio->bi_private;
2772
2773 if (bio->bi_size)
2774 return 1;
2775
2776 if (err == -EOPNOTSUPP) {
2777 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2778 set_bit(BH_Eopnotsupp, &bh->b_state);
2779 }
2780
2781 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2782 bio_put(bio);
2783 return 0;
2784 }
2785
2786 int submit_bh(int rw, struct buffer_head * bh)
2787 {
2788 struct bio *bio;
2789 int ret = 0;
2790
2791 BUG_ON(!buffer_locked(bh));
2792 BUG_ON(!buffer_mapped(bh));
2793 BUG_ON(!bh->b_end_io);
2794
2795 if (buffer_ordered(bh) && (rw == WRITE))
2796 rw = WRITE_BARRIER;
2797
2798 /*
2799 * Only clear out a write error when rewriting; should this
2800 * include WRITE_SYNC as well?
2801 */
2802 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2803 clear_buffer_write_io_error(bh);
2804
2805 /*
2806 * from here on down, it's all bio -- do the initial mapping,
2807 * submit_bio -> generic_make_request may further map this bio around
2808 */
2809 bio = bio_alloc(GFP_NOIO, 1);
2810
2811 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2812 bio->bi_bdev = bh->b_bdev;
2813 bio->bi_io_vec[0].bv_page = bh->b_page;
2814 bio->bi_io_vec[0].bv_len = bh->b_size;
2815 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2816
2817 bio->bi_vcnt = 1;
2818 bio->bi_idx = 0;
2819 bio->bi_size = bh->b_size;
2820
2821 bio->bi_end_io = end_bio_bh_io_sync;
2822 bio->bi_private = bh;
2823
2824 bio_get(bio);
2825 submit_bio(rw, bio);
2826
2827 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2828 ret = -EOPNOTSUPP;
2829
2830 bio_put(bio);
2831 return ret;
2832 }
2833
2834 /**
2835 * ll_rw_block: low-level access to block devices (DEPRECATED)
2836 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2837 * @nr: number of &struct buffer_heads in the array
2838 * @bhs: array of pointers to &struct buffer_head
2839 *
2840 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2841 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2842 * option, %SWRITE, is like %WRITE except that it makes sure that the *current*
2843 * data in the buffers is sent to disk. The fourth option, %READA, is described
2844 * in the documentation for generic_make_request(), which ll_rw_block() calls.
2845 *
2846 * This function drops any buffer that it cannot get a lock on (with the
2847 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2848 * clean when doing a write request, and any buffer that appears to be
2849 * up-to-date when doing a read request. Further, it marks as clean the buffers that
2850 * are processed for writing (the buffer cache won't assume that they are
2851 * actually clean until the buffer gets unlocked).
2852 *
2853 * ll_rw_block sets b_end_io to a simple completion handler that marks
2854 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2855 * any waiters.
2856 *
2857 * All of the buffers must be for the same device, and must also be a
2858 * multiple of the current approved size for the device.
2859 */
2860 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2861 {
2862 int i;
2863
2864 for (i = 0; i < nr; i++) {
2865 struct buffer_head *bh = bhs[i];
2866
2867 if (rw == SWRITE)
2868 lock_buffer(bh);
2869 else if (test_set_buffer_locked(bh))
2870 continue;
2871
2872 if (rw == WRITE || rw == SWRITE) {
2873 if (test_clear_buffer_dirty(bh)) {
2874 bh->b_end_io = end_buffer_write_sync;
2875 get_bh(bh);
2876 submit_bh(WRITE, bh);
2877 continue;
2878 }
2879 } else {
2880 if (!buffer_uptodate(bh)) {
2881 bh->b_end_io = end_buffer_read_sync;
2882 get_bh(bh);
2883 submit_bh(rw, bh);
2884 continue;
2885 }
2886 }
2887 unlock_buffer(bh);
2888 }
2889 }
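/*
 * Illustrative sketch (not part of this file): a common pattern is to kick
 * off readahead on a batch of metadata buffers with ll_rw_block() and only
 * wait on the one needed right now.  The buffer heads are assumed to have
 * been obtained with sb_getblk() or similar.
 *
 *	ll_rw_block(READA, nr - 1, &bhs[1]);	(readahead, may be skipped)
 *	ll_rw_block(READ, 1, &bhs[0]);
 *	wait_on_buffer(bhs[0]);
 *	if (!buffer_uptodate(bhs[0]))
 *		return -EIO;
 */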
2890
2891 /*
2892 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2893 * and then start new I/O and then wait upon it. The caller must have a ref on
2894 * the buffer_head.
2895 */
2896 int sync_dirty_buffer(struct buffer_head *bh)
2897 {
2898 int ret = 0;
2899
2900 WARN_ON(atomic_read(&bh->b_count) < 1);
2901 lock_buffer(bh);
2902 if (test_clear_buffer_dirty(bh)) {
2903 get_bh(bh);
2904 bh->b_end_io = end_buffer_write_sync;
2905 ret = submit_bh(WRITE, bh);
2906 wait_on_buffer(bh);
2907 if (buffer_eopnotsupp(bh)) {
2908 clear_buffer_eopnotsupp(bh);
2909 ret = -EOPNOTSUPP;
2910 }
2911 if (!ret && !buffer_uptodate(bh))
2912 ret = -EIO;
2913 } else {
2914 unlock_buffer(bh);
2915 }
2916 return ret;
2917 }
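/*
 * Illustrative sketch (not part of this file): after modifying a metadata
 * block, a filesystem that needs the change on stable storage right away
 * can mark the buffer dirty and then force it out synchronously.
 *
 *	memcpy(bh->b_data + offset, data, len);	(hypothetical update)
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 */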
2918
2919 /*
2920 * try_to_free_buffers() checks if all the buffers on this particular page
2921 * are unused, and releases them if so.
2922 *
2923 * Exclusion against try_to_free_buffers may be obtained by either
2924 * locking the page or by holding its mapping's private_lock.
2925 *
2926 * If the page is dirty but all the buffers are clean then we need to
2927 * be sure to mark the page clean as well. This is because the page
2928 * may be against a block device, and a later reattachment of buffers
2929 * to a dirty page will set *all* buffers dirty. Which would corrupt
2930 * filesystem data on the same device.
2931 *
2932 * The same applies to regular filesystem pages: if all the buffers are
2933 * clean then we set the page clean and proceed. To do that, we require
2934 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2935 * private_lock.
2936 *
2937 * try_to_free_buffers() is non-blocking.
2938 */
2939 static inline int buffer_busy(struct buffer_head *bh)
2940 {
2941 return atomic_read(&bh->b_count) |
2942 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2943 }
2944
2945 static int
2946 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2947 {
2948 struct buffer_head *head = page_buffers(page);
2949 struct buffer_head *bh;
2950
2951 bh = head;
2952 do {
2953 if (buffer_write_io_error(bh) && page->mapping)
2954 set_bit(AS_EIO, &page->mapping->flags);
2955 if (buffer_busy(bh))
2956 goto failed;
2957 bh = bh->b_this_page;
2958 } while (bh != head);
2959
2960 do {
2961 struct buffer_head *next = bh->b_this_page;
2962
2963 if (!list_empty(&bh->b_assoc_buffers))
2964 __remove_assoc_queue(bh);
2965 bh = next;
2966 } while (bh != head);
2967 *buffers_to_free = head;
2968 __clear_page_buffers(page);
2969 return 1;
2970 failed:
2971 return 0;
2972 }
2973
2974 int try_to_free_buffers(struct page *page)
2975 {
2976 struct address_space * const mapping = page->mapping;
2977 struct buffer_head *buffers_to_free = NULL;
2978 int ret = 0;
2979
2980 BUG_ON(!PageLocked(page));
2981 if (PageWriteback(page))
2982 return 0;
2983
2984 if (mapping == NULL) { /* can this still happen? */
2985 ret = drop_buffers(page, &buffers_to_free);
2986 goto out;
2987 }
2988
2989 spin_lock(&mapping->private_lock);
2990 ret = drop_buffers(page, &buffers_to_free);
2991 if (ret) {
2992 /*
2993 * If the filesystem writes its buffers by hand (eg ext3)
2994 * then we can have clean buffers against a dirty page. We
2995 * clean the page here; otherwise later reattachment of buffers
2996 * could encounter a non-uptodate page, which is unresolvable.
2997 * This only applies in the rare case where try_to_free_buffers
2998 * succeeds but the page is not freed.
2999 */
3000 clear_page_dirty(page);
3001 }
3002 spin_unlock(&mapping->private_lock);
3003 out:
3004 if (buffers_to_free) {
3005 struct buffer_head *bh = buffers_to_free;
3006
3007 do {
3008 struct buffer_head *next = bh->b_this_page;
3009 free_buffer_head(bh);
3010 bh = next;
3011 } while (bh != buffers_to_free);
3012 }
3013 return ret;
3014 }
3015 EXPORT_SYMBOL(try_to_free_buffers);
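/*
 * Illustrative sketch (not part of this file): filesystems with no special
 * buffer lifetime rules can point ->releasepage() straight at
 * try_to_free_buffers(); the VM then reaches it via try_to_release_page()
 * when it wants the page back.  "myfs" is a hypothetical name.
 *
 *	static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
 *	{
 *		return try_to_free_buffers(page);
 *	}
 */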
3016
3017 void block_sync_page(struct page *page)
3018 {
3019 struct address_space *mapping;
3020
3021 smp_mb();
3022 mapping = page_mapping(page);
3023 if (mapping)
3024 blk_run_backing_dev(mapping->backing_dev_info, page);
3025 }
3026
3027 /*
3028 * There are no bdflush tunables left. But distributions are
3029 * still running obsolete flush daemons, so we terminate them here.
3030 *
3031 * Use of bdflush() is deprecated and will be removed in a future kernel.
3032 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3033 */
3034 asmlinkage long sys_bdflush(int func, long data)
3035 {
3036 static int msg_count;
3037
3038 if (!capable(CAP_SYS_ADMIN))
3039 return -EPERM;
3040
3041 if (msg_count < 5) {
3042 msg_count++;
3043 printk(KERN_INFO
3044 "warning: process `%s' used the obsolete bdflush"
3045 " system call\n", current->comm);
3046 printk(KERN_INFO "Fix your initscripts?\n");
3047 }
3048
3049 if (func == 1)
3050 do_exit(0);
3051 return 0;
3052 }
3053
3054 /*
3055 * Buffer-head allocation
3056 */
3057 static kmem_cache_t *bh_cachep;
3058
3059 /*
3060 * Once the number of bh's in the machine exceeds this level, we start
3061 * stripping them in writeback.
3062 */
3063 static int max_buffer_heads;
3064
3065 int buffer_heads_over_limit;
3066
3067 struct bh_accounting {
3068 int nr; /* Number of live bh's */
3069 int ratelimit; /* Limit cacheline bouncing */
3070 };
3071
3072 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3073
3074 static void recalc_bh_state(void)
3075 {
3076 int i;
3077 int tot = 0;
3078
3079 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3080 return;
3081 __get_cpu_var(bh_accounting).ratelimit = 0;
3082 for_each_online_cpu(i)
3083 tot += per_cpu(bh_accounting, i).nr;
3084 buffer_heads_over_limit = (tot > max_buffer_heads);
3085 }
3086
3087 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3088 {
3089 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3090 if (ret) {
3091 get_cpu_var(bh_accounting).nr++;
3092 recalc_bh_state();
3093 put_cpu_var(bh_accounting);
3094 }
3095 return ret;
3096 }
3097 EXPORT_SYMBOL(alloc_buffer_head);
3098
3099 void free_buffer_head(struct buffer_head *bh)
3100 {
3101 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3102 kmem_cache_free(bh_cachep, bh);
3103 get_cpu_var(bh_accounting).nr--;
3104 recalc_bh_state();
3105 put_cpu_var(bh_accounting);
3106 }
3107 EXPORT_SYMBOL(free_buffer_head);
3108
3109 static void
3110 init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
3111 {
3112 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
3113 SLAB_CTOR_CONSTRUCTOR) {
3114 struct buffer_head * bh = (struct buffer_head *)data;
3115
3116 memset(bh, 0, sizeof(*bh));
3117 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3118 }
3119 }
3120
3121 #ifdef CONFIG_HOTPLUG_CPU
3122 static void buffer_exit_cpu(int cpu)
3123 {
3124 int i;
3125 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3126
3127 for (i = 0; i < BH_LRU_SIZE; i++) {
3128 brelse(b->bhs[i]);
3129 b->bhs[i] = NULL;
3130 }
3131 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3132 per_cpu(bh_accounting, cpu).nr = 0;
3133 put_cpu_var(bh_accounting);
3134 }
3135
3136 static int buffer_cpu_notify(struct notifier_block *self,
3137 unsigned long action, void *hcpu)
3138 {
3139 if (action == CPU_DEAD)
3140 buffer_exit_cpu((unsigned long)hcpu);
3141 return NOTIFY_OK;
3142 }
3143 #endif /* CONFIG_HOTPLUG_CPU */
3144
3145 void __init buffer_init(void)
3146 {
3147 int nrpages;
3148
3149 bh_cachep = kmem_cache_create("buffer_head",
3150 sizeof(struct buffer_head), 0,
3151 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3152 SLAB_MEM_SPREAD),
3153 init_buffer_head,
3154 NULL);
3155
3156 /*
3157 * Limit the bh occupancy to 10% of ZONE_NORMAL
3158 */
3159 nrpages = (nr_free_buffer_pages() * 10) / 100;
3160 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3161 hotcpu_notifier(buffer_cpu_notify, 0);
3162 }
3163
3164 EXPORT_SYMBOL(__bforget);
3165 EXPORT_SYMBOL(__brelse);
3166 EXPORT_SYMBOL(__wait_on_buffer);
3167 EXPORT_SYMBOL(block_commit_write);
3168 EXPORT_SYMBOL(block_prepare_write);
3169 EXPORT_SYMBOL(block_read_full_page);
3170 EXPORT_SYMBOL(block_sync_page);
3171 EXPORT_SYMBOL(block_truncate_page);
3172 EXPORT_SYMBOL(block_write_full_page);
3173 EXPORT_SYMBOL(cont_prepare_write);
3174 EXPORT_SYMBOL(end_buffer_async_write);
3175 EXPORT_SYMBOL(end_buffer_read_sync);
3176 EXPORT_SYMBOL(end_buffer_write_sync);
3177 EXPORT_SYMBOL(file_fsync);
3178 EXPORT_SYMBOL(fsync_bdev);
3179 EXPORT_SYMBOL(generic_block_bmap);
3180 EXPORT_SYMBOL(generic_commit_write);
3181 EXPORT_SYMBOL(generic_cont_expand);
3182 EXPORT_SYMBOL(generic_cont_expand_simple);
3183 EXPORT_SYMBOL(init_buffer);
3184 EXPORT_SYMBOL(invalidate_bdev);
3185 EXPORT_SYMBOL(ll_rw_block);
3186 EXPORT_SYMBOL(mark_buffer_dirty);
3187 EXPORT_SYMBOL(submit_bh);
3188 EXPORT_SYMBOL(sync_dirty_buffer);
3189 EXPORT_SYMBOL(unlock_buffer);