[mirror_ubuntu-artful-kernel.git] / fs / buffer.c
1 /*
2 * linux/fs/buffer.c
3 *
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
6
7 /*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9 *
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12 *
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
15 *
16 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
17 *
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19 */
20
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48
49 inline void
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51 {
52 bh->b_end_io = handler;
53 bh->b_private = private;
54 }
55
56 static int sync_buffer(void *word)
57 {
58 struct block_device *bd;
59 struct buffer_head *bh
60 = container_of(word, struct buffer_head, b_state);
61
62 smp_mb();
63 bd = bh->b_bdev;
64 if (bd)
65 blk_run_address_space(bd->bd_inode->i_mapping);
66 io_schedule();
67 return 0;
68 }
69
70 void __lock_buffer(struct buffer_head *bh)
71 {
72 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73 TASK_UNINTERRUPTIBLE);
74 }
75 EXPORT_SYMBOL(__lock_buffer);
76
77 void unlock_buffer(struct buffer_head *bh)
78 {
79 clear_bit_unlock(BH_Lock, &bh->b_state);
80 smp_mb__after_clear_bit();
81 wake_up_bit(&bh->b_state, BH_Lock);
82 }
83
84 /*
85 * Block until a buffer comes unlocked. This doesn't stop it
86 * from becoming locked again - you have to lock it yourself
87 * if you want to preserve its state.
88 */
89 void __wait_on_buffer(struct buffer_head * bh)
90 {
91 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
92 }
93
94 static void
95 __clear_page_buffers(struct page *page)
96 {
97 ClearPagePrivate(page);
98 set_page_private(page, 0);
99 page_cache_release(page);
100 }
101
102 static void buffer_io_error(struct buffer_head *bh)
103 {
104 char b[BDEVNAME_SIZE];
105
106 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
107 bdevname(bh->b_bdev, b),
108 (unsigned long long)bh->b_blocknr);
109 }
110
111 /*
112 * End-of-IO handler helper function which does not touch the bh after
113 * unlocking it.
114 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
115 * a race there is benign: unlock_buffer() only uses the bh's address for
116 * hashing after unlocking the buffer, so it doesn't actually touch the bh
117 * itself.
118 */
119 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
120 {
121 if (uptodate) {
122 set_buffer_uptodate(bh);
123 } else {
124 /* This happens due to failed READA attempts. */
125 clear_buffer_uptodate(bh);
126 }
127 unlock_buffer(bh);
128 }
129
130 /*
131 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
132 * unlock the buffer. This is what ll_rw_block uses too.
133 */
134 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
135 {
136 __end_buffer_read_notouch(bh, uptodate);
137 put_bh(bh);
138 }
139
140 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
141 {
142 char b[BDEVNAME_SIZE];
143
144 if (uptodate) {
145 set_buffer_uptodate(bh);
146 } else {
147 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
148 buffer_io_error(bh);
149 printk(KERN_WARNING "lost page write due to "
150 "I/O error on %s\n",
151 bdevname(bh->b_bdev, b));
152 }
153 set_buffer_write_io_error(bh);
154 clear_buffer_uptodate(bh);
155 }
156 unlock_buffer(bh);
157 put_bh(bh);
158 }
159
160 /*
161 * Write out and wait upon all the dirty data associated with a block
162 * device via its mapping. Does not take the superblock lock.
163 */
164 int sync_blockdev(struct block_device *bdev)
165 {
166 int ret = 0;
167
168 if (bdev)
169 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
170 return ret;
171 }
172 EXPORT_SYMBOL(sync_blockdev);
173
174 /*
175 * Write out and wait upon all dirty data associated with this
176 * device. Filesystem data as well as the underlying block
177 * device. Takes the superblock lock.
178 */
179 int fsync_bdev(struct block_device *bdev)
180 {
181 struct super_block *sb = get_super(bdev);
182 if (sb) {
183 int res = fsync_super(sb);
184 drop_super(sb);
185 return res;
186 }
187 return sync_blockdev(bdev);
188 }
189
190 /**
191 * freeze_bdev -- lock a filesystem and force it into a consistent state
192 * @bdev: blockdevice to lock
193 *
194 * This takes the block device bd_mount_sem to make sure no new mounts
195 * happen on bdev until thaw_bdev() is called.
196 * If a superblock is found on this device, we take the s_umount semaphore
197 * on it to make sure nobody unmounts until the snapshot creation is done.
198 */
199 struct super_block *freeze_bdev(struct block_device *bdev)
200 {
201 struct super_block *sb;
202
203 down(&bdev->bd_mount_sem);
204 sb = get_super(bdev);
205 if (sb && !(sb->s_flags & MS_RDONLY)) {
206 sb->s_frozen = SB_FREEZE_WRITE;
207 smp_wmb();
208
209 __fsync_super(sb);
210
211 sb->s_frozen = SB_FREEZE_TRANS;
212 smp_wmb();
213
214 sync_blockdev(sb->s_bdev);
215
216 if (sb->s_op->write_super_lockfs)
217 sb->s_op->write_super_lockfs(sb);
218 }
219
220 sync_blockdev(bdev);
221 return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
222 }
223 EXPORT_SYMBOL(freeze_bdev);
224
225 /**
226 * thaw_bdev -- unlock filesystem
227 * @bdev: blockdevice to unlock
228 * @sb: associated superblock
229 *
230 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
231 */
232 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
233 {
234 if (sb) {
235 BUG_ON(sb->s_bdev != bdev);
236
237 if (sb->s_op->unlockfs)
238 sb->s_op->unlockfs(sb);
239 sb->s_frozen = SB_UNFROZEN;
240 smp_wmb();
241 wake_up(&sb->s_wait_unfrozen);
242 drop_super(sb);
243 }
244
245 up(&bdev->bd_mount_sem);
246 }
247 EXPORT_SYMBOL(thaw_bdev);
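/*
 * Illustrative sketch only (kept under #if 0, not part of the original
 * file): one plausible calling pattern for freeze_bdev()/thaw_bdev(),
 * e.g. from a volume-snapshot driver.  snapshot_example() is a
 * hypothetical name and the snapshot step is a placeholder.
 */
#if 0
static void snapshot_example(struct block_device *bdev)
{
	struct super_block *sb;

	sb = freeze_bdev(bdev);		/* sync and block new writes/mounts */
	/* ... take the device-level snapshot here ... */
	thaw_bdev(bdev, sb);		/* sb may be NULL if nothing was mounted */
}
#endif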
248
249 /*
250 * Various filesystems appear to want __find_get_block to be non-blocking.
251 * But it's the page lock which protects the buffers. To get around this,
252 * we get exclusion from try_to_free_buffers with the blockdev mapping's
253 * private_lock.
254 *
255 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
256 * may be quite high. This code could TryLock the page, and if that
257 * succeeds, there is no need to take private_lock. (But if
258 * private_lock is contended then so is mapping->tree_lock).
259 */
260 static struct buffer_head *
261 __find_get_block_slow(struct block_device *bdev, sector_t block)
262 {
263 struct inode *bd_inode = bdev->bd_inode;
264 struct address_space *bd_mapping = bd_inode->i_mapping;
265 struct buffer_head *ret = NULL;
266 pgoff_t index;
267 struct buffer_head *bh;
268 struct buffer_head *head;
269 struct page *page;
270 int all_mapped = 1;
271
272 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
273 page = find_get_page(bd_mapping, index);
274 if (!page)
275 goto out;
276
277 spin_lock(&bd_mapping->private_lock);
278 if (!page_has_buffers(page))
279 goto out_unlock;
280 head = page_buffers(page);
281 bh = head;
282 do {
283 if (bh->b_blocknr == block) {
284 ret = bh;
285 get_bh(bh);
286 goto out_unlock;
287 }
288 if (!buffer_mapped(bh))
289 all_mapped = 0;
290 bh = bh->b_this_page;
291 } while (bh != head);
292
293 /* we might be here because some of the buffers on this page are
294 * not mapped. This is due to various races between
295 * file io on the block device and getblk. It gets dealt with
296 * elsewhere, don't buffer_error if we had some unmapped buffers
297 */
298 if (all_mapped) {
299 printk("__find_get_block_slow() failed. "
300 "block=%llu, b_blocknr=%llu\n",
301 (unsigned long long)block,
302 (unsigned long long)bh->b_blocknr);
303 printk("b_state=0x%08lx, b_size=%zu\n",
304 bh->b_state, bh->b_size);
305 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
306 }
307 out_unlock:
308 spin_unlock(&bd_mapping->private_lock);
309 page_cache_release(page);
310 out:
311 return ret;
312 }
313
314 /* If invalidate_buffers() will trash dirty buffers, it means some kind
315 of fs corruption is going on. Trashing dirty data always implies losing
316 information that was supposed to be just stored on the physical layer
317 by the user.
318
319 Thus invalidate_buffers in general usage is not allowed to trash
320 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
321 be preserved. These buffers are simply skipped.
322
323 We also skip buffers which are still in use. For example this can
324 happen if a userspace program is reading the block device.
325
326 NOTE: in the case where the user removed a removable-media disk while
327 there was still dirty data not synced to disk (due to a bug in the device
328 driver or to a user error), not destroying the dirty buffers could
329 also generate corruption on the next media inserted, thus a parameter is
330 necessary to handle this case in the safest way possible (trying
331 not to corrupt the newly inserted disk with the data belonging to
332 the old, now-corrupted disk). Also, for the ramdisk the natural thing
333 to do in order to release the ramdisk memory is to destroy dirty buffers.
334
335 These are two special cases. Normal usage implies that the device driver
336 issues a sync on the device (without waiting for I/O completion) and
337 then calls invalidate_buffers, which doesn't trash dirty buffers.
338
339 For handling cache coherency with the blkdev pagecache the 'update' case
340 has been introduced. It is needed to re-read from disk any pinned
341 buffer. NOTE: re-reading from disk is destructive so we can do it only
342 when we assume nobody is changing the buffercache under our I/O and when
343 we think the disk contains more recent information than the buffercache.
344 The update == 1 pass marks the buffers we need to update, the update == 2
345 pass does the actual I/O. */
346 void invalidate_bdev(struct block_device *bdev)
347 {
348 struct address_space *mapping = bdev->bd_inode->i_mapping;
349
350 if (mapping->nrpages == 0)
351 return;
352
353 invalidate_bh_lrus();
354 invalidate_mapping_pages(mapping, 0, -1);
355 }
356
357 /*
358 * Kick pdflush then try to free up some ZONE_NORMAL memory.
359 */
360 static void free_more_memory(void)
361 {
362 struct zone *zone;
363 int nid;
364
365 wakeup_pdflush(1024);
366 yield();
367
368 for_each_online_node(nid) {
369 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
370 gfp_zone(GFP_NOFS), NULL,
371 &zone);
372 if (zone)
373 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
374 GFP_NOFS);
375 }
376 }
377
378 /*
379 * I/O completion handler for block_read_full_page() - pages
380 * which come unlocked at the end of I/O.
381 */
382 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
383 {
384 unsigned long flags;
385 struct buffer_head *first;
386 struct buffer_head *tmp;
387 struct page *page;
388 int page_uptodate = 1;
389
390 BUG_ON(!buffer_async_read(bh));
391
392 page = bh->b_page;
393 if (uptodate) {
394 set_buffer_uptodate(bh);
395 } else {
396 clear_buffer_uptodate(bh);
397 if (printk_ratelimit())
398 buffer_io_error(bh);
399 SetPageError(page);
400 }
401
402 /*
403 * Be _very_ careful from here on. Bad things can happen if
404 * two buffer heads end IO at almost the same time and both
405 * decide that the page is now completely done.
406 */
407 first = page_buffers(page);
408 local_irq_save(flags);
409 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
410 clear_buffer_async_read(bh);
411 unlock_buffer(bh);
412 tmp = bh;
413 do {
414 if (!buffer_uptodate(tmp))
415 page_uptodate = 0;
416 if (buffer_async_read(tmp)) {
417 BUG_ON(!buffer_locked(tmp));
418 goto still_busy;
419 }
420 tmp = tmp->b_this_page;
421 } while (tmp != bh);
422 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
423 local_irq_restore(flags);
424
425 /*
426 * If none of the buffers had errors and they are all
427 * uptodate then we can set the page uptodate.
428 */
429 if (page_uptodate && !PageError(page))
430 SetPageUptodate(page);
431 unlock_page(page);
432 return;
433
434 still_busy:
435 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
436 local_irq_restore(flags);
437 return;
438 }
439
440 /*
441 * Completion handler for block_write_full_page() - pages which are unlocked
442 * during I/O, and which have PageWriteback cleared upon I/O completion.
443 */
444 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
445 {
446 char b[BDEVNAME_SIZE];
447 unsigned long flags;
448 struct buffer_head *first;
449 struct buffer_head *tmp;
450 struct page *page;
451
452 BUG_ON(!buffer_async_write(bh));
453
454 page = bh->b_page;
455 if (uptodate) {
456 set_buffer_uptodate(bh);
457 } else {
458 if (printk_ratelimit()) {
459 buffer_io_error(bh);
460 printk(KERN_WARNING "lost page write due to "
461 "I/O error on %s\n",
462 bdevname(bh->b_bdev, b));
463 }
464 set_bit(AS_EIO, &page->mapping->flags);
465 set_buffer_write_io_error(bh);
466 clear_buffer_uptodate(bh);
467 SetPageError(page);
468 }
469
470 first = page_buffers(page);
471 local_irq_save(flags);
472 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
473
474 clear_buffer_async_write(bh);
475 unlock_buffer(bh);
476 tmp = bh->b_this_page;
477 while (tmp != bh) {
478 if (buffer_async_write(tmp)) {
479 BUG_ON(!buffer_locked(tmp));
480 goto still_busy;
481 }
482 tmp = tmp->b_this_page;
483 }
484 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
485 local_irq_restore(flags);
486 end_page_writeback(page);
487 return;
488
489 still_busy:
490 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
491 local_irq_restore(flags);
492 return;
493 }
494
495 /*
496 * If a page's buffers are under async read-in (end_buffer_async_read
497 * completion) then there is a possibility that another thread of
498 * control could lock one of the buffers after it has completed
499 * but while some of the other buffers have not completed. This
500 * locked buffer would confuse end_buffer_async_read() into not unlocking
501 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
502 * that this buffer is not under async I/O.
503 *
504 * The page comes unlocked when it has no locked buffer_async buffers
505 * left.
506 *
507 * PageLocked prevents anyone from starting new async I/O reads against any of
508 * the buffers.
509 *
510 * PageWriteback is used to prevent simultaneous writeout of the same
511 * page.
512 *
513 * PageLocked prevents anyone from starting writeback of a page which is
514 * under read I/O (PageWriteback is only ever set against a locked page).
515 */
516 static void mark_buffer_async_read(struct buffer_head *bh)
517 {
518 bh->b_end_io = end_buffer_async_read;
519 set_buffer_async_read(bh);
520 }
521
522 void mark_buffer_async_write(struct buffer_head *bh)
523 {
524 bh->b_end_io = end_buffer_async_write;
525 set_buffer_async_write(bh);
526 }
527 EXPORT_SYMBOL(mark_buffer_async_write);
528
529
530 /*
531 * fs/buffer.c contains helper functions for buffer-backed address_spaces'
532 * fsync functions. A common requirement for buffer-based filesystems is
533 * that certain data from the backing blockdev needs to be written out for
534 * a successful fsync(). For example, ext2 indirect blocks need to be
535 * written back and waited upon before fsync() returns.
536 *
537 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
538 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
539 * management of a list of dependent buffers at ->i_mapping->private_list.
540 *
541 * Locking is a little subtle: try_to_free_buffers() will remove buffers
542 * from their controlling inode's queue when they are being freed. But
543 * try_to_free_buffers() will be operating against the *blockdev* mapping
544 * at the time, not against the S_ISREG file which depends on those buffers.
545 * So the locking for private_list is via the private_lock in the address_space
546 * which backs the buffers. Which is different from the address_space
547 * against which the buffers are listed. So for a particular address_space,
548 * mapping->private_lock does *not* protect mapping->private_list! In fact,
549 * mapping->private_list will always be protected by the backing blockdev's
550 * ->private_lock.
551 *
552 * Which introduces a requirement: all buffers on an address_space's
553 * ->private_list must be from the same address_space: the blockdev's.
554 *
555 * address_spaces which do not place buffers at ->private_list via these
556 * utility functions are free to use private_lock and private_list for
557 * whatever they want. The only requirement is that list_empty(private_list)
558 * be true at clear_inode() time.
559 *
560 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
561 * filesystems should do that. invalidate_inode_buffers() should just go
562 * BUG_ON(!list_empty).
563 *
564 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
565 * take an address_space, not an inode. And it should be called
566 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
567 * queued up.
568 *
569 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
570 * list if it is already on a list. Because if the buffer is on a list,
571 * it *must* already be on the right one. If not, the filesystem is being
572 * silly. This will save a ton of locking. But first we have to ensure
573 * that buffers are taken *off* the old inode's list when they are freed
574 * (presumably in truncate). That requires careful auditing of all
575 * filesystems (do it inside bforget()). It could also be done by bringing
576 * b_inode back.
577 */
578
579 /*
580 * The buffer's backing address_space's private_lock must be held
581 */
582 static void __remove_assoc_queue(struct buffer_head *bh)
583 {
584 list_del_init(&bh->b_assoc_buffers);
585 WARN_ON(!bh->b_assoc_map);
586 if (buffer_write_io_error(bh))
587 set_bit(AS_EIO, &bh->b_assoc_map->flags);
588 bh->b_assoc_map = NULL;
589 }
590
591 int inode_has_buffers(struct inode *inode)
592 {
593 return !list_empty(&inode->i_data.private_list);
594 }
595
596 /*
597 * osync is designed to support O_SYNC io. It waits synchronously for
598 * all already-submitted IO to complete, but does not queue any new
599 * writes to the disk.
600 *
601 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
602 * you dirty the buffers, and then use osync_inode_buffers to wait for
603 * completion. Any other dirty buffers which are not yet queued for
604 * write will not be flushed to disk by the osync.
605 */
606 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
607 {
608 struct buffer_head *bh;
609 struct list_head *p;
610 int err = 0;
611
612 spin_lock(lock);
613 repeat:
614 list_for_each_prev(p, list) {
615 bh = BH_ENTRY(p);
616 if (buffer_locked(bh)) {
617 get_bh(bh);
618 spin_unlock(lock);
619 wait_on_buffer(bh);
620 if (!buffer_uptodate(bh))
621 err = -EIO;
622 brelse(bh);
623 spin_lock(lock);
624 goto repeat;
625 }
626 }
627 spin_unlock(lock);
628 return err;
629 }
630
631 /**
632 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
633 * @mapping: the mapping which wants those buffers written
634 *
635 * Starts I/O against the buffers at mapping->private_list, and waits upon
636 * that I/O.
637 *
638 * Basically, this is a convenience function for fsync().
639 * @mapping is a file or directory which needs those buffers to be written for
640 * a successful fsync().
641 */
642 int sync_mapping_buffers(struct address_space *mapping)
643 {
644 struct address_space *buffer_mapping = mapping->assoc_mapping;
645
646 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
647 return 0;
648
649 return fsync_buffers_list(&buffer_mapping->private_lock,
650 &mapping->private_list);
651 }
652 EXPORT_SYMBOL(sync_mapping_buffers);
653
654 /*
655 * Called when we've recently written block `bblock', and it is known that
656 * `bblock' was for a buffer_boundary() buffer. This means that the block at
657 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
658 * dirty, schedule it for IO. So that indirects merge nicely with their data.
659 */
660 void write_boundary_block(struct block_device *bdev,
661 sector_t bblock, unsigned blocksize)
662 {
663 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
664 if (bh) {
665 if (buffer_dirty(bh))
666 ll_rw_block(WRITE, 1, &bh);
667 put_bh(bh);
668 }
669 }
670
671 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
672 {
673 struct address_space *mapping = inode->i_mapping;
674 struct address_space *buffer_mapping = bh->b_page->mapping;
675
676 mark_buffer_dirty(bh);
677 if (!mapping->assoc_mapping) {
678 mapping->assoc_mapping = buffer_mapping;
679 } else {
680 BUG_ON(mapping->assoc_mapping != buffer_mapping);
681 }
682 if (!bh->b_assoc_map) {
683 spin_lock(&buffer_mapping->private_lock);
684 list_move_tail(&bh->b_assoc_buffers,
685 &mapping->private_list);
686 bh->b_assoc_map = mapping;
687 spin_unlock(&buffer_mapping->private_lock);
688 }
689 }
690 EXPORT_SYMBOL(mark_buffer_dirty_inode);
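/*
 * Illustrative sketch only (under #if 0): the usage pattern these helpers
 * are designed for, per the long comment earlier in this file.  A
 * filesystem queues a metadata buffer against the owning inode, and its
 * fsync path later writes and waits on that private_list.  The ex_*
 * names are hypothetical; mark_buffer_dirty_inode() and
 * sync_mapping_buffers() are the helpers defined above.
 */
#if 0
static void ex_dirty_indirect(struct inode *inode, struct buffer_head *bh)
{
	/* dirty bh and queue it on inode->i_mapping->private_list */
	mark_buffer_dirty_inode(bh, inode);
}

static int ex_fsync_metadata(struct inode *inode)
{
	/* write out and wait upon the buffers queued above */
	return sync_mapping_buffers(inode->i_mapping);
}
#endif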
691
692 /*
693 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
694 * dirty.
695 *
696 * If warn is true, then emit a warning if the page is not uptodate and has
697 * not been truncated.
698 */
699 static int __set_page_dirty(struct page *page,
700 struct address_space *mapping, int warn)
701 {
702 if (unlikely(!mapping))
703 return !TestSetPageDirty(page);
704
705 if (TestSetPageDirty(page))
706 return 0;
707
708 spin_lock_irq(&mapping->tree_lock);
709 if (page->mapping) { /* Race with truncate? */
710 WARN_ON_ONCE(warn && !PageUptodate(page));
711
712 if (mapping_cap_account_dirty(mapping)) {
713 __inc_zone_page_state(page, NR_FILE_DIRTY);
714 __inc_bdi_stat(mapping->backing_dev_info,
715 BDI_RECLAIMABLE);
716 task_io_account_write(PAGE_CACHE_SIZE);
717 }
718 radix_tree_tag_set(&mapping->page_tree,
719 page_index(page), PAGECACHE_TAG_DIRTY);
720 }
721 spin_unlock_irq(&mapping->tree_lock);
722 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
723
724 return 1;
725 }
726
727 /*
728 * Add a page to the dirty page list.
729 *
730 * It is a sad fact of life that this function is called from several places
731 * deeply under spinlocking. It may not sleep.
732 *
733 * If the page has buffers, the uptodate buffers are set dirty, to preserve
734 * dirty-state coherency between the page and the buffers. If the page does
735 * not have buffers then when they are later attached they will all be set
736 * dirty.
737 *
738 * The buffers are dirtied before the page is dirtied. There's a small race
739 * window in which a writepage caller may see the page cleanness but not the
740 * buffer dirtiness. That's fine. If this code were to set the page dirty
741 * before the buffers, a concurrent writepage caller could clear the page dirty
742 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
743 * page on the dirty page list.
744 *
745 * We use private_lock to lock against try_to_free_buffers while using the
746 * page's buffer list. Also use this to protect against clean buffers being
747 * added to the page after it was set dirty.
748 *
749 * FIXME: may need to call ->reservepage here as well. That's rather up to the
750 * address_space though.
751 */
752 int __set_page_dirty_buffers(struct page *page)
753 {
754 struct address_space *mapping = page_mapping(page);
755
756 if (unlikely(!mapping))
757 return !TestSetPageDirty(page);
758
759 spin_lock(&mapping->private_lock);
760 if (page_has_buffers(page)) {
761 struct buffer_head *head = page_buffers(page);
762 struct buffer_head *bh = head;
763
764 do {
765 set_buffer_dirty(bh);
766 bh = bh->b_this_page;
767 } while (bh != head);
768 }
769 spin_unlock(&mapping->private_lock);
770
771 return __set_page_dirty(page, mapping, 1);
772 }
773 EXPORT_SYMBOL(__set_page_dirty_buffers);
774
775 /*
776 * Write out and wait upon a list of buffers.
777 *
778 * We have conflicting pressures: we want to make sure that all
779 * initially dirty buffers get waited on, but that any subsequently
780 * dirtied buffers don't. After all, we don't want fsync to last
781 * forever if somebody is actively writing to the file.
782 *
783 * Do this in two main stages: first we copy dirty buffers to a
784 * temporary inode list, queueing the writes as we go. Then we clean
785 * up, waiting for those writes to complete.
786 *
787 * During this second stage, any subsequent updates to the file may end
788 * up refiling the buffer on the original inode's dirty list again, so
789 * there is a chance we will end up with a buffer queued for write but
790 * not yet completed on that list. So, as a final cleanup we go through
791 * the osync code to catch these locked, dirty buffers without requeuing
792 * any newly dirty buffers for write.
793 */
794 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
795 {
796 struct buffer_head *bh;
797 struct list_head tmp;
798 struct address_space *mapping;
799 int err = 0, err2;
800
801 INIT_LIST_HEAD(&tmp);
802
803 spin_lock(lock);
804 while (!list_empty(list)) {
805 bh = BH_ENTRY(list->next);
806 mapping = bh->b_assoc_map;
807 __remove_assoc_queue(bh);
808 /* Avoid race with mark_buffer_dirty_inode() which does
809 * a lockless check and we rely on seeing the dirty bit */
810 smp_mb();
811 if (buffer_dirty(bh) || buffer_locked(bh)) {
812 list_add(&bh->b_assoc_buffers, &tmp);
813 bh->b_assoc_map = mapping;
814 if (buffer_dirty(bh)) {
815 get_bh(bh);
816 spin_unlock(lock);
817 /*
818 * Ensure any pending I/O completes so that
819 * ll_rw_block() actually writes the current
820 * contents - it is a noop if I/O is still in
821 * flight on potentially older contents.
822 */
823 ll_rw_block(SWRITE_SYNC, 1, &bh);
824 brelse(bh);
825 spin_lock(lock);
826 }
827 }
828 }
829
830 while (!list_empty(&tmp)) {
831 bh = BH_ENTRY(tmp.prev);
832 get_bh(bh);
833 mapping = bh->b_assoc_map;
834 __remove_assoc_queue(bh);
835 /* Avoid race with mark_buffer_dirty_inode() which does
836 * a lockless check and we rely on seeing the dirty bit */
837 smp_mb();
838 if (buffer_dirty(bh)) {
839 list_add(&bh->b_assoc_buffers,
840 &mapping->private_list);
841 bh->b_assoc_map = mapping;
842 }
843 spin_unlock(lock);
844 wait_on_buffer(bh);
845 if (!buffer_uptodate(bh))
846 err = -EIO;
847 brelse(bh);
848 spin_lock(lock);
849 }
850
851 spin_unlock(lock);
852 err2 = osync_buffers_list(lock, list);
853 if (err)
854 return err;
855 else
856 return err2;
857 }
858
859 /*
860 * Invalidate any and all dirty buffers on a given inode. We are
861 * probably unmounting the fs, but that doesn't mean we have already
862 * done a sync(). Just drop the buffers from the inode list.
863 *
864 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
865 * assumes that all the buffers are against the blockdev. Not true
866 * for reiserfs.
867 */
868 void invalidate_inode_buffers(struct inode *inode)
869 {
870 if (inode_has_buffers(inode)) {
871 struct address_space *mapping = &inode->i_data;
872 struct list_head *list = &mapping->private_list;
873 struct address_space *buffer_mapping = mapping->assoc_mapping;
874
875 spin_lock(&buffer_mapping->private_lock);
876 while (!list_empty(list))
877 __remove_assoc_queue(BH_ENTRY(list->next));
878 spin_unlock(&buffer_mapping->private_lock);
879 }
880 }
881 EXPORT_SYMBOL(invalidate_inode_buffers);
882
883 /*
884 * Remove any clean buffers from the inode's buffer list. This is called
885 * when we're trying to free the inode itself. Those buffers can pin it.
886 *
887 * Returns true if all buffers were removed.
888 */
889 int remove_inode_buffers(struct inode *inode)
890 {
891 int ret = 1;
892
893 if (inode_has_buffers(inode)) {
894 struct address_space *mapping = &inode->i_data;
895 struct list_head *list = &mapping->private_list;
896 struct address_space *buffer_mapping = mapping->assoc_mapping;
897
898 spin_lock(&buffer_mapping->private_lock);
899 while (!list_empty(list)) {
900 struct buffer_head *bh = BH_ENTRY(list->next);
901 if (buffer_dirty(bh)) {
902 ret = 0;
903 break;
904 }
905 __remove_assoc_queue(bh);
906 }
907 spin_unlock(&buffer_mapping->private_lock);
908 }
909 return ret;
910 }
911
912 /*
913 * Create the appropriate buffers when given a page for the data area and
914 * the size of each buffer. Use the bh->b_this_page linked list to
915 * follow the buffers created. Return NULL if unable to create more
916 * buffers.
917 *
918 * The retry flag is used to differentiate async IO (paging, swapping),
919 * which may not fail, from ordinary buffer allocations.
920 */
921 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
922 int retry)
923 {
924 struct buffer_head *bh, *head;
925 long offset;
926
927 try_again:
928 head = NULL;
929 offset = PAGE_SIZE;
930 while ((offset -= size) >= 0) {
931 bh = alloc_buffer_head(GFP_NOFS);
932 if (!bh)
933 goto no_grow;
934
935 bh->b_bdev = NULL;
936 bh->b_this_page = head;
937 bh->b_blocknr = -1;
938 head = bh;
939
940 bh->b_state = 0;
941 atomic_set(&bh->b_count, 0);
942 bh->b_private = NULL;
943 bh->b_size = size;
944
945 /* Link the buffer to its page */
946 set_bh_page(bh, page, offset);
947
948 init_buffer(bh, NULL, NULL);
949 }
950 return head;
951 /*
952 * In case anything failed, we just free everything we got.
953 */
954 no_grow:
955 if (head) {
956 do {
957 bh = head;
958 head = head->b_this_page;
959 free_buffer_head(bh);
960 } while (head);
961 }
962
963 /*
964 * Return failure for non-async IO requests. Async IO requests
965 * are not allowed to fail, so we have to wait until buffer heads
966 * become available. But we don't want tasks sleeping with
967 * partially complete buffers, so all were released above.
968 */
969 if (!retry)
970 return NULL;
971
972 /* We're _really_ low on memory. Now we just
973 * wait for old buffer heads to become free due to
974 * finishing IO. Since this is an async request and
975 * the reserve list is empty, we're sure there are
976 * async buffer heads in use.
977 */
978 free_more_memory();
979 goto try_again;
980 }
981 EXPORT_SYMBOL_GPL(alloc_page_buffers);
982
983 static inline void
984 link_dev_buffers(struct page *page, struct buffer_head *head)
985 {
986 struct buffer_head *bh, *tail;
987
988 bh = head;
989 do {
990 tail = bh;
991 bh = bh->b_this_page;
992 } while (bh);
993 tail->b_this_page = head;
994 attach_page_buffers(page, head);
995 }
996
997 /*
998 * Initialise the state of a blockdev page's buffers.
999 */
1000 static void
1001 init_page_buffers(struct page *page, struct block_device *bdev,
1002 sector_t block, int size)
1003 {
1004 struct buffer_head *head = page_buffers(page);
1005 struct buffer_head *bh = head;
1006 int uptodate = PageUptodate(page);
1007
1008 do {
1009 if (!buffer_mapped(bh)) {
1010 init_buffer(bh, NULL, NULL);
1011 bh->b_bdev = bdev;
1012 bh->b_blocknr = block;
1013 if (uptodate)
1014 set_buffer_uptodate(bh);
1015 set_buffer_mapped(bh);
1016 }
1017 block++;
1018 bh = bh->b_this_page;
1019 } while (bh != head);
1020 }
1021
1022 /*
1023 * Create the page-cache page that contains the requested block.
1024 *
1025 * This is used purely for blockdev mappings.
1026 */
1027 static struct page *
1028 grow_dev_page(struct block_device *bdev, sector_t block,
1029 pgoff_t index, int size)
1030 {
1031 struct inode *inode = bdev->bd_inode;
1032 struct page *page;
1033 struct buffer_head *bh;
1034
1035 page = find_or_create_page(inode->i_mapping, index,
1036 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1037 if (!page)
1038 return NULL;
1039
1040 BUG_ON(!PageLocked(page));
1041
1042 if (page_has_buffers(page)) {
1043 bh = page_buffers(page);
1044 if (bh->b_size == size) {
1045 init_page_buffers(page, bdev, block, size);
1046 return page;
1047 }
1048 if (!try_to_free_buffers(page))
1049 goto failed;
1050 }
1051
1052 /*
1053 * Allocate some buffers for this page
1054 */
1055 bh = alloc_page_buffers(page, size, 0);
1056 if (!bh)
1057 goto failed;
1058
1059 /*
1060 * Link the page to the buffers and initialise them. Take the
1061 * lock to be atomic wrt __find_get_block(), which does not
1062 * run under the page lock.
1063 */
1064 spin_lock(&inode->i_mapping->private_lock);
1065 link_dev_buffers(page, bh);
1066 init_page_buffers(page, bdev, block, size);
1067 spin_unlock(&inode->i_mapping->private_lock);
1068 return page;
1069
1070 failed:
1071 BUG();
1072 unlock_page(page);
1073 page_cache_release(page);
1074 return NULL;
1075 }
1076
1077 /*
1078 * Create buffers for the specified block device block's page. If
1079 * that page was dirty, the buffers are set dirty also.
1080 */
1081 static int
1082 grow_buffers(struct block_device *bdev, sector_t block, int size)
1083 {
1084 struct page *page;
1085 pgoff_t index;
1086 int sizebits;
1087
1088 sizebits = -1;
1089 do {
1090 sizebits++;
1091 } while ((size << sizebits) < PAGE_SIZE);
1092
1093 index = block >> sizebits;
1094
1095 /*
1096 * Check for a block which wants to lie outside our maximum possible
1097 * pagecache index. (this comparison is done using sector_t types).
1098 */
1099 if (unlikely(index != block >> sizebits)) {
1100 char b[BDEVNAME_SIZE];
1101
1102 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1103 "device %s\n",
1104 __func__, (unsigned long long)block,
1105 bdevname(bdev, b));
1106 return -EIO;
1107 }
1108 block = index << sizebits;
1109 /* Create a page with the proper size buffers.. */
1110 page = grow_dev_page(bdev, block, index, size);
1111 if (!page)
1112 return 0;
1113 unlock_page(page);
1114 page_cache_release(page);
1115 return 1;
1116 }
1117
1118 static struct buffer_head *
1119 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1120 {
1121 /* Size must be multiple of hard sectorsize */
1122 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1123 (size < 512 || size > PAGE_SIZE))) {
1124 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1125 size);
1126 printk(KERN_ERR "hardsect size: %d\n",
1127 bdev_hardsect_size(bdev));
1128
1129 dump_stack();
1130 return NULL;
1131 }
1132
1133 for (;;) {
1134 struct buffer_head * bh;
1135 int ret;
1136
1137 bh = __find_get_block(bdev, block, size);
1138 if (bh)
1139 return bh;
1140
1141 ret = grow_buffers(bdev, block, size);
1142 if (ret < 0)
1143 return NULL;
1144 if (ret == 0)
1145 free_more_memory();
1146 }
1147 }
1148
1149 /*
1150 * The relationship between dirty buffers and dirty pages:
1151 *
1152 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1153 * the page is tagged dirty in its radix tree.
1154 *
1155 * At all times, the dirtiness of the buffers represents the dirtiness of
1156 * subsections of the page. If the page has buffers, the page dirty bit is
1157 * merely a hint about the true dirty state.
1158 *
1159 * When a page is set dirty in its entirety, all its buffers are marked dirty
1160 * (if the page has buffers).
1161 *
1162 * When a buffer is marked dirty, its page is dirtied, but the page's other
1163 * buffers are not.
1164 *
1165 * Also. When blockdev buffers are explicitly read with bread(), they
1166 * individually become uptodate. But their backing page remains not
1167 * uptodate - even if all of its buffers are uptodate. A subsequent
1168 * block_read_full_page() against that page will discover all the uptodate
1169 * buffers, will set the page uptodate and will perform no I/O.
1170 */
1171
1172 /**
1173 * mark_buffer_dirty - mark a buffer_head as needing writeout
1174 * @bh: the buffer_head to mark dirty
1175 *
1176 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1177 * backing page dirty, then tag the page as dirty in its address_space's radix
1178 * tree and then attach the address_space's inode to its superblock's dirty
1179 * inode list.
1180 *
1181 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1182 * mapping->tree_lock and the global inode_lock.
1183 */
1184 void mark_buffer_dirty(struct buffer_head *bh)
1185 {
1186 WARN_ON_ONCE(!buffer_uptodate(bh));
1187
1188 /*
1189 * Very *carefully* optimize the it-is-already-dirty case.
1190 *
1191 * Don't let the final "is it dirty" escape to before we
1192 * perhaps modified the buffer.
1193 */
1194 if (buffer_dirty(bh)) {
1195 smp_mb();
1196 if (buffer_dirty(bh))
1197 return;
1198 }
1199
1200 if (!test_set_buffer_dirty(bh))
1201 __set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
1202 }
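/*
 * Illustrative sketch only (under #if 0): the common get-modify-dirty-
 * release idiom built on __getblk() (declared in <linux/buffer_head.h>,
 * defined later in this file) and mark_buffer_dirty() above.
 * ex_zero_block() is a hypothetical name.
 */
#if 0
static void ex_zero_block(struct block_device *bdev, sector_t block,
		unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);	/* written back later by writeback/sync */
	brelse(bh);
}
#endif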
1203
1204 /*
1205 * Decrement a buffer_head's reference count. If all buffers against a page
1206 * have zero reference count, are clean and unlocked, and if the page is clean
1207 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1208 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1209 * a page but it ends up not being freed, and buffers may later be reattached).
1210 */
1211 void __brelse(struct buffer_head * buf)
1212 {
1213 if (atomic_read(&buf->b_count)) {
1214 put_bh(buf);
1215 return;
1216 }
1217 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1218 }
1219
1220 /*
1221 * bforget() is like brelse(), except it discards any
1222 * potentially dirty data.
1223 */
1224 void __bforget(struct buffer_head *bh)
1225 {
1226 clear_buffer_dirty(bh);
1227 if (bh->b_assoc_map) {
1228 struct address_space *buffer_mapping = bh->b_page->mapping;
1229
1230 spin_lock(&buffer_mapping->private_lock);
1231 list_del_init(&bh->b_assoc_buffers);
1232 bh->b_assoc_map = NULL;
1233 spin_unlock(&buffer_mapping->private_lock);
1234 }
1235 __brelse(bh);
1236 }
1237
1238 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1239 {
1240 lock_buffer(bh);
1241 if (buffer_uptodate(bh)) {
1242 unlock_buffer(bh);
1243 return bh;
1244 } else {
1245 get_bh(bh);
1246 bh->b_end_io = end_buffer_read_sync;
1247 submit_bh(READ, bh);
1248 wait_on_buffer(bh);
1249 if (buffer_uptodate(bh))
1250 return bh;
1251 }
1252 brelse(bh);
1253 return NULL;
1254 }
1255
1256 /*
1257 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1258 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1259 * refcount elevated by one when they're in an LRU. A buffer can only appear
1260 * once in a particular CPU's LRU. A single buffer can be present in multiple
1261 * CPU's LRUs at the same time.
1262 *
1263 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1264 * sb_find_get_block().
1265 *
1266 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1267 * a local interrupt disable for that.
1268 */
1269
1270 #define BH_LRU_SIZE 8
1271
1272 struct bh_lru {
1273 struct buffer_head *bhs[BH_LRU_SIZE];
1274 };
1275
1276 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1277
1278 #ifdef CONFIG_SMP
1279 #define bh_lru_lock() local_irq_disable()
1280 #define bh_lru_unlock() local_irq_enable()
1281 #else
1282 #define bh_lru_lock() preempt_disable()
1283 #define bh_lru_unlock() preempt_enable()
1284 #endif
1285
1286 static inline void check_irqs_on(void)
1287 {
1288 #ifdef irqs_disabled
1289 BUG_ON(irqs_disabled());
1290 #endif
1291 }
1292
1293 /*
1294 * The LRU management algorithm is dopey-but-simple. Sorry.
1295 */
1296 static void bh_lru_install(struct buffer_head *bh)
1297 {
1298 struct buffer_head *evictee = NULL;
1299 struct bh_lru *lru;
1300
1301 check_irqs_on();
1302 bh_lru_lock();
1303 lru = &__get_cpu_var(bh_lrus);
1304 if (lru->bhs[0] != bh) {
1305 struct buffer_head *bhs[BH_LRU_SIZE];
1306 int in;
1307 int out = 0;
1308
1309 get_bh(bh);
1310 bhs[out++] = bh;
1311 for (in = 0; in < BH_LRU_SIZE; in++) {
1312 struct buffer_head *bh2 = lru->bhs[in];
1313
1314 if (bh2 == bh) {
1315 __brelse(bh2);
1316 } else {
1317 if (out >= BH_LRU_SIZE) {
1318 BUG_ON(evictee != NULL);
1319 evictee = bh2;
1320 } else {
1321 bhs[out++] = bh2;
1322 }
1323 }
1324 }
1325 while (out < BH_LRU_SIZE)
1326 bhs[out++] = NULL;
1327 memcpy(lru->bhs, bhs, sizeof(bhs));
1328 }
1329 bh_lru_unlock();
1330
1331 if (evictee)
1332 __brelse(evictee);
1333 }
1334
1335 /*
1336 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1337 */
1338 static struct buffer_head *
1339 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1340 {
1341 struct buffer_head *ret = NULL;
1342 struct bh_lru *lru;
1343 unsigned int i;
1344
1345 check_irqs_on();
1346 bh_lru_lock();
1347 lru = &__get_cpu_var(bh_lrus);
1348 for (i = 0; i < BH_LRU_SIZE; i++) {
1349 struct buffer_head *bh = lru->bhs[i];
1350
1351 if (bh && bh->b_bdev == bdev &&
1352 bh->b_blocknr == block && bh->b_size == size) {
1353 if (i) {
1354 while (i) {
1355 lru->bhs[i] = lru->bhs[i - 1];
1356 i--;
1357 }
1358 lru->bhs[0] = bh;
1359 }
1360 get_bh(bh);
1361 ret = bh;
1362 break;
1363 }
1364 }
1365 bh_lru_unlock();
1366 return ret;
1367 }
1368
1369 /*
1370 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1371 * it in the LRU and mark it as accessed. If it is not present then return
1372 * NULL
1373 */
1374 struct buffer_head *
1375 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1376 {
1377 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1378
1379 if (bh == NULL) {
1380 bh = __find_get_block_slow(bdev, block);
1381 if (bh)
1382 bh_lru_install(bh);
1383 }
1384 if (bh)
1385 touch_buffer(bh);
1386 return bh;
1387 }
1388 EXPORT_SYMBOL(__find_get_block);
1389
1390 /*
1391 * __getblk will locate (and, if necessary, create) the buffer_head
1392 * which corresponds to the passed block_device, block and size. The
1393 * returned buffer has its reference count incremented.
1394 *
1395 * __getblk() cannot fail - it just keeps trying. If you pass it an
1396 * illegal block number, __getblk() will happily return a buffer_head
1397 * which represents the non-existent block. Very weird.
1398 *
1399 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1400 * attempt is failing. FIXME, perhaps?
1401 */
1402 struct buffer_head *
1403 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1404 {
1405 struct buffer_head *bh = __find_get_block(bdev, block, size);
1406
1407 might_sleep();
1408 if (bh == NULL)
1409 bh = __getblk_slow(bdev, block, size);
1410 return bh;
1411 }
1412 EXPORT_SYMBOL(__getblk);
1413
1414 /*
1415 * Do async read-ahead on a buffer..
1416 */
1417 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1418 {
1419 struct buffer_head *bh = __getblk(bdev, block, size);
1420 if (likely(bh)) {
1421 ll_rw_block(READA, 1, &bh);
1422 brelse(bh);
1423 }
1424 }
1425 EXPORT_SYMBOL(__breadahead);
1426
1427 /**
1428 * __bread() - reads a specified block and returns the bh
1429 * @bdev: the block_device to read from
1430 * @block: number of block
1431 * @size: size (in bytes) to read
1432 *
1433 * Reads a specified block, and returns buffer head that contains it.
1434 * It returns NULL if the block was unreadable.
1435 */
1436 struct buffer_head *
1437 __bread(struct block_device *bdev, sector_t block, unsigned size)
1438 {
1439 struct buffer_head *bh = __getblk(bdev, block, size);
1440
1441 if (likely(bh) && !buffer_uptodate(bh))
1442 bh = __bread_slow(bh);
1443 return bh;
1444 }
1445 EXPORT_SYMBOL(__bread);
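/*
 * Illustrative sketch only (under #if 0): reading one block with __bread()
 * and releasing it with brelse().  ex_read_block() and the 512-byte block
 * size are hypothetical.
 */
#if 0
static int ex_read_block(struct block_device *bdev, sector_t block)
{
	struct buffer_head *bh = __bread(bdev, block, 512);

	if (!bh)
		return -EIO;	/* the block was unreadable */
	/* ... examine bh->b_data here ... */
	brelse(bh);
	return 0;
}
#endif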
1446
1447 /*
1448 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1449 * This doesn't race because it runs in each cpu either in irq
1450 * or with preempt disabled.
1451 */
1452 static void invalidate_bh_lru(void *arg)
1453 {
1454 struct bh_lru *b = &get_cpu_var(bh_lrus);
1455 int i;
1456
1457 for (i = 0; i < BH_LRU_SIZE; i++) {
1458 brelse(b->bhs[i]);
1459 b->bhs[i] = NULL;
1460 }
1461 put_cpu_var(bh_lrus);
1462 }
1463
1464 void invalidate_bh_lrus(void)
1465 {
1466 on_each_cpu(invalidate_bh_lru, NULL, 1);
1467 }
1468 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1469
1470 void set_bh_page(struct buffer_head *bh,
1471 struct page *page, unsigned long offset)
1472 {
1473 bh->b_page = page;
1474 BUG_ON(offset >= PAGE_SIZE);
1475 if (PageHighMem(page))
1476 /*
1477 * This catches illegal uses and preserves the offset:
1478 */
1479 bh->b_data = (char *)(0 + offset);
1480 else
1481 bh->b_data = page_address(page) + offset;
1482 }
1483 EXPORT_SYMBOL(set_bh_page);
1484
1485 /*
1486 * Called when truncating a buffer on a page completely.
1487 */
1488 static void discard_buffer(struct buffer_head * bh)
1489 {
1490 lock_buffer(bh);
1491 clear_buffer_dirty(bh);
1492 bh->b_bdev = NULL;
1493 clear_buffer_mapped(bh);
1494 clear_buffer_req(bh);
1495 clear_buffer_new(bh);
1496 clear_buffer_delay(bh);
1497 clear_buffer_unwritten(bh);
1498 unlock_buffer(bh);
1499 }
1500
1501 /**
1502 * block_invalidatepage - invalidate part or all of a buffer-backed page
1503 *
1504 * @page: the page which is affected
1505 * @offset: the index of the truncation point
1506 *
1507 * block_invalidatepage() is called when all or part of the page has become
1508 * invalidated by a truncate operation.
1509 *
1510 * block_invalidatepage() does not have to release all buffers, but it must
1511 * ensure that no dirty buffer is left outside @offset and that no I/O
1512 * is underway against any of the blocks which are outside the truncation
1513 * point. Because the caller is about to free (and possibly reuse) those
1514 * blocks on-disk.
1515 */
1516 void block_invalidatepage(struct page *page, unsigned long offset)
1517 {
1518 struct buffer_head *head, *bh, *next;
1519 unsigned int curr_off = 0;
1520
1521 BUG_ON(!PageLocked(page));
1522 if (!page_has_buffers(page))
1523 goto out;
1524
1525 head = page_buffers(page);
1526 bh = head;
1527 do {
1528 unsigned int next_off = curr_off + bh->b_size;
1529 next = bh->b_this_page;
1530
1531 /*
1532 * is this block fully invalidated?
1533 */
1534 if (offset <= curr_off)
1535 discard_buffer(bh);
1536 curr_off = next_off;
1537 bh = next;
1538 } while (bh != head);
1539
1540 /*
1541 * We release buffers only if the entire page is being invalidated.
1542 * The get_block cached value has been unconditionally invalidated,
1543 * so real IO is not possible anymore.
1544 */
1545 if (offset == 0)
1546 try_to_release_page(page, 0);
1547 out:
1548 return;
1549 }
1550 EXPORT_SYMBOL(block_invalidatepage);
1551
1552 /*
1553 * We attach and possibly dirty the buffers atomically wrt
1554 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1555 * is already excluded via the page lock.
1556 */
1557 void create_empty_buffers(struct page *page,
1558 unsigned long blocksize, unsigned long b_state)
1559 {
1560 struct buffer_head *bh, *head, *tail;
1561
1562 head = alloc_page_buffers(page, blocksize, 1);
1563 bh = head;
1564 do {
1565 bh->b_state |= b_state;
1566 tail = bh;
1567 bh = bh->b_this_page;
1568 } while (bh);
1569 tail->b_this_page = head;
1570
1571 spin_lock(&page->mapping->private_lock);
1572 if (PageUptodate(page) || PageDirty(page)) {
1573 bh = head;
1574 do {
1575 if (PageDirty(page))
1576 set_buffer_dirty(bh);
1577 if (PageUptodate(page))
1578 set_buffer_uptodate(bh);
1579 bh = bh->b_this_page;
1580 } while (bh != head);
1581 }
1582 attach_page_buffers(page, head);
1583 spin_unlock(&page->mapping->private_lock);
1584 }
1585 EXPORT_SYMBOL(create_empty_buffers);
1586
1587 /*
1588 * We are taking a block for data and we don't want any output from any
1589 * buffer-cache aliases starting from the moment that function returns and
1590 * until the moment when something explicitly marks the buffer
1591 * dirty (hopefully that will not happen until we free that block ;-)
1592 * We don't even need to mark it not-uptodate - nobody can expect
1593 * anything from a newly allocated buffer anyway. We used to use
1594 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1595 * don't want to mark the alias unmapped, for example - it would confuse
1596 * anyone who might pick it with bread() afterwards...
1597 *
1598 * Also.. Note that bforget() doesn't lock the buffer. So there can
1599 * be writeout I/O going on against recently-freed buffers. We don't
1600 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1601 * only if we really need to. That happens here.
1602 */
1603 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1604 {
1605 struct buffer_head *old_bh;
1606
1607 might_sleep();
1608
1609 old_bh = __find_get_block_slow(bdev, block);
1610 if (old_bh) {
1611 clear_buffer_dirty(old_bh);
1612 wait_on_buffer(old_bh);
1613 clear_buffer_req(old_bh);
1614 __brelse(old_bh);
1615 }
1616 }
1617 EXPORT_SYMBOL(unmap_underlying_metadata);
1618
1619 /*
1620 * NOTE! All mapped/uptodate combinations are valid:
1621 *
1622 * Mapped Uptodate Meaning
1623 *
1624 * No No "unknown" - must do get_block()
1625 * No Yes "hole" - zero-filled
1626 * Yes No "allocated" - allocated on disk, not read in
1627 * Yes Yes "valid" - allocated and up-to-date in memory.
1628 *
1629 * "Dirty" is valid only with the last case (mapped+uptodate).
1630 */
1631
1632 /*
1633 * While block_write_full_page is writing back the dirty buffers under
1634 * the page lock, whoever dirtied the buffers may decide to clean them
1635 * again at any time. We handle that by only looking at the buffer
1636 * state inside lock_buffer().
1637 *
1638 * If block_write_full_page() is called for regular writeback
1639 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1640 * locked buffer. This only can happen if someone has written the buffer
1641 * directly, with submit_bh(). At the address_space level PageWriteback
1642 * prevents this contention from occurring.
1643 */
1644 static int __block_write_full_page(struct inode *inode, struct page *page,
1645 get_block_t *get_block, struct writeback_control *wbc)
1646 {
1647 int err;
1648 sector_t block;
1649 sector_t last_block;
1650 struct buffer_head *bh, *head;
1651 const unsigned blocksize = 1 << inode->i_blkbits;
1652 int nr_underway = 0;
1653
1654 BUG_ON(!PageLocked(page));
1655
1656 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1657
1658 if (!page_has_buffers(page)) {
1659 create_empty_buffers(page, blocksize,
1660 (1 << BH_Dirty)|(1 << BH_Uptodate));
1661 }
1662
1663 /*
1664 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1665 * here, and the (potentially unmapped) buffers may become dirty at
1666 * any time. If a buffer becomes dirty here after we've inspected it
1667 * then we just miss that fact, and the page stays dirty.
1668 *
1669 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1670 * handle that here by just cleaning them.
1671 */
1672
1673 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1674 head = page_buffers(page);
1675 bh = head;
1676
1677 /*
1678 * Get all the dirty buffers mapped to disk addresses and
1679 * handle any aliases from the underlying blockdev's mapping.
1680 */
1681 do {
1682 if (block > last_block) {
1683 /*
1684 * mapped buffers outside i_size will occur, because
1685 * this page can be outside i_size when there is a
1686 * truncate in progress.
1687 */
1688 /*
1689 * The buffer was zeroed by block_write_full_page()
1690 */
1691 clear_buffer_dirty(bh);
1692 set_buffer_uptodate(bh);
1693 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1694 buffer_dirty(bh)) {
1695 WARN_ON(bh->b_size != blocksize);
1696 err = get_block(inode, block, bh, 1);
1697 if (err)
1698 goto recover;
1699 clear_buffer_delay(bh);
1700 if (buffer_new(bh)) {
1701 /* blockdev mappings never come here */
1702 clear_buffer_new(bh);
1703 unmap_underlying_metadata(bh->b_bdev,
1704 bh->b_blocknr);
1705 }
1706 }
1707 bh = bh->b_this_page;
1708 block++;
1709 } while (bh != head);
1710
1711 do {
1712 if (!buffer_mapped(bh))
1713 continue;
1714 /*
1715 * If it's a fully non-blocking write attempt and we cannot
1716 * lock the buffer then redirty the page. Note that this can
1717 * potentially cause a busy-wait loop from pdflush and kswapd
1718 * activity, but those code paths have their own higher-level
1719 * throttling.
1720 */
1721 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1722 lock_buffer(bh);
1723 } else if (!trylock_buffer(bh)) {
1724 redirty_page_for_writepage(wbc, page);
1725 continue;
1726 }
1727 if (test_clear_buffer_dirty(bh)) {
1728 mark_buffer_async_write(bh);
1729 } else {
1730 unlock_buffer(bh);
1731 }
1732 } while ((bh = bh->b_this_page) != head);
1733
1734 /*
1735 * The page and its buffers are protected by PageWriteback(), so we can
1736 * drop the bh refcounts early.
1737 */
1738 BUG_ON(PageWriteback(page));
1739 set_page_writeback(page);
1740
1741 do {
1742 struct buffer_head *next = bh->b_this_page;
1743 if (buffer_async_write(bh)) {
1744 submit_bh(WRITE, bh);
1745 nr_underway++;
1746 }
1747 bh = next;
1748 } while (bh != head);
1749 unlock_page(page);
1750
1751 err = 0;
1752 done:
1753 if (nr_underway == 0) {
1754 /*
1755 * The page was marked dirty, but the buffers were
1756 * clean. Someone wrote them back by hand with
1757 * ll_rw_block/submit_bh. A rare case.
1758 */
1759 end_page_writeback(page);
1760
1761 /*
1762 * The page and buffer_heads can be released at any time from
1763 * here on.
1764 */
1765 }
1766 return err;
1767
1768 recover:
1769 /*
1770 * ENOSPC, or some other error. We may already have added some
1771 * blocks to the file, so we need to write these out to avoid
1772 * exposing stale data.
1773 * The page is currently locked and not marked for writeback
1774 */
1775 bh = head;
1776 /* Recovery: lock and submit the mapped buffers */
1777 do {
1778 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1779 !buffer_delay(bh)) {
1780 lock_buffer(bh);
1781 mark_buffer_async_write(bh);
1782 } else {
1783 /*
1784 * The buffer may have been set dirty during
1785 * attachment to a dirty page.
1786 */
1787 clear_buffer_dirty(bh);
1788 }
1789 } while ((bh = bh->b_this_page) != head);
1790 SetPageError(page);
1791 BUG_ON(PageWriteback(page));
1792 mapping_set_error(page->mapping, err);
1793 set_page_writeback(page);
1794 do {
1795 struct buffer_head *next = bh->b_this_page;
1796 if (buffer_async_write(bh)) {
1797 clear_buffer_dirty(bh);
1798 submit_bh(WRITE, bh);
1799 nr_underway++;
1800 }
1801 bh = next;
1802 } while (bh != head);
1803 unlock_page(page);
1804 goto done;
1805 }
1806
1807 /*
1808 * If a page has any new buffers, zero them out here, and mark them uptodate
1809 * and dirty so they'll be written out (in order to prevent uninitialised
1810 * block data from leaking). And clear the new bit.
1811 */
1812 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1813 {
1814 unsigned int block_start, block_end;
1815 struct buffer_head *head, *bh;
1816
1817 BUG_ON(!PageLocked(page));
1818 if (!page_has_buffers(page))
1819 return;
1820
1821 bh = head = page_buffers(page);
1822 block_start = 0;
1823 do {
1824 block_end = block_start + bh->b_size;
1825
1826 if (buffer_new(bh)) {
1827 if (block_end > from && block_start < to) {
1828 if (!PageUptodate(page)) {
1829 unsigned start, size;
1830
1831 start = max(from, block_start);
1832 size = min(to, block_end) - start;
1833
1834 zero_user(page, start, size);
1835 set_buffer_uptodate(bh);
1836 }
1837
1838 clear_buffer_new(bh);
1839 mark_buffer_dirty(bh);
1840 }
1841 }
1842
1843 block_start = block_end;
1844 bh = bh->b_this_page;
1845 } while (bh != head);
1846 }
1847 EXPORT_SYMBOL(page_zero_new_buffers);
1848
1849 static int __block_prepare_write(struct inode *inode, struct page *page,
1850 unsigned from, unsigned to, get_block_t *get_block)
1851 {
1852 unsigned block_start, block_end;
1853 sector_t block;
1854 int err = 0;
1855 unsigned blocksize, bbits;
1856 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1857
1858 BUG_ON(!PageLocked(page));
1859 BUG_ON(from > PAGE_CACHE_SIZE);
1860 BUG_ON(to > PAGE_CACHE_SIZE);
1861 BUG_ON(from > to);
1862
1863 blocksize = 1 << inode->i_blkbits;
1864 if (!page_has_buffers(page))
1865 create_empty_buffers(page, blocksize, 0);
1866 head = page_buffers(page);
1867
1868 bbits = inode->i_blkbits;
1869 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1870
1871 for(bh = head, block_start = 0; bh != head || !block_start;
1872 block++, block_start=block_end, bh = bh->b_this_page) {
1873 block_end = block_start + blocksize;
1874 if (block_end <= from || block_start >= to) {
1875 if (PageUptodate(page)) {
1876 if (!buffer_uptodate(bh))
1877 set_buffer_uptodate(bh);
1878 }
1879 continue;
1880 }
1881 if (buffer_new(bh))
1882 clear_buffer_new(bh);
1883 if (!buffer_mapped(bh)) {
1884 WARN_ON(bh->b_size != blocksize);
1885 err = get_block(inode, block, bh, 1);
1886 if (err)
1887 break;
1888 if (buffer_new(bh)) {
1889 unmap_underlying_metadata(bh->b_bdev,
1890 bh->b_blocknr);
1891 if (PageUptodate(page)) {
1892 clear_buffer_new(bh);
1893 set_buffer_uptodate(bh);
1894 mark_buffer_dirty(bh);
1895 continue;
1896 }
1897 if (block_end > to || block_start < from)
1898 zero_user_segments(page,
1899 to, block_end,
1900 block_start, from);
1901 continue;
1902 }
1903 }
1904 if (PageUptodate(page)) {
1905 if (!buffer_uptodate(bh))
1906 set_buffer_uptodate(bh);
1907 continue;
1908 }
1909 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1910 !buffer_unwritten(bh) &&
1911 (block_start < from || block_end > to)) {
1912 ll_rw_block(READ, 1, &bh);
1913 *wait_bh++=bh;
1914 }
1915 }
1916 /*
1917 * If we issued read requests - let them complete.
1918 */
1919 while(wait_bh > wait) {
1920 wait_on_buffer(*--wait_bh);
1921 if (!buffer_uptodate(*wait_bh))
1922 err = -EIO;
1923 }
1924 if (unlikely(err))
1925 page_zero_new_buffers(page, from, to);
1926 return err;
1927 }
1928
1929 static int __block_commit_write(struct inode *inode, struct page *page,
1930 unsigned from, unsigned to)
1931 {
1932 unsigned block_start, block_end;
1933 int partial = 0;
1934 unsigned blocksize;
1935 struct buffer_head *bh, *head;
1936
1937 blocksize = 1 << inode->i_blkbits;
1938
1939 for(bh = head = page_buffers(page), block_start = 0;
1940 bh != head || !block_start;
1941 block_start=block_end, bh = bh->b_this_page) {
1942 block_end = block_start + blocksize;
1943 if (block_end <= from || block_start >= to) {
1944 if (!buffer_uptodate(bh))
1945 partial = 1;
1946 } else {
1947 set_buffer_uptodate(bh);
1948 mark_buffer_dirty(bh);
1949 }
1950 clear_buffer_new(bh);
1951 }
1952
1953 /*
1954 * If this is a partial write which happened to make all buffers
1955 * uptodate then we can optimize away a bogus readpage() for
1956 * the next read(). Here we 'discover' whether the page went
1957 * uptodate as a result of this (potentially partial) write.
1958 */
1959 if (!partial)
1960 SetPageUptodate(page);
1961 return 0;
1962 }
1963
1964 /*
1965 * block_write_begin takes care of the basic task of block allocation and
1966 * bringing partial write blocks uptodate first.
1967 *
1968 * If *pagep is not NULL, then block_write_begin uses the locked page
1969 * at *pagep rather than allocating its own. In this case, the page will
1970 * not be unlocked or deallocated on failure.
1971 */
1972 int block_write_begin(struct file *file, struct address_space *mapping,
1973 loff_t pos, unsigned len, unsigned flags,
1974 struct page **pagep, void **fsdata,
1975 get_block_t *get_block)
1976 {
1977 struct inode *inode = mapping->host;
1978 int status = 0;
1979 struct page *page;
1980 pgoff_t index;
1981 unsigned start, end;
1982 int ownpage = 0;
1983
1984 index = pos >> PAGE_CACHE_SHIFT;
1985 start = pos & (PAGE_CACHE_SIZE - 1);
1986 end = start + len;
1987
1988 page = *pagep;
1989 if (page == NULL) {
1990 ownpage = 1;
1991 page = __grab_cache_page(mapping, index);
1992 if (!page) {
1993 status = -ENOMEM;
1994 goto out;
1995 }
1996 *pagep = page;
1997 } else
1998 BUG_ON(!PageLocked(page));
1999
2000 status = __block_prepare_write(inode, page, start, end, get_block);
2001 if (unlikely(status)) {
2002 ClearPageUptodate(page);
2003
2004 if (ownpage) {
2005 unlock_page(page);
2006 page_cache_release(page);
2007 *pagep = NULL;
2008
2009 /*
2010 * prepare_write() may have instantiated a few blocks
2011 * outside i_size. Trim these off again. Don't need
2012 * i_size_read because we hold i_mutex.
2013 */
2014 if (pos + len > inode->i_size)
2015 vmtruncate(inode, inode->i_size);
2016 }
2017 goto out;
2018 }
2019
2020 out:
2021 return status;
2022 }
2023 EXPORT_SYMBOL(block_write_begin);
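
/*
 * Typical usage (sketch, not part of the original file): a simple
 * buffer-head based filesystem wires block_write_begin() and
 * generic_write_end() into its address_space_operations and supplies only
 * its own block mapping routine.  The "myfs_" names below are hypothetical.
 */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;	/* let block_write_begin grab and lock the page */
	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				 myfs_get_block);
}

/*
 *	static const struct address_space_operations myfs_aops = {
 *		.write_begin	= myfs_write_begin,
 *		.write_end	= generic_write_end,
 *		...
 *	};
 */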
2024
2025 int block_write_end(struct file *file, struct address_space *mapping,
2026 loff_t pos, unsigned len, unsigned copied,
2027 struct page *page, void *fsdata)
2028 {
2029 struct inode *inode = mapping->host;
2030 unsigned start;
2031
2032 start = pos & (PAGE_CACHE_SIZE - 1);
2033
2034 if (unlikely(copied < len)) {
2035 /*
2036 * The buffers that were written will now be uptodate, so we
2037 * don't have to worry about a readpage reading them and
2038 * overwriting a partial write. However if we have encountered
2039 * a short write and only partially written into a buffer, it
2040 * will not be marked uptodate, so a readpage might come in and
2041 * destroy our partial write.
2042 *
2043 * Do the simplest thing, and just treat any short write to a
2044 * non uptodate page as a zero-length write, and force the
2045 * caller to redo the whole thing.
2046 */
2047 if (!PageUptodate(page))
2048 copied = 0;
2049
2050 page_zero_new_buffers(page, start+copied, start+len);
2051 }
2052 flush_dcache_page(page);
2053
2054 /* This could be a short (even 0-length) commit */
2055 __block_commit_write(inode, page, start, start+copied);
2056
2057 return copied;
2058 }
2059 EXPORT_SYMBOL(block_write_end);
2060
2061 int generic_write_end(struct file *file, struct address_space *mapping,
2062 loff_t pos, unsigned len, unsigned copied,
2063 struct page *page, void *fsdata)
2064 {
2065 struct inode *inode = mapping->host;
2066 int i_size_changed = 0;
2067
2068 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2069
2070 /*
2071 * No need to use i_size_read() here, the i_size
2072 * cannot change under us because we hold i_mutex.
2073 *
2074 * But it's important to update i_size while still holding page lock:
2075 * page writeout could otherwise come in and zero beyond i_size.
2076 */
2077 if (pos+copied > inode->i_size) {
2078 i_size_write(inode, pos+copied);
2079 i_size_changed = 1;
2080 }
2081
2082 unlock_page(page);
2083 page_cache_release(page);
2084
2085 /*
2086 * Don't mark the inode dirty under page lock. First, it unnecessarily
2087 * makes the holding time of page lock longer. Second, it forces lock
2088 * ordering of page lock and transaction start for journaling
2089 * filesystems.
2090 */
2091 if (i_size_changed)
2092 mark_inode_dirty(inode);
2093
2094 return copied;
2095 }
2096 EXPORT_SYMBOL(generic_write_end);
2097
2098 /*
2099 * block_is_partially_uptodate checks whether buffers within a page are
2100 * uptodate or not.
2101 *
2102 * Returns true if all buffers which correspond to a file portion
2103 * we want to read are uptodate.
2104 */
2105 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2106 unsigned long from)
2107 {
2108 struct inode *inode = page->mapping->host;
2109 unsigned block_start, block_end, blocksize;
2110 unsigned to;
2111 struct buffer_head *bh, *head;
2112 int ret = 1;
2113
2114 if (!page_has_buffers(page))
2115 return 0;
2116
2117 blocksize = 1 << inode->i_blkbits;
2118 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2119 to = from + to;
2120 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2121 return 0;
2122
2123 head = page_buffers(page);
2124 bh = head;
2125 block_start = 0;
2126 do {
2127 block_end = block_start + blocksize;
2128 if (block_end > from && block_start < to) {
2129 if (!buffer_uptodate(bh)) {
2130 ret = 0;
2131 break;
2132 }
2133 if (block_end >= to)
2134 break;
2135 }
2136 block_start = block_end;
2137 bh = bh->b_this_page;
2138 } while (bh != head);
2139
2140 return ret;
2141 }
2142 EXPORT_SYMBOL(block_is_partially_uptodate);
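
/*
 * Sketch (hypothetical "myfs"): buffer-head based filesystems can simply
 * export this helper through their address_space_operations so that reads
 * of an already-uptodate range skip a needless readpage:
 */
static const struct address_space_operations myfs_aops = {
	.readpage		= myfs_readpage,
	.write_begin		= myfs_write_begin,
	.write_end		= generic_write_end,
	.is_partially_uptodate	= block_is_partially_uptodate,
};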
2143
2144 /*
2145 * Generic "read page" function for block devices that have the normal
2146 * get_block functionality. This covers most of the block device filesystems.
2147 * Reads the page asynchronously --- the unlock_buffer() and
2148 * set/clear_buffer_uptodate() functions propagate buffer state into the
2149 * page struct once IO has completed.
2150 */
2151 int block_read_full_page(struct page *page, get_block_t *get_block)
2152 {
2153 struct inode *inode = page->mapping->host;
2154 sector_t iblock, lblock;
2155 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2156 unsigned int blocksize;
2157 int nr, i;
2158 int fully_mapped = 1;
2159
2160 BUG_ON(!PageLocked(page));
2161 blocksize = 1 << inode->i_blkbits;
2162 if (!page_has_buffers(page))
2163 create_empty_buffers(page, blocksize, 0);
2164 head = page_buffers(page);
2165
2166 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2167 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2168 bh = head;
2169 nr = 0;
2170 i = 0;
2171
2172 do {
2173 if (buffer_uptodate(bh))
2174 continue;
2175
2176 if (!buffer_mapped(bh)) {
2177 int err = 0;
2178
2179 fully_mapped = 0;
2180 if (iblock < lblock) {
2181 WARN_ON(bh->b_size != blocksize);
2182 err = get_block(inode, iblock, bh, 0);
2183 if (err)
2184 SetPageError(page);
2185 }
2186 if (!buffer_mapped(bh)) {
2187 zero_user(page, i * blocksize, blocksize);
2188 if (!err)
2189 set_buffer_uptodate(bh);
2190 continue;
2191 }
2192 /*
2193 * get_block() might have updated the buffer
2194 * synchronously
2195 */
2196 if (buffer_uptodate(bh))
2197 continue;
2198 }
2199 arr[nr++] = bh;
2200 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2201
2202 if (fully_mapped)
2203 SetPageMappedToDisk(page);
2204
2205 if (!nr) {
2206 /*
2207 * All buffers are uptodate - we can set the page uptodate
2208 * as well. But not if get_block() returned an error.
2209 */
2210 if (!PageError(page))
2211 SetPageUptodate(page);
2212 unlock_page(page);
2213 return 0;
2214 }
2215
2216 /* Stage two: lock the buffers */
2217 for (i = 0; i < nr; i++) {
2218 bh = arr[i];
2219 lock_buffer(bh);
2220 mark_buffer_async_read(bh);
2221 }
2222
2223 /*
2224 * Stage 3: start the IO. Check for uptodateness
2225 * inside the buffer lock in case another process reading
2226 * the underlying blockdev brought it uptodate (the sct fix).
2227 */
2228 for (i = 0; i < nr; i++) {
2229 bh = arr[i];
2230 if (buffer_uptodate(bh))
2231 end_buffer_async_read(bh, 1);
2232 else
2233 submit_bh(READ, bh);
2234 }
2235 return 0;
2236 }
2237
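/*
 * Sketch: the common filesystem ->readpage is a one-line wrapper that passes
 * its own block mapping routine ("myfs_get_block" is hypothetical):
 */
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}
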
2238 /* utility function for filesystems that need to do work on expanding
2239 * truncates. Uses filesystem pagecache writes to allow the filesystem to
2240 * deal with the hole.
2241 */
2242 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2243 {
2244 struct address_space *mapping = inode->i_mapping;
2245 struct page *page;
2246 void *fsdata;
2247 unsigned long limit;
2248 int err;
2249
2250 err = -EFBIG;
2251 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2252 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2253 send_sig(SIGXFSZ, current, 0);
2254 goto out;
2255 }
2256 if (size > inode->i_sb->s_maxbytes)
2257 goto out;
2258
2259 err = pagecache_write_begin(NULL, mapping, size, 0,
2260 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2261 &page, &fsdata);
2262 if (err)
2263 goto out;
2264
2265 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2266 BUG_ON(err > 0);
2267
2268 out:
2269 return err;
2270 }
2271
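/*
 * Sketch: a filesystem that cannot represent holes grows a file from its
 * ->setattr (or similar) path before accepting the larger size; "myfs" is
 * hypothetical and error handling is trimmed:
 */
static int myfs_cont_expand(struct inode *inode, loff_t new_size)
{
	int err = generic_cont_expand_simple(inode, new_size);

	if (!err)
		mark_inode_dirty(inode);
	return err;
}
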
2272 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2273 loff_t pos, loff_t *bytes)
2274 {
2275 struct inode *inode = mapping->host;
2276 unsigned blocksize = 1 << inode->i_blkbits;
2277 struct page *page;
2278 void *fsdata;
2279 pgoff_t index, curidx;
2280 loff_t curpos;
2281 unsigned zerofrom, offset, len;
2282 int err = 0;
2283
2284 index = pos >> PAGE_CACHE_SHIFT;
2285 offset = pos & ~PAGE_CACHE_MASK;
2286
2287 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2288 zerofrom = curpos & ~PAGE_CACHE_MASK;
2289 if (zerofrom & (blocksize-1)) {
2290 *bytes |= (blocksize-1);
2291 (*bytes)++;
2292 }
2293 len = PAGE_CACHE_SIZE - zerofrom;
2294
2295 err = pagecache_write_begin(file, mapping, curpos, len,
2296 AOP_FLAG_UNINTERRUPTIBLE,
2297 &page, &fsdata);
2298 if (err)
2299 goto out;
2300 zero_user(page, zerofrom, len);
2301 err = pagecache_write_end(file, mapping, curpos, len, len,
2302 page, fsdata);
2303 if (err < 0)
2304 goto out;
2305 BUG_ON(err != len);
2306 err = 0;
2307
2308 balance_dirty_pages_ratelimited(mapping);
2309 }
2310
2311 /* page covers the boundary, find the boundary offset */
2312 if (index == curidx) {
2313 zerofrom = curpos & ~PAGE_CACHE_MASK;
2314 		/* if we will expand the file, the last block will be filled */
2315 if (offset <= zerofrom) {
2316 goto out;
2317 }
2318 if (zerofrom & (blocksize-1)) {
2319 *bytes |= (blocksize-1);
2320 (*bytes)++;
2321 }
2322 len = offset - zerofrom;
2323
2324 err = pagecache_write_begin(file, mapping, curpos, len,
2325 AOP_FLAG_UNINTERRUPTIBLE,
2326 &page, &fsdata);
2327 if (err)
2328 goto out;
2329 zero_user(page, zerofrom, len);
2330 err = pagecache_write_end(file, mapping, curpos, len, len,
2331 page, fsdata);
2332 if (err < 0)
2333 goto out;
2334 BUG_ON(err != len);
2335 err = 0;
2336 }
2337 out:
2338 return err;
2339 }
2340
2341 /*
2342  * For moronic filesystems that do not allow holes in files.
2343 * We may have to extend the file.
2344 */
2345 int cont_write_begin(struct file *file, struct address_space *mapping,
2346 loff_t pos, unsigned len, unsigned flags,
2347 struct page **pagep, void **fsdata,
2348 get_block_t *get_block, loff_t *bytes)
2349 {
2350 struct inode *inode = mapping->host;
2351 unsigned blocksize = 1 << inode->i_blkbits;
2352 unsigned zerofrom;
2353 int err;
2354
2355 err = cont_expand_zero(file, mapping, pos, bytes);
2356 if (err)
2357 goto out;
2358
2359 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2360 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2361 *bytes |= (blocksize-1);
2362 (*bytes)++;
2363 }
2364
2365 *pagep = NULL;
2366 err = block_write_begin(file, mapping, pos, len,
2367 flags, pagep, fsdata, get_block);
2368 out:
2369 return err;
2370 }
2371
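/*
 * Sketch: such a filesystem keeps the highest fully-instantiated offset in
 * its in-core inode (the "mmu_private" field and MYFS_I() below are
 * hypothetical) and hands a pointer to it to cont_write_begin(), which
 * zero-fills the gap up to @pos before the normal write_begin work:
 */
static int myfs_cont_write_begin(struct file *file,
			struct address_space *mapping, loff_t pos,
			unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block,
				&MYFS_I(mapping->host)->mmu_private);
}
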
2372 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2373 get_block_t *get_block)
2374 {
2375 struct inode *inode = page->mapping->host;
2376 int err = __block_prepare_write(inode, page, from, to, get_block);
2377 if (err)
2378 ClearPageUptodate(page);
2379 return err;
2380 }
2381
2382 int block_commit_write(struct page *page, unsigned from, unsigned to)
2383 {
2384 struct inode *inode = page->mapping->host;
2385 __block_commit_write(inode,page,from,to);
2386 return 0;
2387 }
2388
2389 /*
2390 * block_page_mkwrite() is not allowed to change the file size as it gets
2391 * called from a page fault handler when a page is first dirtied. Hence we must
2392 * be careful to check for EOF conditions here. We set the page up correctly
2393 * for a written page which means we get ENOSPC checking when writing into
2394 * holes and correct delalloc and unwritten extent mapping on filesystems that
2395 * support these features.
2396 *
2397 * We are not allowed to take the i_mutex here so we have to play games to
2398 * protect against truncate races as the page could now be beyond EOF. Because
2399 * vmtruncate() writes the inode size before removing pages, once we have the
2400 * page lock we can determine safely if the page is beyond EOF. If it is not
2401 * beyond EOF, then the page is guaranteed safe against truncation until we
2402 * unlock the page.
2403 */
2404 int
2405 block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
2406 get_block_t get_block)
2407 {
2408 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2409 unsigned long end;
2410 loff_t size;
2411 int ret = -EINVAL;
2412
2413 lock_page(page);
2414 size = i_size_read(inode);
2415 if ((page->mapping != inode->i_mapping) ||
2416 (page_offset(page) > size)) {
2417 /* page got truncated out from underneath us */
2418 goto out_unlock;
2419 }
2420
2421 /* page is wholly or partially inside EOF */
2422 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2423 end = size & ~PAGE_CACHE_MASK;
2424 else
2425 end = PAGE_CACHE_SIZE;
2426
2427 ret = block_prepare_write(page, 0, end, get_block);
2428 if (!ret)
2429 ret = block_commit_write(page, 0, end);
2430
2431 out_unlock:
2432 unlock_page(page);
2433 return ret;
2434 }
2435
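/*
 * Sketch: hooking this into a file's vm_operations_struct so that the first
 * write fault on a shared mapping allocates blocks through the filesystem's
 * get_block ("myfs_get_block" is hypothetical):
 */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	return block_page_mkwrite(vma, page, myfs_get_block);
}

/*
 *	static struct vm_operations_struct myfs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 */
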
2436 /*
2437 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2438 * immediately, while under the page lock. So it needs a special end_io
2439 * handler which does not touch the bh after unlocking it.
2440 */
2441 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2442 {
2443 __end_buffer_read_notouch(bh, uptodate);
2444 }
2445
2446 /*
2447 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2448 * the page (converting it to circular linked list and taking care of page
2449 * dirty races).
2450 */
2451 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2452 {
2453 struct buffer_head *bh;
2454
2455 BUG_ON(!PageLocked(page));
2456
2457 spin_lock(&page->mapping->private_lock);
2458 bh = head;
2459 do {
2460 if (PageDirty(page))
2461 set_buffer_dirty(bh);
2462 if (!bh->b_this_page)
2463 bh->b_this_page = head;
2464 bh = bh->b_this_page;
2465 } while (bh != head);
2466 attach_page_buffers(page, head);
2467 spin_unlock(&page->mapping->private_lock);
2468 }
2469
2470 /*
2471 * On entry, the page is fully not uptodate.
2472 * On exit the page is fully uptodate in the areas outside (from,to)
2473 */
2474 int nobh_write_begin(struct file *file, struct address_space *mapping,
2475 loff_t pos, unsigned len, unsigned flags,
2476 struct page **pagep, void **fsdata,
2477 get_block_t *get_block)
2478 {
2479 struct inode *inode = mapping->host;
2480 const unsigned blkbits = inode->i_blkbits;
2481 const unsigned blocksize = 1 << blkbits;
2482 struct buffer_head *head, *bh;
2483 struct page *page;
2484 pgoff_t index;
2485 unsigned from, to;
2486 unsigned block_in_page;
2487 unsigned block_start, block_end;
2488 sector_t block_in_file;
2489 int nr_reads = 0;
2490 int ret = 0;
2491 int is_mapped_to_disk = 1;
2492
2493 index = pos >> PAGE_CACHE_SHIFT;
2494 from = pos & (PAGE_CACHE_SIZE - 1);
2495 to = from + len;
2496
2497 page = __grab_cache_page(mapping, index);
2498 if (!page)
2499 return -ENOMEM;
2500 *pagep = page;
2501 *fsdata = NULL;
2502
2503 if (page_has_buffers(page)) {
2504 unlock_page(page);
2505 page_cache_release(page);
2506 *pagep = NULL;
2507 return block_write_begin(file, mapping, pos, len, flags, pagep,
2508 fsdata, get_block);
2509 }
2510
2511 if (PageMappedToDisk(page))
2512 return 0;
2513
2514 /*
2515 * Allocate buffers so that we can keep track of state, and potentially
2516 * attach them to the page if an error occurs. In the common case of
2517 * no error, they will just be freed again without ever being attached
2518 * to the page (which is all OK, because we're under the page lock).
2519 *
2520 * Be careful: the buffer linked list is a NULL terminated one, rather
2521 * than the circular one we're used to.
2522 */
2523 head = alloc_page_buffers(page, blocksize, 0);
2524 if (!head) {
2525 ret = -ENOMEM;
2526 goto out_release;
2527 }
2528
2529 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2530
2531 /*
2532 * We loop across all blocks in the page, whether or not they are
2533 * part of the affected region. This is so we can discover if the
2534 * page is fully mapped-to-disk.
2535 */
2536 for (block_start = 0, block_in_page = 0, bh = head;
2537 block_start < PAGE_CACHE_SIZE;
2538 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2539 int create;
2540
2541 block_end = block_start + blocksize;
2542 bh->b_state = 0;
2543 create = 1;
2544 if (block_start >= to)
2545 create = 0;
2546 ret = get_block(inode, block_in_file + block_in_page,
2547 bh, create);
2548 if (ret)
2549 goto failed;
2550 if (!buffer_mapped(bh))
2551 is_mapped_to_disk = 0;
2552 if (buffer_new(bh))
2553 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2554 if (PageUptodate(page)) {
2555 set_buffer_uptodate(bh);
2556 continue;
2557 }
2558 if (buffer_new(bh) || !buffer_mapped(bh)) {
2559 zero_user_segments(page, block_start, from,
2560 to, block_end);
2561 continue;
2562 }
2563 if (buffer_uptodate(bh))
2564 continue; /* reiserfs does this */
2565 if (block_start < from || block_end > to) {
2566 lock_buffer(bh);
2567 bh->b_end_io = end_buffer_read_nobh;
2568 submit_bh(READ, bh);
2569 nr_reads++;
2570 }
2571 }
2572
2573 if (nr_reads) {
2574 /*
2575 * The page is locked, so these buffers are protected from
2576 * any VM or truncate activity. Hence we don't need to care
2577 * for the buffer_head refcounts.
2578 */
2579 for (bh = head; bh; bh = bh->b_this_page) {
2580 wait_on_buffer(bh);
2581 if (!buffer_uptodate(bh))
2582 ret = -EIO;
2583 }
2584 if (ret)
2585 goto failed;
2586 }
2587
2588 if (is_mapped_to_disk)
2589 SetPageMappedToDisk(page);
2590
2591 *fsdata = head; /* to be released by nobh_write_end */
2592
2593 return 0;
2594
2595 failed:
2596 BUG_ON(!ret);
2597 /*
2598 * Error recovery is a bit difficult. We need to zero out blocks that
2599 * were newly allocated, and dirty them to ensure they get written out.
2600 * Buffers need to be attached to the page at this point, otherwise
2601 * the handling of potential IO errors during writeout would be hard
2602 * (could try doing synchronous writeout, but what if that fails too?)
2603 */
2604 attach_nobh_buffers(page, head);
2605 page_zero_new_buffers(page, from, to);
2606
2607 out_release:
2608 unlock_page(page);
2609 page_cache_release(page);
2610 *pagep = NULL;
2611
2612 if (pos + len > inode->i_size)
2613 vmtruncate(inode, inode->i_size);
2614
2615 return ret;
2616 }
2617 EXPORT_SYMBOL(nobh_write_begin);
2618
2619 int nobh_write_end(struct file *file, struct address_space *mapping,
2620 loff_t pos, unsigned len, unsigned copied,
2621 struct page *page, void *fsdata)
2622 {
2623 struct inode *inode = page->mapping->host;
2624 struct buffer_head *head = fsdata;
2625 struct buffer_head *bh;
2626 BUG_ON(fsdata != NULL && page_has_buffers(page));
2627
2628 if (unlikely(copied < len) && !page_has_buffers(page))
2629 attach_nobh_buffers(page, head);
2630 if (page_has_buffers(page))
2631 return generic_write_end(file, mapping, pos, len,
2632 copied, page, fsdata);
2633
2634 SetPageUptodate(page);
2635 set_page_dirty(page);
2636 if (pos+copied > inode->i_size) {
2637 i_size_write(inode, pos+copied);
2638 mark_inode_dirty(inode);
2639 }
2640
2641 unlock_page(page);
2642 page_cache_release(page);
2643
2644 while (head) {
2645 bh = head;
2646 head = head->b_this_page;
2647 free_buffer_head(bh);
2648 }
2649
2650 return copied;
2651 }
2652 EXPORT_SYMBOL(nobh_write_end);
2653
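/*
 * Sketch (hypothetical "myfs"): a filesystem offering a nobh mode pairs
 * nobh_write_begin()/nobh_write_end() in one aops table, again supplying
 * only its own get_block:
 */
static int myfs_nobh_write_begin(struct file *file,
			struct address_space *mapping, loff_t pos,
			unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return nobh_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block);
}

/*
 *	static const struct address_space_operations myfs_nobh_aops = {
 *		.readpage	= myfs_readpage,
 *		.write_begin	= myfs_nobh_write_begin,
 *		.write_end	= nobh_write_end,
 *	};
 */
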
2654 /*
2655  * nobh_writepage() - based on block_write_full_page() except
2656 * that it tries to operate without attaching bufferheads to
2657 * the page.
2658 */
2659 int nobh_writepage(struct page *page, get_block_t *get_block,
2660 struct writeback_control *wbc)
2661 {
2662 struct inode * const inode = page->mapping->host;
2663 loff_t i_size = i_size_read(inode);
2664 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2665 unsigned offset;
2666 int ret;
2667
2668 /* Is the page fully inside i_size? */
2669 if (page->index < end_index)
2670 goto out;
2671
2672 /* Is the page fully outside i_size? (truncate in progress) */
2673 offset = i_size & (PAGE_CACHE_SIZE-1);
2674 if (page->index >= end_index+1 || !offset) {
2675 /*
2676 * The page may have dirty, unmapped buffers. For example,
2677 * they may have been added in ext3_writepage(). Make them
2678 * freeable here, so the page does not leak.
2679 */
2680 #if 0
2681 /* Not really sure about this - do we need this ? */
2682 if (page->mapping->a_ops->invalidatepage)
2683 page->mapping->a_ops->invalidatepage(page, offset);
2684 #endif
2685 unlock_page(page);
2686 return 0; /* don't care */
2687 }
2688
2689 /*
2690 * The page straddles i_size. It must be zeroed out on each and every
2691 * writepage invocation because it may be mmapped. "A file is mapped
2692 * in multiples of the page size. For a file that is not a multiple of
2693 * the page size, the remaining memory is zeroed when mapped, and
2694 * writes to that region are not written out to the file."
2695 */
2696 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2697 out:
2698 ret = mpage_writepage(page, get_block, wbc);
2699 if (ret == -EAGAIN)
2700 ret = __block_write_full_page(inode, page, get_block, wbc);
2701 return ret;
2702 }
2703 EXPORT_SYMBOL(nobh_writepage);
2704
2705 int nobh_truncate_page(struct address_space *mapping,
2706 loff_t from, get_block_t *get_block)
2707 {
2708 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2709 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2710 unsigned blocksize;
2711 sector_t iblock;
2712 unsigned length, pos;
2713 struct inode *inode = mapping->host;
2714 struct page *page;
2715 struct buffer_head map_bh;
2716 int err;
2717
2718 blocksize = 1 << inode->i_blkbits;
2719 length = offset & (blocksize - 1);
2720
2721 /* Block boundary? Nothing to do */
2722 if (!length)
2723 return 0;
2724
2725 length = blocksize - length;
2726 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2727
2728 page = grab_cache_page(mapping, index);
2729 err = -ENOMEM;
2730 if (!page)
2731 goto out;
2732
2733 if (page_has_buffers(page)) {
2734 has_buffers:
2735 unlock_page(page);
2736 page_cache_release(page);
2737 return block_truncate_page(mapping, from, get_block);
2738 }
2739
2740 /* Find the buffer that contains "offset" */
2741 pos = blocksize;
2742 while (offset >= pos) {
2743 iblock++;
2744 pos += blocksize;
2745 }
2746
2747 err = get_block(inode, iblock, &map_bh, 0);
2748 if (err)
2749 goto unlock;
2750 /* unmapped? It's a hole - nothing to do */
2751 if (!buffer_mapped(&map_bh))
2752 goto unlock;
2753
2754 /* Ok, it's mapped. Make sure it's up-to-date */
2755 if (!PageUptodate(page)) {
2756 err = mapping->a_ops->readpage(NULL, page);
2757 if (err) {
2758 page_cache_release(page);
2759 goto out;
2760 }
2761 lock_page(page);
2762 if (!PageUptodate(page)) {
2763 err = -EIO;
2764 goto unlock;
2765 }
2766 if (page_has_buffers(page))
2767 goto has_buffers;
2768 }
2769 zero_user(page, offset, length);
2770 set_page_dirty(page);
2771 err = 0;
2772
2773 unlock:
2774 unlock_page(page);
2775 page_cache_release(page);
2776 out:
2777 return err;
2778 }
2779 EXPORT_SYMBOL(nobh_truncate_page);
2780
2781 int block_truncate_page(struct address_space *mapping,
2782 loff_t from, get_block_t *get_block)
2783 {
2784 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2785 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2786 unsigned blocksize;
2787 sector_t iblock;
2788 unsigned length, pos;
2789 struct inode *inode = mapping->host;
2790 struct page *page;
2791 struct buffer_head *bh;
2792 int err;
2793
2794 blocksize = 1 << inode->i_blkbits;
2795 length = offset & (blocksize - 1);
2796
2797 /* Block boundary? Nothing to do */
2798 if (!length)
2799 return 0;
2800
2801 length = blocksize - length;
2802 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2803
2804 page = grab_cache_page(mapping, index);
2805 err = -ENOMEM;
2806 if (!page)
2807 goto out;
2808
2809 if (!page_has_buffers(page))
2810 create_empty_buffers(page, blocksize, 0);
2811
2812 /* Find the buffer that contains "offset" */
2813 bh = page_buffers(page);
2814 pos = blocksize;
2815 while (offset >= pos) {
2816 bh = bh->b_this_page;
2817 iblock++;
2818 pos += blocksize;
2819 }
2820
2821 err = 0;
2822 if (!buffer_mapped(bh)) {
2823 WARN_ON(bh->b_size != blocksize);
2824 err = get_block(inode, iblock, bh, 0);
2825 if (err)
2826 goto unlock;
2827 /* unmapped? It's a hole - nothing to do */
2828 if (!buffer_mapped(bh))
2829 goto unlock;
2830 }
2831
2832 /* Ok, it's mapped. Make sure it's up-to-date */
2833 if (PageUptodate(page))
2834 set_buffer_uptodate(bh);
2835
2836 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2837 err = -EIO;
2838 ll_rw_block(READ, 1, &bh);
2839 wait_on_buffer(bh);
2840 /* Uhhuh. Read error. Complain and punt. */
2841 if (!buffer_uptodate(bh))
2842 goto unlock;
2843 }
2844
2845 zero_user(page, offset, length);
2846 mark_buffer_dirty(bh);
2847 err = 0;
2848
2849 unlock:
2850 unlock_page(page);
2851 page_cache_release(page);
2852 out:
2853 return err;
2854 }
2855
2856 /*
2857 * The generic ->writepage function for buffer-backed address_spaces
2858 */
2859 int block_write_full_page(struct page *page, get_block_t *get_block,
2860 struct writeback_control *wbc)
2861 {
2862 struct inode * const inode = page->mapping->host;
2863 loff_t i_size = i_size_read(inode);
2864 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2865 unsigned offset;
2866
2867 /* Is the page fully inside i_size? */
2868 if (page->index < end_index)
2869 return __block_write_full_page(inode, page, get_block, wbc);
2870
2871 /* Is the page fully outside i_size? (truncate in progress) */
2872 offset = i_size & (PAGE_CACHE_SIZE-1);
2873 if (page->index >= end_index+1 || !offset) {
2874 /*
2875 * The page may have dirty, unmapped buffers. For example,
2876 * they may have been added in ext3_writepage(). Make them
2877 * freeable here, so the page does not leak.
2878 */
2879 do_invalidatepage(page, 0);
2880 unlock_page(page);
2881 return 0; /* don't care */
2882 }
2883
2884 /*
2885 * The page straddles i_size. It must be zeroed out on each and every
2886 	 * writepage invocation because it may be mmapped. "A file is mapped
2887 * in multiples of the page size. For a file that is not a multiple of
2888 * the page size, the remaining memory is zeroed when mapped, and
2889 * writes to that region are not written out to the file."
2890 */
2891 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2892 return __block_write_full_page(inode, page, get_block, wbc);
2893 }
2894
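/*
 * Sketch: as with ->readpage above, the per-filesystem ->writepage is
 * usually just a wrapper around this helper ("myfs_get_block" is
 * hypothetical):
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}
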
2895 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2896 get_block_t *get_block)
2897 {
2898 struct buffer_head tmp;
2899 struct inode *inode = mapping->host;
2900 tmp.b_state = 0;
2901 tmp.b_blocknr = 0;
2902 tmp.b_size = 1 << inode->i_blkbits;
2903 get_block(inode, block, &tmp, 0);
2904 return tmp.b_blocknr;
2905 }
2906
2907 static void end_bio_bh_io_sync(struct bio *bio, int err)
2908 {
2909 struct buffer_head *bh = bio->bi_private;
2910
2911 if (err == -EOPNOTSUPP) {
2912 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2913 set_bit(BH_Eopnotsupp, &bh->b_state);
2914 }
2915
2916 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2917 bio_put(bio);
2918 }
2919
2920 int submit_bh(int rw, struct buffer_head * bh)
2921 {
2922 struct bio *bio;
2923 int ret = 0;
2924
2925 BUG_ON(!buffer_locked(bh));
2926 BUG_ON(!buffer_mapped(bh));
2927 BUG_ON(!bh->b_end_io);
2928
2929 /*
2930 * Mask in barrier bit for a write (could be either a WRITE or a
2931 	 * WRITE_SYNC).
2932 */
2933 if (buffer_ordered(bh) && (rw & WRITE))
2934 rw |= WRITE_BARRIER;
2935
2936 /*
2937 * Only clear out a write error when rewriting
2938 */
2939 if (test_set_buffer_req(bh) && (rw & WRITE))
2940 clear_buffer_write_io_error(bh);
2941
2942 /*
2943 * from here on down, it's all bio -- do the initial mapping,
2944 * submit_bio -> generic_make_request may further map this bio around
2945 */
2946 bio = bio_alloc(GFP_NOIO, 1);
2947
2948 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2949 bio->bi_bdev = bh->b_bdev;
2950 bio->bi_io_vec[0].bv_page = bh->b_page;
2951 bio->bi_io_vec[0].bv_len = bh->b_size;
2952 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2953
2954 bio->bi_vcnt = 1;
2955 bio->bi_idx = 0;
2956 bio->bi_size = bh->b_size;
2957
2958 bio->bi_end_io = end_bio_bh_io_sync;
2959 bio->bi_private = bh;
2960
2961 bio_get(bio);
2962 submit_bio(rw, bio);
2963
2964 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2965 ret = -EOPNOTSUPP;
2966
2967 bio_put(bio);
2968 return ret;
2969 }
2970
2971 /**
2972 * ll_rw_block: low-level access to block devices (DEPRECATED)
2973 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2974 * @nr: number of &struct buffer_heads in the array
2975 * @bhs: array of pointers to &struct buffer_head
2976 *
2977 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2978 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2979 * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
2980 * are sent to disk. The fourth %READA option is described in the documentation
2981 * for generic_make_request() which ll_rw_block() calls.
2982 *
2983 * This function drops any buffer that it cannot get a lock on (with the
2984 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2985 * clean when doing a write request, and any buffer that appears to be
2986  * up-to-date when doing a read request. Further it marks as clean buffers that
2987 * are processed for writing (the buffer cache won't assume that they are
2988 * actually clean until the buffer gets unlocked).
2989 *
2990  * ll_rw_block sets b_end_io to a simple completion handler that marks
2991  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2992 * any waiters.
2993 *
2994 * All of the buffers must be for the same device, and must also be a
2995 * multiple of the current approved size for the device.
2996 */
2997 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2998 {
2999 int i;
3000
3001 for (i = 0; i < nr; i++) {
3002 struct buffer_head *bh = bhs[i];
3003
3004 if (rw == SWRITE || rw == SWRITE_SYNC)
3005 lock_buffer(bh);
3006 else if (!trylock_buffer(bh))
3007 continue;
3008
3009 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
3010 if (test_clear_buffer_dirty(bh)) {
3011 bh->b_end_io = end_buffer_write_sync;
3012 get_bh(bh);
3013 if (rw == SWRITE_SYNC)
3014 submit_bh(WRITE_SYNC, bh);
3015 else
3016 submit_bh(WRITE, bh);
3017 continue;
3018 }
3019 } else {
3020 if (!buffer_uptodate(bh)) {
3021 bh->b_end_io = end_buffer_read_sync;
3022 get_bh(bh);
3023 submit_bh(rw, bh);
3024 continue;
3025 }
3026 }
3027 unlock_buffer(bh);
3028 }
3029 }
3030
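/*
 * Sketch: starting a read of one metadata block and waiting for it, roughly
 * what sb_bread() does under the covers ("myfs" is hypothetical and error
 * handling is minimal):
 */
static struct buffer_head *myfs_read_block(struct super_block *sb,
					   sector_t blocknr)
{
	struct buffer_head *bh = sb_getblk(sb, blocknr);

	if (bh && !buffer_uptodate(bh)) {
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {	/* read failed */
			brelse(bh);
			return NULL;
		}
	}
	return bh;
}
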
3031 /*
3032 * For a data-integrity writeout, we need to wait upon any in-progress I/O
3033 * and then start new I/O and then wait upon it. The caller must have a ref on
3034 * the buffer_head.
3035 */
3036 int sync_dirty_buffer(struct buffer_head *bh)
3037 {
3038 int ret = 0;
3039
3040 WARN_ON(atomic_read(&bh->b_count) < 1);
3041 lock_buffer(bh);
3042 if (test_clear_buffer_dirty(bh)) {
3043 get_bh(bh);
3044 bh->b_end_io = end_buffer_write_sync;
3045 ret = submit_bh(WRITE_SYNC, bh);
3046 wait_on_buffer(bh);
3047 if (buffer_eopnotsupp(bh)) {
3048 clear_buffer_eopnotsupp(bh);
3049 ret = -EOPNOTSUPP;
3050 }
3051 if (!ret && !buffer_uptodate(bh))
3052 ret = -EIO;
3053 } else {
3054 unlock_buffer(bh);
3055 }
3056 return ret;
3057 }
3058
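/*
 * Sketch: forcing one modified metadata buffer to stable storage; the
 * caller already holds a reference from getblk/bread ("myfs" is
 * hypothetical):
 */
static int myfs_sync_one_block(struct buffer_head *bh)
{
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);	/* submits and waits for completion */
}
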
3059 /*
3060 * try_to_free_buffers() checks if all the buffers on this particular page
3061 * are unused, and releases them if so.
3062 *
3063 * Exclusion against try_to_free_buffers may be obtained by either
3064 * locking the page or by holding its mapping's private_lock.
3065 *
3066 * If the page is dirty but all the buffers are clean then we need to
3067 * be sure to mark the page clean as well. This is because the page
3068 * may be against a block device, and a later reattachment of buffers
3069 * to a dirty page will set *all* buffers dirty. Which would corrupt
3070 * filesystem data on the same device.
3071 *
3072 * The same applies to regular filesystem pages: if all the buffers are
3073 * clean then we set the page clean and proceed. To do that, we require
3074 * total exclusion from __set_page_dirty_buffers(). That is obtained with
3075 * private_lock.
3076 *
3077 * try_to_free_buffers() is non-blocking.
3078 */
3079 static inline int buffer_busy(struct buffer_head *bh)
3080 {
3081 return atomic_read(&bh->b_count) |
3082 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3083 }
3084
3085 static int
3086 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3087 {
3088 struct buffer_head *head = page_buffers(page);
3089 struct buffer_head *bh;
3090
3091 bh = head;
3092 do {
3093 if (buffer_write_io_error(bh) && page->mapping)
3094 set_bit(AS_EIO, &page->mapping->flags);
3095 if (buffer_busy(bh))
3096 goto failed;
3097 bh = bh->b_this_page;
3098 } while (bh != head);
3099
3100 do {
3101 struct buffer_head *next = bh->b_this_page;
3102
3103 if (bh->b_assoc_map)
3104 __remove_assoc_queue(bh);
3105 bh = next;
3106 } while (bh != head);
3107 *buffers_to_free = head;
3108 __clear_page_buffers(page);
3109 return 1;
3110 failed:
3111 return 0;
3112 }
3113
3114 int try_to_free_buffers(struct page *page)
3115 {
3116 struct address_space * const mapping = page->mapping;
3117 struct buffer_head *buffers_to_free = NULL;
3118 int ret = 0;
3119
3120 BUG_ON(!PageLocked(page));
3121 if (PageWriteback(page))
3122 return 0;
3123
3124 if (mapping == NULL) { /* can this still happen? */
3125 ret = drop_buffers(page, &buffers_to_free);
3126 goto out;
3127 }
3128
3129 spin_lock(&mapping->private_lock);
3130 ret = drop_buffers(page, &buffers_to_free);
3131
3132 /*
3133 * If the filesystem writes its buffers by hand (eg ext3)
3134 * then we can have clean buffers against a dirty page. We
3135 * clean the page here; otherwise the VM will never notice
3136 * that the filesystem did any IO at all.
3137 *
3138 * Also, during truncate, discard_buffer will have marked all
3139 * the page's buffers clean. We discover that here and clean
3140 * the page also.
3141 *
3142 * private_lock must be held over this entire operation in order
3143 * to synchronise against __set_page_dirty_buffers and prevent the
3144 * dirty bit from being lost.
3145 */
3146 if (ret)
3147 cancel_dirty_page(page, PAGE_CACHE_SIZE);
3148 spin_unlock(&mapping->private_lock);
3149 out:
3150 if (buffers_to_free) {
3151 struct buffer_head *bh = buffers_to_free;
3152
3153 do {
3154 struct buffer_head *next = bh->b_this_page;
3155 free_buffer_head(bh);
3156 bh = next;
3157 } while (bh != buffers_to_free);
3158 }
3159 return ret;
3160 }
3161 EXPORT_SYMBOL(try_to_free_buffers);
3162
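/*
 * Sketch: a filesystem with no private buffer state of its own can defer
 * its ->releasepage straight to this helper ("myfs" is hypothetical):
 */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	return try_to_free_buffers(page);
}
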
3163 void block_sync_page(struct page *page)
3164 {
3165 struct address_space *mapping;
3166
3167 smp_mb();
3168 mapping = page_mapping(page);
3169 if (mapping)
3170 blk_run_backing_dev(mapping->backing_dev_info, page);
3171 }
3172
3173 /*
3174 * There are no bdflush tunables left. But distributions are
3175 * still running obsolete flush daemons, so we terminate them here.
3176 *
3177 * Use of bdflush() is deprecated and will be removed in a future kernel.
3178 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3179 */
3180 asmlinkage long sys_bdflush(int func, long data)
3181 {
3182 static int msg_count;
3183
3184 if (!capable(CAP_SYS_ADMIN))
3185 return -EPERM;
3186
3187 if (msg_count < 5) {
3188 msg_count++;
3189 printk(KERN_INFO
3190 "warning: process `%s' used the obsolete bdflush"
3191 " system call\n", current->comm);
3192 printk(KERN_INFO "Fix your initscripts?\n");
3193 }
3194
3195 if (func == 1)
3196 do_exit(0);
3197 return 0;
3198 }
3199
3200 /*
3201 * Buffer-head allocation
3202 */
3203 static struct kmem_cache *bh_cachep;
3204
3205 /*
3206 * Once the number of bh's in the machine exceeds this level, we start
3207 * stripping them in writeback.
3208 */
3209 static int max_buffer_heads;
3210
3211 int buffer_heads_over_limit;
3212
3213 struct bh_accounting {
3214 int nr; /* Number of live bh's */
3215 int ratelimit; /* Limit cacheline bouncing */
3216 };
3217
3218 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3219
3220 static void recalc_bh_state(void)
3221 {
3222 int i;
3223 int tot = 0;
3224
3225 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3226 return;
3227 __get_cpu_var(bh_accounting).ratelimit = 0;
3228 for_each_online_cpu(i)
3229 tot += per_cpu(bh_accounting, i).nr;
3230 buffer_heads_over_limit = (tot > max_buffer_heads);
3231 }
3232
3233 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3234 {
3235 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3236 if (ret) {
3237 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3238 get_cpu_var(bh_accounting).nr++;
3239 recalc_bh_state();
3240 put_cpu_var(bh_accounting);
3241 }
3242 return ret;
3243 }
3244 EXPORT_SYMBOL(alloc_buffer_head);
3245
3246 void free_buffer_head(struct buffer_head *bh)
3247 {
3248 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3249 kmem_cache_free(bh_cachep, bh);
3250 get_cpu_var(bh_accounting).nr--;
3251 recalc_bh_state();
3252 put_cpu_var(bh_accounting);
3253 }
3254 EXPORT_SYMBOL(free_buffer_head);
3255
3256 static void buffer_exit_cpu(int cpu)
3257 {
3258 int i;
3259 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3260
3261 for (i = 0; i < BH_LRU_SIZE; i++) {
3262 brelse(b->bhs[i]);
3263 b->bhs[i] = NULL;
3264 }
3265 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3266 per_cpu(bh_accounting, cpu).nr = 0;
3267 put_cpu_var(bh_accounting);
3268 }
3269
3270 static int buffer_cpu_notify(struct notifier_block *self,
3271 unsigned long action, void *hcpu)
3272 {
3273 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3274 buffer_exit_cpu((unsigned long)hcpu);
3275 return NOTIFY_OK;
3276 }
3277
3278 /**
3279 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3280 * @bh: struct buffer_head
3281 *
3282  * Return 1 if the buffer is up-to-date; otherwise return 0 with
3283  * the buffer locked.
3284 */
3285 int bh_uptodate_or_lock(struct buffer_head *bh)
3286 {
3287 if (!buffer_uptodate(bh)) {
3288 lock_buffer(bh);
3289 if (!buffer_uptodate(bh))
3290 return 0;
3291 unlock_buffer(bh);
3292 }
3293 return 1;
3294 }
3295 EXPORT_SYMBOL(bh_uptodate_or_lock);
3296
3297 /**
3298 * bh_submit_read - Submit a locked buffer for reading
3299 * @bh: struct buffer_head
3300 *
3301 * Returns zero on success and -EIO on error.
3302 */
3303 int bh_submit_read(struct buffer_head *bh)
3304 {
3305 BUG_ON(!buffer_locked(bh));
3306
3307 if (buffer_uptodate(bh)) {
3308 unlock_buffer(bh);
3309 return 0;
3310 }
3311
3312 get_bh(bh);
3313 bh->b_end_io = end_buffer_read_sync;
3314 submit_bh(READ, bh);
3315 wait_on_buffer(bh);
3316 if (buffer_uptodate(bh))
3317 return 0;
3318 return -EIO;
3319 }
3320 EXPORT_SYMBOL(bh_submit_read);
3321
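/*
 * Sketch: the two helpers above are typically combined to read a block only
 * when it is not already uptodate; "bh" would come from sb_getblk() and the
 * caller keeps its reference:
 */
static int myfs_read_bh_if_needed(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already uptodate, nothing to do */
	return bh_submit_read(bh);	/* submits READ, waits, unlocks bh */
}
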
3322 static void
3323 init_buffer_head(void *data)
3324 {
3325 struct buffer_head *bh = data;
3326
3327 memset(bh, 0, sizeof(*bh));
3328 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3329 }
3330
3331 void __init buffer_init(void)
3332 {
3333 int nrpages;
3334
3335 bh_cachep = kmem_cache_create("buffer_head",
3336 sizeof(struct buffer_head), 0,
3337 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3338 SLAB_MEM_SPREAD),
3339 init_buffer_head);
3340
3341 /*
3342 * Limit the bh occupancy to 10% of ZONE_NORMAL
3343 */
3344 nrpages = (nr_free_buffer_pages() * 10) / 100;
3345 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3346 hotcpu_notifier(buffer_cpu_notify, 0);
3347 }
3348
3349 EXPORT_SYMBOL(__bforget);
3350 EXPORT_SYMBOL(__brelse);
3351 EXPORT_SYMBOL(__wait_on_buffer);
3352 EXPORT_SYMBOL(block_commit_write);
3353 EXPORT_SYMBOL(block_prepare_write);
3354 EXPORT_SYMBOL(block_page_mkwrite);
3355 EXPORT_SYMBOL(block_read_full_page);
3356 EXPORT_SYMBOL(block_sync_page);
3357 EXPORT_SYMBOL(block_truncate_page);
3358 EXPORT_SYMBOL(block_write_full_page);
3359 EXPORT_SYMBOL(cont_write_begin);
3360 EXPORT_SYMBOL(end_buffer_read_sync);
3361 EXPORT_SYMBOL(end_buffer_write_sync);
3362 EXPORT_SYMBOL(file_fsync);
3363 EXPORT_SYMBOL(fsync_bdev);
3364 EXPORT_SYMBOL(generic_block_bmap);
3365 EXPORT_SYMBOL(generic_cont_expand_simple);
3366 EXPORT_SYMBOL(init_buffer);
3367 EXPORT_SYMBOL(invalidate_bdev);
3368 EXPORT_SYMBOL(ll_rw_block);
3369 EXPORT_SYMBOL(mark_buffer_dirty);
3370 EXPORT_SYMBOL(submit_bh);
3371 EXPORT_SYMBOL(sync_dirty_buffer);
3372 EXPORT_SYMBOL(unlock_buffer);