1/*
2 * linux/fs/buffer.c
3 *
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
6
7/*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9 *
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12 *
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
15 *
16 * Added 32k buffer block sizes - these are required by older ARM systems. - RMK
17 *
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19 */
20
21#include <linux/kernel.h>
22#include <linux/syscalls.h>
23#include <linux/fs.h>
24#include <linux/mm.h>
25#include <linux/percpu.h>
26#include <linux/slab.h>
27#include <linux/smp_lock.h>
28#include <linux/capability.h>
29#include <linux/blkdev.h>
30#include <linux/file.h>
31#include <linux/quotaops.h>
32#include <linux/highmem.h>
33#include <linux/module.h>
34#include <linux/writeback.h>
35#include <linux/hash.h>
36#include <linux/suspend.h>
37#include <linux/buffer_head.h>
38#include <linux/bio.h>
39#include <linux/notifier.h>
40#include <linux/cpu.h>
41#include <linux/bitops.h>
42#include <linux/mpage.h>
43#include <linux/bit_spinlock.h>
44
45static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46static void invalidate_bh_lrus(void);
47
48#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
49
50inline void
51init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
52{
53 bh->b_end_io = handler;
54 bh->b_private = private;
55}
56
57static int sync_buffer(void *word)
58{
59 struct block_device *bd;
60 struct buffer_head *bh
61 = container_of(word, struct buffer_head, b_state);
62
63 smp_mb();
64 bd = bh->b_bdev;
65 if (bd)
66 blk_run_address_space(bd->bd_inode->i_mapping);
67 io_schedule();
68 return 0;
69}
70
71void fastcall __lock_buffer(struct buffer_head *bh)
72{
73 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
74 TASK_UNINTERRUPTIBLE);
75}
76EXPORT_SYMBOL(__lock_buffer);
77
78void fastcall unlock_buffer(struct buffer_head *bh)
79{
80 clear_buffer_locked(bh);
81 smp_mb__after_clear_bit();
82 wake_up_bit(&bh->b_state, BH_Lock);
83}
84
85/*
86 * Block until a buffer comes unlocked. This doesn't stop it
87 * from becoming locked again - you have to lock it yourself
88 * if you want to preserve its state.
89 */
90void __wait_on_buffer(struct buffer_head * bh)
91{
92 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
93}
94
95static void
96__clear_page_buffers(struct page *page)
97{
98 ClearPagePrivate(page);
99 set_page_private(page, 0);
100 page_cache_release(page);
101}
102
103static void buffer_io_error(struct buffer_head *bh)
104{
105 char b[BDEVNAME_SIZE];
106
107 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
108 bdevname(bh->b_bdev, b),
109 (unsigned long long)bh->b_blocknr);
110}
111
112/*
113 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
114 * unlock the buffer. This is what ll_rw_block uses too.
115 */
116void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
117{
118 if (uptodate) {
119 set_buffer_uptodate(bh);
120 } else {
121 /* This happens, due to failed READA attempts. */
122 clear_buffer_uptodate(bh);
123 }
124 unlock_buffer(bh);
125 put_bh(bh);
126}
127
128void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
129{
130 char b[BDEVNAME_SIZE];
131
132 if (uptodate) {
133 set_buffer_uptodate(bh);
134 } else {
135 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
136 buffer_io_error(bh);
137 printk(KERN_WARNING "lost page write due to "
138 "I/O error on %s\n",
139 bdevname(bh->b_bdev, b));
140 }
141 set_buffer_write_io_error(bh);
142 clear_buffer_uptodate(bh);
143 }
144 unlock_buffer(bh);
145 put_bh(bh);
146}
147
148/*
149 * Write out and wait upon all the dirty data associated with a block
150 * device via its mapping. Does not take the superblock lock.
151 */
152int sync_blockdev(struct block_device *bdev)
153{
154 int ret = 0;
155
156 if (bdev)
157 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
158 return ret;
159}
160EXPORT_SYMBOL(sync_blockdev);
161
162/*
163 * Write out and wait upon all dirty data associated with this
164 * device. Filesystem data as well as the underlying block
165 * device. Takes the superblock lock.
166 */
167int fsync_bdev(struct block_device *bdev)
168{
169 struct super_block *sb = get_super(bdev);
170 if (sb) {
171 int res = fsync_super(sb);
172 drop_super(sb);
173 return res;
174 }
175 return sync_blockdev(bdev);
176}
177
178/**
179 * freeze_bdev -- lock a filesystem and force it into a consistent state
180 * @bdev: blockdevice to lock
181 *
182 * This takes the block device bd_mount_mutex to make sure no new mounts
183 * happen on bdev until thaw_bdev() is called.
184 * If a superblock is found on this device, we take the s_umount semaphore
185 * on it to make sure nobody unmounts until the snapshot creation is done.
186 */
187struct super_block *freeze_bdev(struct block_device *bdev)
188{
189 struct super_block *sb;
190
191 mutex_lock(&bdev->bd_mount_mutex);
192 sb = get_super(bdev);
193 if (sb && !(sb->s_flags & MS_RDONLY)) {
194 sb->s_frozen = SB_FREEZE_WRITE;
195 smp_wmb();
196
197 __fsync_super(sb);
198
199 sb->s_frozen = SB_FREEZE_TRANS;
200 smp_wmb();
201
202 sync_blockdev(sb->s_bdev);
203
204 if (sb->s_op->write_super_lockfs)
205 sb->s_op->write_super_lockfs(sb);
206 }
207
208 sync_blockdev(bdev);
209 return sb; /* thaw_bdev releases s->s_umount and bd_mount_mutex */
210}
211EXPORT_SYMBOL(freeze_bdev);
212
213/**
214 * thaw_bdev -- unlock filesystem
215 * @bdev: blockdevice to unlock
216 * @sb: associated superblock
217 *
218 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
219 */
220void thaw_bdev(struct block_device *bdev, struct super_block *sb)
221{
222 if (sb) {
223 BUG_ON(sb->s_bdev != bdev);
224
225 if (sb->s_op->unlockfs)
226 sb->s_op->unlockfs(sb);
227 sb->s_frozen = SB_UNFROZEN;
228 smp_wmb();
229 wake_up(&sb->s_wait_unfrozen);
230 drop_super(sb);
231 }
232
233 mutex_unlock(&bdev->bd_mount_mutex);
234}
235EXPORT_SYMBOL(thaw_bdev);
236
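/*
 * Illustrative sketch, not part of the original file: the intended
 * pairing of freeze_bdev()/thaw_bdev() for a snapshot-style caller.
 * The helper name example_snapshot_bdev() and the device-level
 * do_snapshot() step are assumptions made for the example.
 */
#if 0
static int example_snapshot_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int err;

	sb = freeze_bdev(bdev);		/* blocks new mounts, syncs, freezes writes */
	err = do_snapshot(bdev);	/* hypothetical device-level snapshot */
	thaw_bdev(bdev, sb);		/* drops s_umount and bd_mount_mutex again */
	return err;
}
#endif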
237/*
238 * Various filesystems appear to want __find_get_block to be non-blocking.
239 * But it's the page lock which protects the buffers. To get around this,
240 * we get exclusion from try_to_free_buffers with the blockdev mapping's
241 * private_lock.
242 *
243 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
244 * may be quite high. This code could TryLock the page, and if that
245 * succeeds, there is no need to take private_lock. (But if
246 * private_lock is contended then so is mapping->tree_lock).
247 */
248static struct buffer_head *
249__find_get_block_slow(struct block_device *bdev, sector_t block)
250{
251 struct inode *bd_inode = bdev->bd_inode;
252 struct address_space *bd_mapping = bd_inode->i_mapping;
253 struct buffer_head *ret = NULL;
254 pgoff_t index;
255 struct buffer_head *bh;
256 struct buffer_head *head;
257 struct page *page;
258 int all_mapped = 1;
259
260 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
261 page = find_get_page(bd_mapping, index);
262 if (!page)
263 goto out;
264
265 spin_lock(&bd_mapping->private_lock);
266 if (!page_has_buffers(page))
267 goto out_unlock;
268 head = page_buffers(page);
269 bh = head;
270 do {
271 if (bh->b_blocknr == block) {
272 ret = bh;
273 get_bh(bh);
274 goto out_unlock;
275 }
276 if (!buffer_mapped(bh))
277 all_mapped = 0;
278 bh = bh->b_this_page;
279 } while (bh != head);
280
281 /* we might be here because some of the buffers on this page are
282 * not mapped. This is due to various races between
283 * file io on the block device and getblk. It gets dealt with
284 * elsewhere, don't buffer_error if we had some unmapped buffers
285 */
286 if (all_mapped) {
287 printk("__find_get_block_slow() failed. "
288 "block=%llu, b_blocknr=%llu\n",
289 (unsigned long long)block,
290 (unsigned long long)bh->b_blocknr);
291 printk("b_state=0x%08lx, b_size=%zu\n",
292 bh->b_state, bh->b_size);
293 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
294 }
295out_unlock:
296 spin_unlock(&bd_mapping->private_lock);
297 page_cache_release(page);
298out:
299 return ret;
300}
301
302/* If invalidate_buffers() will trash dirty buffers, it means some kind
303 of fs corruption is going on. Trashing dirty data always implies losing
304 information that was supposed to be just stored on the physical layer
305 by the user.
306
307 Thus invalidate_buffers in general usage is not allowed to trash
308 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
309 be preserved. These buffers are simply skipped.
310
311 We also skip buffers which are still in use. For example this can
312 happen if a userspace program is reading the block device.
313
314 NOTE: In the case where the user removed a removable-media disk even though
315 there is still dirty data not synced on disk (due to a bug in the device driver
316 or to an error of the user), by not destroying the dirty buffers we could
317 generate corruption also on the next media inserted, thus a parameter is
318 necessary to handle this case in the safest way possible (trying
319 not to corrupt the new disk inserted with the data belonging to
320 the old, now corrupted, disk). Also for the ramdisk the natural thing
321 to do in order to release the ramdisk memory is to destroy dirty buffers.
322
323 These are two special cases. Normal usage implies that the device driver
324 issues a sync on the device (without waiting for I/O completion) and
325 then an invalidate_buffers call that doesn't trash dirty buffers.
326
327 For handling cache coherency with the blkdev pagecache the 'update' case
328 has been introduced. It is needed to re-read from disk any pinned
329 buffer. NOTE: re-reading from disk is destructive so we can do it only
330 when we assume nobody is changing the buffercache under our I/O and when
331 we think the disk contains more recent information than the buffercache.
332 The update == 1 pass marks the buffers we need to update, the update == 2
333 pass does the actual I/O. */
334void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
335{
336 struct address_space *mapping = bdev->bd_inode->i_mapping;
337
338 if (mapping->nrpages == 0)
339 return;
340
341 invalidate_bh_lrus();
342 /*
343 * FIXME: what about destroy_dirty_buffers?
344 * We really want to use invalidate_inode_pages2() for
345 * that, but not until that's cleaned up.
346 */
347 invalidate_inode_pages(mapping);
348}
349
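/*
 * Illustrative sketch, not part of the original file: the "normal usage"
 * described above - sync the device first, then invalidate without
 * trashing dirty buffers. The function name example_media_changed() is
 * an assumption; sync_blockdev() and invalidate_bdev() are the helpers
 * defined in this file.
 */
#if 0
static void example_media_changed(struct block_device *bdev)
{
	sync_blockdev(bdev);		/* write out and wait upon dirty pagecache */
	invalidate_bdev(bdev, 0);	/* drop clean buffers, skip dirty/busy ones */
}
#endif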
350/*
351 * Kick pdflush then try to free up some ZONE_NORMAL memory.
352 */
353static void free_more_memory(void)
354{
355 struct zone **zones;
356 pg_data_t *pgdat;
357
358 wakeup_pdflush(1024);
359 yield();
360
361 for_each_online_pgdat(pgdat) {
362 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
363 if (*zones)
364 try_to_free_pages(zones, GFP_NOFS);
365 }
366}
367
368/*
369 * I/O completion handler for block_read_full_page() - pages
370 * which come unlocked at the end of I/O.
371 */
372static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
373{
374 unsigned long flags;
375 struct buffer_head *first;
376 struct buffer_head *tmp;
377 struct page *page;
378 int page_uptodate = 1;
379
380 BUG_ON(!buffer_async_read(bh));
381
382 page = bh->b_page;
383 if (uptodate) {
384 set_buffer_uptodate(bh);
385 } else {
386 clear_buffer_uptodate(bh);
387 if (printk_ratelimit())
388 buffer_io_error(bh);
389 SetPageError(page);
390 }
391
392 /*
393 * Be _very_ careful from here on. Bad things can happen if
394 * two buffer heads end IO at almost the same time and both
395 * decide that the page is now completely done.
396 */
397 first = page_buffers(page);
398 local_irq_save(flags);
399 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
400 clear_buffer_async_read(bh);
401 unlock_buffer(bh);
402 tmp = bh;
403 do {
404 if (!buffer_uptodate(tmp))
405 page_uptodate = 0;
406 if (buffer_async_read(tmp)) {
407 BUG_ON(!buffer_locked(tmp));
408 goto still_busy;
409 }
410 tmp = tmp->b_this_page;
411 } while (tmp != bh);
412 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
413 local_irq_restore(flags);
414
415 /*
416 * If none of the buffers had errors and they are all
417 * uptodate then we can set the page uptodate.
418 */
419 if (page_uptodate && !PageError(page))
420 SetPageUptodate(page);
421 unlock_page(page);
422 return;
423
424still_busy:
425 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
426 local_irq_restore(flags);
427 return;
428}
429
430/*
431 * Completion handler for block_write_full_page() - pages which are unlocked
432 * during I/O, and which have PageWriteback cleared upon I/O completion.
433 */
434static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
435{
436 char b[BDEVNAME_SIZE];
437 unsigned long flags;
438 struct buffer_head *first;
439 struct buffer_head *tmp;
440 struct page *page;
441
442 BUG_ON(!buffer_async_write(bh));
443
444 page = bh->b_page;
445 if (uptodate) {
446 set_buffer_uptodate(bh);
447 } else {
448 if (printk_ratelimit()) {
449 buffer_io_error(bh);
450 printk(KERN_WARNING "lost page write due to "
451 "I/O error on %s\n",
452 bdevname(bh->b_bdev, b));
453 }
454 set_bit(AS_EIO, &page->mapping->flags);
455 clear_buffer_uptodate(bh);
456 SetPageError(page);
457 }
458
459 first = page_buffers(page);
460 local_irq_save(flags);
461 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
462
463 clear_buffer_async_write(bh);
464 unlock_buffer(bh);
465 tmp = bh->b_this_page;
466 while (tmp != bh) {
467 if (buffer_async_write(tmp)) {
468 BUG_ON(!buffer_locked(tmp));
469 goto still_busy;
470 }
471 tmp = tmp->b_this_page;
472 }
473 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
474 local_irq_restore(flags);
475 end_page_writeback(page);
476 return;
477
478still_busy:
479 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
480 local_irq_restore(flags);
481 return;
482}
483
484/*
485 * If a page's buffers are under async read-in (end_buffer_async_read
486 * completion) then there is a possibility that another thread of
487 * control could lock one of the buffers after it has completed
488 * but while some of the other buffers have not completed. This
489 * locked buffer would confuse end_buffer_async_read() into not unlocking
490 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
491 * that this buffer is not under async I/O.
492 *
493 * The page comes unlocked when it has no locked buffer_async buffers
494 * left.
495 *
496 * PageLocked prevents anyone starting new async I/O reads any of
497 * the buffers.
498 *
499 * PageWriteback is used to prevent simultaneous writeout of the same
500 * page.
501 *
502 * PageLocked prevents anyone from starting writeback of a page which is
503 * under read I/O (PageWriteback is only ever set against a locked page).
504 */
505static void mark_buffer_async_read(struct buffer_head *bh)
506{
507 bh->b_end_io = end_buffer_async_read;
508 set_buffer_async_read(bh);
509}
510
511void mark_buffer_async_write(struct buffer_head *bh)
512{
513 bh->b_end_io = end_buffer_async_write;
514 set_buffer_async_write(bh);
515}
516EXPORT_SYMBOL(mark_buffer_async_write);
517
518
519/*
520 * fs/buffer.c contains helper functions for buffer-backed address space's
521 * fsync functions. A common requirement for buffer-based filesystems is
522 * that certain data from the backing blockdev needs to be written out for
523 * a successful fsync(). For example, ext2 indirect blocks need to be
524 * written back and waited upon before fsync() returns.
525 *
526 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
527 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
528 * management of a list of dependent buffers at ->i_mapping->private_list.
529 *
530 * Locking is a little subtle: try_to_free_buffers() will remove buffers
531 * from their controlling inode's queue when they are being freed. But
532 * try_to_free_buffers() will be operating against the *blockdev* mapping
533 * at the time, not against the S_ISREG file which depends on those buffers.
534 * So the locking for private_list is via the private_lock in the address_space
535 * which backs the buffers. Which is different from the address_space
536 * against which the buffers are listed. So for a particular address_space,
537 * mapping->private_lock does *not* protect mapping->private_list! In fact,
538 * mapping->private_list will always be protected by the backing blockdev's
539 * ->private_lock.
540 *
541 * Which introduces a requirement: all buffers on an address_space's
542 * ->private_list must be from the same address_space: the blockdev's.
543 *
544 * address_spaces which do not place buffers at ->private_list via these
545 * utility functions are free to use private_lock and private_list for
546 * whatever they want. The only requirement is that list_empty(private_list)
547 * be true at clear_inode() time.
548 *
549 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
550 * filesystems should do that. invalidate_inode_buffers() should just go
551 * BUG_ON(!list_empty).
552 *
553 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
554 * take an address_space, not an inode. And it should be called
555 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
556 * queued up.
557 *
558 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
559 * list if it is already on a list. Because if the buffer is on a list,
560 * it *must* already be on the right one. If not, the filesystem is being
561 * silly. This will save a ton of locking. But first we have to ensure
562 * that buffers are taken *off* the old inode's list when they are freed
563 * (presumably in truncate). That requires careful auditing of all
564 * filesystems (do it inside bforget()). It could also be done by bringing
565 * b_inode back.
566 */
567
568/*
569 * The buffer's backing address_space's private_lock must be held
570 */
571static inline void __remove_assoc_queue(struct buffer_head *bh)
572{
573 list_del_init(&bh->b_assoc_buffers);
574}
575
576int inode_has_buffers(struct inode *inode)
577{
578 return !list_empty(&inode->i_data.private_list);
579}
580
581/*
582 * osync is designed to support O_SYNC io. It waits synchronously for
583 * all already-submitted IO to complete, but does not queue any new
584 * writes to the disk.
585 *
586 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
587 * you dirty the buffers, and then use osync_inode_buffers to wait for
588 * completion. Any other dirty buffers which are not yet queued for
589 * write will not be flushed to disk by the osync.
590 */
591static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
592{
593 struct buffer_head *bh;
594 struct list_head *p;
595 int err = 0;
596
597 spin_lock(lock);
598repeat:
599 list_for_each_prev(p, list) {
600 bh = BH_ENTRY(p);
601 if (buffer_locked(bh)) {
602 get_bh(bh);
603 spin_unlock(lock);
604 wait_on_buffer(bh);
605 if (!buffer_uptodate(bh))
606 err = -EIO;
607 brelse(bh);
608 spin_lock(lock);
609 goto repeat;
610 }
611 }
612 spin_unlock(lock);
613 return err;
614}
615
616/**
617 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
618 * buffers
619 * @mapping: the mapping which wants those buffers written
620 *
621 * Starts I/O against the buffers at mapping->private_list, and waits upon
622 * that I/O.
623 *
624 * Basically, this is a convenience function for fsync().
625 * @mapping is a file or directory which needs those buffers to be written for
626 * a successful fsync().
627 */
628int sync_mapping_buffers(struct address_space *mapping)
629{
630 struct address_space *buffer_mapping = mapping->assoc_mapping;
631
632 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
633 return 0;
634
635 return fsync_buffers_list(&buffer_mapping->private_lock,
636 &mapping->private_list);
637}
638EXPORT_SYMBOL(sync_mapping_buffers);
639
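/*
 * Illustrative sketch, not part of the original file: how a simple
 * buffer-based filesystem's ->fsync could use sync_mapping_buffers() to
 * flush the indirect blocks it queued with mark_buffer_dirty_inode().
 * The name example_fsync() and the write_inode_now() step are assumptions.
 */
#if 0
static int example_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	int err, err2;

	/* write and wait upon the buffers on inode->i_mapping->private_list */
	err = sync_mapping_buffers(inode->i_mapping);
	if (inode->i_state & I_DIRTY) {
		err2 = write_inode_now(inode, 1);	/* flush the inode itself */
		if (!err)
			err = err2;
	}
	return err;
}
#endif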
640/*
641 * Called when we've recently written block `bblock', and it is known that
642 * `bblock' was for a buffer_boundary() buffer. This means that the block at
643 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
644 * dirty, schedule it for IO. So that indirects merge nicely with their data.
645 */
646void write_boundary_block(struct block_device *bdev,
647 sector_t bblock, unsigned blocksize)
648{
649 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
650 if (bh) {
651 if (buffer_dirty(bh))
652 ll_rw_block(WRITE, 1, &bh);
653 put_bh(bh);
654 }
655}
656
657void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
658{
659 struct address_space *mapping = inode->i_mapping;
660 struct address_space *buffer_mapping = bh->b_page->mapping;
661
662 mark_buffer_dirty(bh);
663 if (!mapping->assoc_mapping) {
664 mapping->assoc_mapping = buffer_mapping;
665 } else {
666 BUG_ON(mapping->assoc_mapping != buffer_mapping);
667 }
668 if (list_empty(&bh->b_assoc_buffers)) {
669 spin_lock(&buffer_mapping->private_lock);
670 list_move_tail(&bh->b_assoc_buffers,
671 &mapping->private_list);
672 spin_unlock(&buffer_mapping->private_lock);
673 }
674}
675EXPORT_SYMBOL(mark_buffer_dirty_inode);
676
677/*
678 * Add a page to the dirty page list.
679 *
680 * It is a sad fact of life that this function is called from several places
681 * deeply under spinlocking. It may not sleep.
682 *
683 * If the page has buffers, the uptodate buffers are set dirty, to preserve
684 * dirty-state coherency between the page and the buffers. If the page does
685 * not have buffers then when they are later attached they will all be set
686 * dirty.
687 *
688 * The buffers are dirtied before the page is dirtied. There's a small race
689 * window in which a writepage caller may see the page cleanness but not the
690 * buffer dirtiness. That's fine. If this code were to set the page dirty
691 * before the buffers, a concurrent writepage caller could clear the page dirty
692 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
693 * page on the dirty page list.
694 *
695 * We use private_lock to lock against try_to_free_buffers while using the
696 * page's buffer list. Also use this to protect against clean buffers being
697 * added to the page after it was set dirty.
698 *
699 * FIXME: may need to call ->reservepage here as well. That's rather up to the
700 * address_space though.
701 */
702int __set_page_dirty_buffers(struct page *page)
703{
704 struct address_space * const mapping = page_mapping(page);
705
706 if (unlikely(!mapping))
707 return !TestSetPageDirty(page);
708
709 spin_lock(&mapping->private_lock);
710 if (page_has_buffers(page)) {
711 struct buffer_head *head = page_buffers(page);
712 struct buffer_head *bh = head;
713
714 do {
715 set_buffer_dirty(bh);
716 bh = bh->b_this_page;
717 } while (bh != head);
718 }
719 spin_unlock(&mapping->private_lock);
720
721 if (!TestSetPageDirty(page)) {
722 write_lock_irq(&mapping->tree_lock);
723 if (page->mapping) { /* Race with truncate? */
724 if (mapping_cap_account_dirty(mapping))
725 __inc_zone_page_state(page, NR_FILE_DIRTY);
726 radix_tree_tag_set(&mapping->page_tree,
727 page_index(page),
728 PAGECACHE_TAG_DIRTY);
729 }
730 write_unlock_irq(&mapping->tree_lock);
731 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
732 return 1;
733 }
734 return 0;
735}
736EXPORT_SYMBOL(__set_page_dirty_buffers);
737
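/*
 * Illustrative sketch, not part of the original file: buffer-backed
 * filesystems typically plug __set_page_dirty_buffers() straight into
 * their address_space_operations. The structure name example_aops is
 * an assumption.
 */
#if 0
static struct address_space_operations example_aops = {
	.set_page_dirty	= __set_page_dirty_buffers,
};
#endif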
738/*
739 * Write out and wait upon a list of buffers.
740 *
741 * We have conflicting pressures: we want to make sure that all
742 * initially dirty buffers get waited on, but that any subsequently
743 * dirtied buffers don't. After all, we don't want fsync to last
744 * forever if somebody is actively writing to the file.
745 *
746 * Do this in two main stages: first we copy dirty buffers to a
747 * temporary inode list, queueing the writes as we go. Then we clean
748 * up, waiting for those writes to complete.
749 *
750 * During this second stage, any subsequent updates to the file may end
751 * up refiling the buffer on the original inode's dirty list again, so
752 * there is a chance we will end up with a buffer queued for write but
753 * not yet completed on that list. So, as a final cleanup we go through
754 * the osync code to catch these locked, dirty buffers without requeuing
755 * any newly dirty buffers for write.
756 */
757static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
758{
759 struct buffer_head *bh;
760 struct list_head tmp;
761 int err = 0, err2;
762
763 INIT_LIST_HEAD(&tmp);
764
765 spin_lock(lock);
766 while (!list_empty(list)) {
767 bh = BH_ENTRY(list->next);
768 list_del_init(&bh->b_assoc_buffers);
769 if (buffer_dirty(bh) || buffer_locked(bh)) {
770 list_add(&bh->b_assoc_buffers, &tmp);
771 if (buffer_dirty(bh)) {
772 get_bh(bh);
773 spin_unlock(lock);
774 /*
775 * Ensure any pending I/O completes so that
776 * ll_rw_block() actually writes the current
777 * contents - it is a noop if I/O is still in
778 * flight on potentially older contents.
779 */
780 ll_rw_block(SWRITE, 1, &bh);
781 brelse(bh);
782 spin_lock(lock);
783 }
784 }
785 }
786
787 while (!list_empty(&tmp)) {
788 bh = BH_ENTRY(tmp.prev);
789 __remove_assoc_queue(bh);
790 get_bh(bh);
791 spin_unlock(lock);
792 wait_on_buffer(bh);
793 if (!buffer_uptodate(bh))
794 err = -EIO;
795 brelse(bh);
796 spin_lock(lock);
797 }
798
799 spin_unlock(lock);
800 err2 = osync_buffers_list(lock, list);
801 if (err)
802 return err;
803 else
804 return err2;
805}
806
807/*
808 * Invalidate any and all dirty buffers on a given inode. We are
809 * probably unmounting the fs, but that doesn't mean we have already
810 * done a sync(). Just drop the buffers from the inode list.
811 *
812 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
813 * assumes that all the buffers are against the blockdev. Not true
814 * for reiserfs.
815 */
816void invalidate_inode_buffers(struct inode *inode)
817{
818 if (inode_has_buffers(inode)) {
819 struct address_space *mapping = &inode->i_data;
820 struct list_head *list = &mapping->private_list;
821 struct address_space *buffer_mapping = mapping->assoc_mapping;
822
823 spin_lock(&buffer_mapping->private_lock);
824 while (!list_empty(list))
825 __remove_assoc_queue(BH_ENTRY(list->next));
826 spin_unlock(&buffer_mapping->private_lock);
827 }
828}
829
830/*
831 * Remove any clean buffers from the inode's buffer list. This is called
832 * when we're trying to free the inode itself. Those buffers can pin it.
833 *
834 * Returns true if all buffers were removed.
835 */
836int remove_inode_buffers(struct inode *inode)
837{
838 int ret = 1;
839
840 if (inode_has_buffers(inode)) {
841 struct address_space *mapping = &inode->i_data;
842 struct list_head *list = &mapping->private_list;
843 struct address_space *buffer_mapping = mapping->assoc_mapping;
844
845 spin_lock(&buffer_mapping->private_lock);
846 while (!list_empty(list)) {
847 struct buffer_head *bh = BH_ENTRY(list->next);
848 if (buffer_dirty(bh)) {
849 ret = 0;
850 break;
851 }
852 __remove_assoc_queue(bh);
853 }
854 spin_unlock(&buffer_mapping->private_lock);
855 }
856 return ret;
857}
858
859/*
860 * Create the appropriate buffers when given a page for data area and
861 * the size of each buffer.. Use the bh->b_this_page linked list to
862 * follow the buffers created. Return NULL if unable to create more
863 * buffers.
864 *
865 * The retry flag is used to differentiate async IO (paging, swapping)
866 * which may not fail from ordinary buffer allocations.
867 */
868struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
869 int retry)
870{
871 struct buffer_head *bh, *head;
872 long offset;
873
874try_again:
875 head = NULL;
876 offset = PAGE_SIZE;
877 while ((offset -= size) >= 0) {
878 bh = alloc_buffer_head(GFP_NOFS);
879 if (!bh)
880 goto no_grow;
881
882 bh->b_bdev = NULL;
883 bh->b_this_page = head;
884 bh->b_blocknr = -1;
885 head = bh;
886
887 bh->b_state = 0;
888 atomic_set(&bh->b_count, 0);
889 bh->b_private = NULL;
890 bh->b_size = size;
891
892 /* Link the buffer to its page */
893 set_bh_page(bh, page, offset);
894
895 init_buffer(bh, NULL, NULL);
896 }
897 return head;
898/*
899 * In case anything failed, we just free everything we got.
900 */
901no_grow:
902 if (head) {
903 do {
904 bh = head;
905 head = head->b_this_page;
906 free_buffer_head(bh);
907 } while (head);
908 }
909
910 /*
911 * Return failure for non-async IO requests. Async IO requests
912 * are not allowed to fail, so we have to wait until buffer heads
913 * become available. But we don't want tasks sleeping with
914 * partially complete buffers, so all were released above.
915 */
916 if (!retry)
917 return NULL;
918
919 /* We're _really_ low on memory. Now we just
920 * wait for old buffer heads to become free due to
921 * finishing IO. Since this is an async request and
922 * the reserve list is empty, we're sure there are
923 * async buffer heads in use.
924 */
925 free_more_memory();
926 goto try_again;
927}
928EXPORT_SYMBOL_GPL(alloc_page_buffers);
929
930static inline void
931link_dev_buffers(struct page *page, struct buffer_head *head)
932{
933 struct buffer_head *bh, *tail;
934
935 bh = head;
936 do {
937 tail = bh;
938 bh = bh->b_this_page;
939 } while (bh);
940 tail->b_this_page = head;
941 attach_page_buffers(page, head);
942}
943
944/*
945 * Initialise the state of a blockdev page's buffers.
946 */
947static void
948init_page_buffers(struct page *page, struct block_device *bdev,
949 sector_t block, int size)
950{
951 struct buffer_head *head = page_buffers(page);
952 struct buffer_head *bh = head;
953 int uptodate = PageUptodate(page);
954
955 do {
956 if (!buffer_mapped(bh)) {
957 init_buffer(bh, NULL, NULL);
958 bh->b_bdev = bdev;
959 bh->b_blocknr = block;
960 if (uptodate)
961 set_buffer_uptodate(bh);
962 set_buffer_mapped(bh);
963 }
964 block++;
965 bh = bh->b_this_page;
966 } while (bh != head);
967}
968
969/*
970 * Create the page-cache page that contains the requested block.
971 *
972 * This is used purely for blockdev mappings.
973 */
974static struct page *
975grow_dev_page(struct block_device *bdev, sector_t block,
976 pgoff_t index, int size)
977{
978 struct inode *inode = bdev->bd_inode;
979 struct page *page;
980 struct buffer_head *bh;
981
982 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
983 if (!page)
984 return NULL;
985
986 BUG_ON(!PageLocked(page));
987
988 if (page_has_buffers(page)) {
989 bh = page_buffers(page);
990 if (bh->b_size == size) {
991 init_page_buffers(page, bdev, block, size);
992 return page;
993 }
994 if (!try_to_free_buffers(page))
995 goto failed;
996 }
997
998 /*
999 * Allocate some buffers for this page
1000 */
1001 bh = alloc_page_buffers(page, size, 0);
1002 if (!bh)
1003 goto failed;
1004
1005 /*
1006 * Link the page to the buffers and initialise them. Take the
1007 * lock to be atomic wrt __find_get_block(), which does not
1008 * run under the page lock.
1009 */
1010 spin_lock(&inode->i_mapping->private_lock);
1011 link_dev_buffers(page, bh);
1012 init_page_buffers(page, bdev, block, size);
1013 spin_unlock(&inode->i_mapping->private_lock);
1014 return page;
1015
1016failed:
1017 BUG();
1018 unlock_page(page);
1019 page_cache_release(page);
1020 return NULL;
1021}
1022
1023/*
1024 * Create buffers for the specified block device block's page. If
1025 * that page was dirty, the buffers are set dirty also.
1026 *
1027 * Except that's a bug. Attaching dirty buffers to a dirty
1028 * blockdev's page can result in filesystem corruption, because
1029 * some of those buffers may be aliases of filesystem data.
1030 * grow_dev_page() will go BUG() if this happens.
1031 */
1032static int
1033grow_buffers(struct block_device *bdev, sector_t block, int size)
1034{
1035 struct page *page;
1036 pgoff_t index;
1037 int sizebits;
1038
1039 sizebits = -1;
1040 do {
1041 sizebits++;
1042 } while ((size << sizebits) < PAGE_SIZE);
1043
1044 index = block >> sizebits;
1045 block = index << sizebits;
1046
1047 /* Create a page with the proper size buffers.. */
1048 page = grow_dev_page(bdev, block, index, size);
1049 if (!page)
1050 return 0;
1051 unlock_page(page);
1052 page_cache_release(page);
1053 return 1;
1054}
1055
1056static struct buffer_head *
1057__getblk_slow(struct block_device *bdev, sector_t block, int size)
1058{
1059 /* Size must be multiple of hard sectorsize */
1060 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1061 (size < 512 || size > PAGE_SIZE))) {
1062 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1063 size);
1064 printk(KERN_ERR "hardsect size: %d\n",
1065 bdev_hardsect_size(bdev));
1066
1067 dump_stack();
1068 return NULL;
1069 }
1070
1071 for (;;) {
1072 struct buffer_head * bh;
1073
1074 bh = __find_get_block(bdev, block, size);
1075 if (bh)
1076 return bh;
1077
1078 if (!grow_buffers(bdev, block, size))
1079 free_more_memory();
1080 }
1081}
1082
1083/*
1084 * The relationship between dirty buffers and dirty pages:
1085 *
1086 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1087 * the page is tagged dirty in its radix tree.
1088 *
1089 * At all times, the dirtiness of the buffers represents the dirtiness of
1090 * subsections of the page. If the page has buffers, the page dirty bit is
1091 * merely a hint about the true dirty state.
1092 *
1093 * When a page is set dirty in its entirety, all its buffers are marked dirty
1094 * (if the page has buffers).
1095 *
1096 * When a buffer is marked dirty, its page is dirtied, but the page's other
1097 * buffers are not.
1098 *
1099 * Also. When blockdev buffers are explicitly read with bread(), they
1100 * individually become uptodate. But their backing page remains not
1101 * uptodate - even if all of its buffers are uptodate. A subsequent
1102 * block_read_full_page() against that page will discover all the uptodate
1103 * buffers, will set the page uptodate and will perform no I/O.
1104 */
1105
1106/**
1107 * mark_buffer_dirty - mark a buffer_head as needing writeout
1108 * @bh: the buffer_head to mark dirty
1109 *
1110 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1111 * backing page dirty, then tag the page as dirty in its address_space's radix
1112 * tree and then attach the address_space's inode to its superblock's dirty
1113 * inode list.
1114 *
1115 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1116 * mapping->tree_lock and the global inode_lock.
1117 */
1118void fastcall mark_buffer_dirty(struct buffer_head *bh)
1119{
1120 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1121 __set_page_dirty_nobuffers(bh->b_page);
1122}
1123
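/*
 * Illustrative sketch, not part of the original file: the usual
 * read-modify-dirty cycle against a metadata block. sb_bread() is the
 * superblock-relative wrapper declared in buffer_head.h; the block
 * number and the field being updated are made up for the example.
 */
#if 0
static int example_bump_counter(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh = sb_bread(sb, blocknr);

	if (!bh)
		return -EIO;
	((u32 *)bh->b_data)[0]++;	/* modify the cached copy in memory */
	mark_buffer_dirty(bh);		/* dirty bh, its page, and the backing inode */
	brelse(bh);			/* writeback happens later via pdflush */
	return 0;
}
#endif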
1124/*
1125 * Decrement a buffer_head's reference count. If all buffers against a page
1126 * have zero reference count, are clean and unlocked, and if the page is clean
1127 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1128 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1129 * a page but it ends up not being freed, and buffers may later be reattached).
1130 */
1131void __brelse(struct buffer_head * buf)
1132{
1133 if (atomic_read(&buf->b_count)) {
1134 put_bh(buf);
1135 return;
1136 }
1137 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1138 WARN_ON(1);
1139}
1140
1141/*
1142 * bforget() is like brelse(), except it discards any
1143 * potentially dirty data.
1144 */
1145void __bforget(struct buffer_head *bh)
1146{
1147 clear_buffer_dirty(bh);
1148 if (!list_empty(&bh->b_assoc_buffers)) {
1149 struct address_space *buffer_mapping = bh->b_page->mapping;
1150
1151 spin_lock(&buffer_mapping->private_lock);
1152 list_del_init(&bh->b_assoc_buffers);
1153 spin_unlock(&buffer_mapping->private_lock);
1154 }
1155 __brelse(bh);
1156}
1157
1158static struct buffer_head *__bread_slow(struct buffer_head *bh)
1159{
1160 lock_buffer(bh);
1161 if (buffer_uptodate(bh)) {
1162 unlock_buffer(bh);
1163 return bh;
1164 } else {
1165 get_bh(bh);
1166 bh->b_end_io = end_buffer_read_sync;
1167 submit_bh(READ, bh);
1168 wait_on_buffer(bh);
1169 if (buffer_uptodate(bh))
1170 return bh;
1171 }
1172 brelse(bh);
1173 return NULL;
1174}
1175
1176/*
1177 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1178 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1179 * refcount elevated by one when they're in an LRU. A buffer can only appear
1180 * once in a particular CPU's LRU. A single buffer can be present in multiple
1181 * CPU's LRUs at the same time.
1182 *
1183 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1184 * sb_find_get_block().
1185 *
1186 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1187 * a local interrupt disable for that.
1188 */
1189
1190#define BH_LRU_SIZE 8
1191
1192struct bh_lru {
1193 struct buffer_head *bhs[BH_LRU_SIZE];
1194};
1195
1196static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1197
1198#ifdef CONFIG_SMP
1199#define bh_lru_lock() local_irq_disable()
1200#define bh_lru_unlock() local_irq_enable()
1201#else
1202#define bh_lru_lock() preempt_disable()
1203#define bh_lru_unlock() preempt_enable()
1204#endif
1205
1206static inline void check_irqs_on(void)
1207{
1208#ifdef irqs_disabled
1209 BUG_ON(irqs_disabled());
1210#endif
1211}
1212
1213/*
1214 * The LRU management algorithm is dopey-but-simple. Sorry.
1215 */
1216static void bh_lru_install(struct buffer_head *bh)
1217{
1218 struct buffer_head *evictee = NULL;
1219 struct bh_lru *lru;
1220
1221 check_irqs_on();
1222 bh_lru_lock();
1223 lru = &__get_cpu_var(bh_lrus);
1224 if (lru->bhs[0] != bh) {
1225 struct buffer_head *bhs[BH_LRU_SIZE];
1226 int in;
1227 int out = 0;
1228
1229 get_bh(bh);
1230 bhs[out++] = bh;
1231 for (in = 0; in < BH_LRU_SIZE; in++) {
1232 struct buffer_head *bh2 = lru->bhs[in];
1233
1234 if (bh2 == bh) {
1235 __brelse(bh2);
1236 } else {
1237 if (out >= BH_LRU_SIZE) {
1238 BUG_ON(evictee != NULL);
1239 evictee = bh2;
1240 } else {
1241 bhs[out++] = bh2;
1242 }
1243 }
1244 }
1245 while (out < BH_LRU_SIZE)
1246 bhs[out++] = NULL;
1247 memcpy(lru->bhs, bhs, sizeof(bhs));
1248 }
1249 bh_lru_unlock();
1250
1251 if (evictee)
1252 __brelse(evictee);
1253}
1254
1255/*
1256 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1257 */
1258static struct buffer_head *
1259lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1260{
1261 struct buffer_head *ret = NULL;
1262 struct bh_lru *lru;
1263 int i;
1264
1265 check_irqs_on();
1266 bh_lru_lock();
1267 lru = &__get_cpu_var(bh_lrus);
1268 for (i = 0; i < BH_LRU_SIZE; i++) {
1269 struct buffer_head *bh = lru->bhs[i];
1270
1271 if (bh && bh->b_bdev == bdev &&
1272 bh->b_blocknr == block && bh->b_size == size) {
1273 if (i) {
1274 while (i) {
1275 lru->bhs[i] = lru->bhs[i - 1];
1276 i--;
1277 }
1278 lru->bhs[0] = bh;
1279 }
1280 get_bh(bh);
1281 ret = bh;
1282 break;
1283 }
1284 }
1285 bh_lru_unlock();
1286 return ret;
1287}
1288
1289/*
1290 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1291 * it in the LRU and mark it as accessed. If it is not present then return
1292 * NULL
1293 */
1294struct buffer_head *
1295__find_get_block(struct block_device *bdev, sector_t block, int size)
1296{
1297 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1298
1299 if (bh == NULL) {
1300 bh = __find_get_block_slow(bdev, block);
1301 if (bh)
1302 bh_lru_install(bh);
1303 }
1304 if (bh)
1305 touch_buffer(bh);
1306 return bh;
1307}
1308EXPORT_SYMBOL(__find_get_block);
1309
1310/*
1311 * __getblk will locate (and, if necessary, create) the buffer_head
1312 * which corresponds to the passed block_device, block and size. The
1313 * returned buffer has its reference count incremented.
1314 *
1315 * __getblk() cannot fail - it just keeps trying. If you pass it an
1316 * illegal block number, __getblk() will happily return a buffer_head
1317 * which represents the non-existent block. Very weird.
1318 *
1319 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1320 * attempt is failing. FIXME, perhaps?
1321 */
1322struct buffer_head *
1323__getblk(struct block_device *bdev, sector_t block, int size)
1324{
1325 struct buffer_head *bh = __find_get_block(bdev, block, size);
1326
1327 might_sleep();
1328 if (bh == NULL)
1329 bh = __getblk_slow(bdev, block, size);
1330 return bh;
1331}
1332EXPORT_SYMBOL(__getblk);
1333
1334/*
1335 * Do async read-ahead on a buffer..
1336 */
1337void __breadahead(struct block_device *bdev, sector_t block, int size)
1338{
1339 struct buffer_head *bh = __getblk(bdev, block, size);
1340 if (likely(bh)) {
1341 ll_rw_block(READA, 1, &bh);
1342 brelse(bh);
1343 }
1344}
1345EXPORT_SYMBOL(__breadahead);
1346
1347/**
1348 * __bread() - reads a specified block and returns the bh
67be2dd1 1349 * @bdev: the block_device to read from
1350 * @block: number of block
1351 * @size: size (in bytes) to read
1352 *
1353 * Reads a specified block, and returns buffer head that contains it.
1354 * It returns NULL if the block was unreadable.
1355 */
1356struct buffer_head *
1357__bread(struct block_device *bdev, sector_t block, int size)
1358{
1359 struct buffer_head *bh = __getblk(bdev, block, size);
1360
1361 if (likely(bh) && !buffer_uptodate(bh))
1362 bh = __bread_slow(bh);
1363 return bh;
1364}
1365EXPORT_SYMBOL(__bread);
1366
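/*
 * Illustrative sketch, not part of the original file: __getblk() only
 * locates or creates the buffer_head, while __bread() also reads it from
 * disk. A caller about to overwrite the whole block can therefore skip
 * the read. The block numbers used here are placeholders.
 */
#if 0
static void example_getblk_vs_bread(struct block_device *bdev, int blocksize)
{
	struct buffer_head *bh;

	bh = __bread(bdev, 1, blocksize);	/* reads block 1; NULL on I/O error */
	if (bh)
		brelse(bh);

	bh = __getblk(bdev, 2, blocksize);	/* never fails, may not be uptodate */
	memset(bh->b_data, 0, bh->b_size);	/* full overwrite, no read needed */
	set_buffer_uptodate(bh);
	mark_buffer_dirty(bh);
	brelse(bh);
}
#endif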
1367/*
1368 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1369 * This doesn't race because it runs in each cpu either in irq
1370 * or with preempt disabled.
1371 */
1372static void invalidate_bh_lru(void *arg)
1373{
1374 struct bh_lru *b = &get_cpu_var(bh_lrus);
1375 int i;
1376
1377 for (i = 0; i < BH_LRU_SIZE; i++) {
1378 brelse(b->bhs[i]);
1379 b->bhs[i] = NULL;
1380 }
1381 put_cpu_var(bh_lrus);
1382}
1383
1384static void invalidate_bh_lrus(void)
1385{
1386 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1387}
1388
1389void set_bh_page(struct buffer_head *bh,
1390 struct page *page, unsigned long offset)
1391{
1392 bh->b_page = page;
1393 BUG_ON(offset >= PAGE_SIZE);
1394 if (PageHighMem(page))
1395 /*
1396 * This catches illegal uses and preserves the offset:
1397 */
1398 bh->b_data = (char *)(0 + offset);
1399 else
1400 bh->b_data = page_address(page) + offset;
1401}
1402EXPORT_SYMBOL(set_bh_page);
1403
1404/*
1405 * Called when truncating a buffer on a page completely.
1406 */
1407static void discard_buffer(struct buffer_head * bh)
1408{
1409 lock_buffer(bh);
1410 clear_buffer_dirty(bh);
1411 bh->b_bdev = NULL;
1412 clear_buffer_mapped(bh);
1413 clear_buffer_req(bh);
1414 clear_buffer_new(bh);
1415 clear_buffer_delay(bh);
1416 unlock_buffer(bh);
1417}
1418
1419/**
1420 * block_invalidatepage - invalidate part or all of a buffer-backed page
1421 *
1422 * @page: the page which is affected
1423 * @offset: the index of the truncation point
1424 *
1425 * block_invalidatepage() is called when all or part of the page has become
1426 * invalidated by a truncate operation.
1427 *
1428 * block_invalidatepage() does not have to release all buffers, but it must
1429 * ensure that no dirty buffer is left outside @offset and that no I/O
1430 * is underway against any of the blocks which are outside the truncation
1431 * point. Because the caller is about to free (and possibly reuse) those
1432 * blocks on-disk.
1433 */
1434void block_invalidatepage(struct page *page, unsigned long offset)
1435{
1436 struct buffer_head *head, *bh, *next;
1437 unsigned int curr_off = 0;
1438
1439 BUG_ON(!PageLocked(page));
1440 if (!page_has_buffers(page))
1441 goto out;
1442
1443 head = page_buffers(page);
1444 bh = head;
1445 do {
1446 unsigned int next_off = curr_off + bh->b_size;
1447 next = bh->b_this_page;
1448
1449 /*
1450 * is this block fully invalidated?
1451 */
1452 if (offset <= curr_off)
1453 discard_buffer(bh);
1454 curr_off = next_off;
1455 bh = next;
1456 } while (bh != head);
1457
1458 /*
1459 * We release buffers only if the entire page is being invalidated.
1460 * The get_block cached value has been unconditionally invalidated,
1461 * so real IO is not possible anymore.
1462 */
1463 if (offset == 0)
1464 try_to_release_page(page, 0);
1465out:
1466 return;
1467}
1468EXPORT_SYMBOL(block_invalidatepage);
1469
1470/*
1471 * We attach and possibly dirty the buffers atomically wrt
1472 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1473 * is already excluded via the page lock.
1474 */
1475void create_empty_buffers(struct page *page,
1476 unsigned long blocksize, unsigned long b_state)
1477{
1478 struct buffer_head *bh, *head, *tail;
1479
1480 head = alloc_page_buffers(page, blocksize, 1);
1481 bh = head;
1482 do {
1483 bh->b_state |= b_state;
1484 tail = bh;
1485 bh = bh->b_this_page;
1486 } while (bh);
1487 tail->b_this_page = head;
1488
1489 spin_lock(&page->mapping->private_lock);
1490 if (PageUptodate(page) || PageDirty(page)) {
1491 bh = head;
1492 do {
1493 if (PageDirty(page))
1494 set_buffer_dirty(bh);
1495 if (PageUptodate(page))
1496 set_buffer_uptodate(bh);
1497 bh = bh->b_this_page;
1498 } while (bh != head);
1499 }
1500 attach_page_buffers(page, head);
1501 spin_unlock(&page->mapping->private_lock);
1502}
1503EXPORT_SYMBOL(create_empty_buffers);
1504
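/*
 * Illustrative sketch, not part of the original file: the pattern used by
 * __block_write_full_page() and block_read_full_page() below - make sure
 * a locked page carries buffers of the filesystem's block size before
 * walking them. The helper name example_page_bufs() is an assumption.
 */
#if 0
static struct buffer_head *example_page_bufs(struct page *page,
					     struct inode *inode)
{
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
	return page_buffers(page);
}
#endif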
1505/*
1506 * We are taking a block for data and we don't want any output from any
1507 * buffer-cache aliases starting from return from that function and
1508 * until the moment when something will explicitly mark the buffer
1509 * dirty (hopefully that will not happen until we will free that block ;-)
1510 * We don't even need to mark it not-uptodate - nobody can expect
1511 * anything from a newly allocated buffer anyway. We used to use
1512 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1513 * don't want to mark the alias unmapped, for example - it would confuse
1514 * anyone who might pick it with bread() afterwards...
1515 *
1516 * Also.. Note that bforget() doesn't lock the buffer. So there can
1517 * be writeout I/O going on against recently-freed buffers. We don't
1518 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1519 * only if we really need to. That happens here.
1520 */
1521void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1522{
1523 struct buffer_head *old_bh;
1524
1525 might_sleep();
1526
1527 old_bh = __find_get_block_slow(bdev, block);
1528 if (old_bh) {
1529 clear_buffer_dirty(old_bh);
1530 wait_on_buffer(old_bh);
1531 clear_buffer_req(old_bh);
1532 __brelse(old_bh);
1533 }
1534}
1535EXPORT_SYMBOL(unmap_underlying_metadata);
1536
1537/*
1538 * NOTE! All mapped/uptodate combinations are valid:
1539 *
1540 * Mapped Uptodate Meaning
1541 *
1542 * No No "unknown" - must do get_block()
1543 * No Yes "hole" - zero-filled
1544 * Yes No "allocated" - allocated on disk, not read in
1545 * Yes Yes "valid" - allocated and up-to-date in memory.
1546 *
1547 * "Dirty" is valid only with the last case (mapped+uptodate).
1548 */
1549
1550/*
1551 * While block_write_full_page is writing back the dirty buffers under
1552 * the page lock, whoever dirtied the buffers may decide to clean them
1553 * again at any time. We handle that by only looking at the buffer
1554 * state inside lock_buffer().
1555 *
1556 * If block_write_full_page() is called for regular writeback
1557 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1558 * locked buffer. This only can happen if someone has written the buffer
1559 * directly, with submit_bh(). At the address_space level PageWriteback
1560 * prevents this contention from occurring.
1561 */
1562static int __block_write_full_page(struct inode *inode, struct page *page,
1563 get_block_t *get_block, struct writeback_control *wbc)
1564{
1565 int err;
1566 sector_t block;
1567 sector_t last_block;
1568 struct buffer_head *bh, *head;
1569 const unsigned blocksize = 1 << inode->i_blkbits;
1570 int nr_underway = 0;
1571
1572 BUG_ON(!PageLocked(page));
1573
1574 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1575
1576 if (!page_has_buffers(page)) {
1577 create_empty_buffers(page, blocksize,
1578 (1 << BH_Dirty)|(1 << BH_Uptodate));
1579 }
1580
1581 /*
1582 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1583 * here, and the (potentially unmapped) buffers may become dirty at
1584 * any time. If a buffer becomes dirty here after we've inspected it
1585 * then we just miss that fact, and the page stays dirty.
1586 *
1587 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1588 * handle that here by just cleaning them.
1589 */
1590
1591 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1592 head = page_buffers(page);
1593 bh = head;
1594
1595 /*
1596 * Get all the dirty buffers mapped to disk addresses and
1597 * handle any aliases from the underlying blockdev's mapping.
1598 */
1599 do {
1600 if (block > last_block) {
1601 /*
1602 * mapped buffers outside i_size will occur, because
1603 * this page can be outside i_size when there is a
1604 * truncate in progress.
1605 */
1606 /*
1607 * The buffer was zeroed by block_write_full_page()
1608 */
1609 clear_buffer_dirty(bh);
1610 set_buffer_uptodate(bh);
1611 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1612 WARN_ON(bh->b_size != blocksize);
1613 err = get_block(inode, block, bh, 1);
1614 if (err)
1615 goto recover;
1616 if (buffer_new(bh)) {
1617 /* blockdev mappings never come here */
1618 clear_buffer_new(bh);
1619 unmap_underlying_metadata(bh->b_bdev,
1620 bh->b_blocknr);
1621 }
1622 }
1623 bh = bh->b_this_page;
1624 block++;
1625 } while (bh != head);
1626
1627 do {
1628 if (!buffer_mapped(bh))
1629 continue;
1630 /*
1631 * If it's a fully non-blocking write attempt and we cannot
1632 * lock the buffer then redirty the page. Note that this can
1633 * potentially cause a busy-wait loop from pdflush and kswapd
1634 * activity, but those code paths have their own higher-level
1635 * throttling.
1636 */
1637 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1638 lock_buffer(bh);
1639 } else if (test_set_buffer_locked(bh)) {
1640 redirty_page_for_writepage(wbc, page);
1641 continue;
1642 }
1643 if (test_clear_buffer_dirty(bh)) {
1644 mark_buffer_async_write(bh);
1645 } else {
1646 unlock_buffer(bh);
1647 }
1648 } while ((bh = bh->b_this_page) != head);
1649
1650 /*
1651 * The page and its buffers are protected by PageWriteback(), so we can
1652 * drop the bh refcounts early.
1653 */
1654 BUG_ON(PageWriteback(page));
1655 set_page_writeback(page);
1656
1657 do {
1658 struct buffer_head *next = bh->b_this_page;
1659 if (buffer_async_write(bh)) {
1660 submit_bh(WRITE, bh);
1661 nr_underway++;
1662 }
1663 bh = next;
1664 } while (bh != head);
1665 unlock_page(page);
1666
1667 err = 0;
1668done:
1669 if (nr_underway == 0) {
1670 /*
1671 * The page was marked dirty, but the buffers were
1672 * clean. Someone wrote them back by hand with
1673 * ll_rw_block/submit_bh. A rare case.
1674 */
1675 int uptodate = 1;
1676 do {
1677 if (!buffer_uptodate(bh)) {
1678 uptodate = 0;
1679 break;
1680 }
1681 bh = bh->b_this_page;
1682 } while (bh != head);
1683 if (uptodate)
1684 SetPageUptodate(page);
1685 end_page_writeback(page);
1686 /*
1687 * The page and buffer_heads can be released at any time from
1688 * here on.
1689 */
1690 wbc->pages_skipped++; /* We didn't write this page */
1691 }
1692 return err;
1693
1694recover:
1695 /*
1696 * ENOSPC, or some other error. We may already have added some
1697 * blocks to the file, so we need to write these out to avoid
1698 * exposing stale data.
1699 * The page is currently locked and not marked for writeback
1700 */
1701 bh = head;
1702 /* Recovery: lock and submit the mapped buffers */
1703 do {
1704 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1705 lock_buffer(bh);
1706 mark_buffer_async_write(bh);
1707 } else {
1708 /*
1709 * The buffer may have been set dirty during
1710 * attachment to a dirty page.
1711 */
1712 clear_buffer_dirty(bh);
1713 }
1714 } while ((bh = bh->b_this_page) != head);
1715 SetPageError(page);
1716 BUG_ON(PageWriteback(page));
1717 set_page_writeback(page);
1718 unlock_page(page);
1719 do {
1720 struct buffer_head *next = bh->b_this_page;
1721 if (buffer_async_write(bh)) {
1722 clear_buffer_dirty(bh);
1723 submit_bh(WRITE, bh);
1724 nr_underway++;
1725 }
1726 bh = next;
1727 } while (bh != head);
1728 goto done;
1729}
1730
1731static int __block_prepare_write(struct inode *inode, struct page *page,
1732 unsigned from, unsigned to, get_block_t *get_block)
1733{
1734 unsigned block_start, block_end;
1735 sector_t block;
1736 int err = 0;
1737 unsigned blocksize, bbits;
1738 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1739
1740 BUG_ON(!PageLocked(page));
1741 BUG_ON(from > PAGE_CACHE_SIZE);
1742 BUG_ON(to > PAGE_CACHE_SIZE);
1743 BUG_ON(from > to);
1744
1745 blocksize = 1 << inode->i_blkbits;
1746 if (!page_has_buffers(page))
1747 create_empty_buffers(page, blocksize, 0);
1748 head = page_buffers(page);
1749
1750 bbits = inode->i_blkbits;
1751 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1752
1753 for(bh = head, block_start = 0; bh != head || !block_start;
1754 block++, block_start=block_end, bh = bh->b_this_page) {
1755 block_end = block_start + blocksize;
1756 if (block_end <= from || block_start >= to) {
1757 if (PageUptodate(page)) {
1758 if (!buffer_uptodate(bh))
1759 set_buffer_uptodate(bh);
1760 }
1761 continue;
1762 }
1763 if (buffer_new(bh))
1764 clear_buffer_new(bh);
1765 if (!buffer_mapped(bh)) {
1766 WARN_ON(bh->b_size != blocksize);
1767 err = get_block(inode, block, bh, 1);
1768 if (err)
1769 break;
1770 if (buffer_new(bh)) {
1771 unmap_underlying_metadata(bh->b_bdev,
1772 bh->b_blocknr);
1773 if (PageUptodate(page)) {
1774 set_buffer_uptodate(bh);
1775 continue;
1776 }
1777 if (block_end > to || block_start < from) {
1778 void *kaddr;
1779
1780 kaddr = kmap_atomic(page, KM_USER0);
1781 if (block_end > to)
1782 memset(kaddr+to, 0,
1783 block_end-to);
1784 if (block_start < from)
1785 memset(kaddr+block_start,
1786 0, from-block_start);
1787 flush_dcache_page(page);
1788 kunmap_atomic(kaddr, KM_USER0);
1789 }
1790 continue;
1791 }
1792 }
1793 if (PageUptodate(page)) {
1794 if (!buffer_uptodate(bh))
1795 set_buffer_uptodate(bh);
1796 continue;
1797 }
1798 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1799 (block_start < from || block_end > to)) {
1800 ll_rw_block(READ, 1, &bh);
1801 *wait_bh++=bh;
1802 }
1803 }
1804 /*
1805 * If we issued read requests - let them complete.
1806 */
1807 while(wait_bh > wait) {
1808 wait_on_buffer(*--wait_bh);
1809 if (!buffer_uptodate(*wait_bh))
1810 err = -EIO;
1811 }
1812 if (!err) {
1813 bh = head;
1814 do {
1815 if (buffer_new(bh))
1816 clear_buffer_new(bh);
1817 } while ((bh = bh->b_this_page) != head);
1818 return 0;
1819 }
1820 /* Error case: */
1821 /*
1822 * Zero out any newly allocated blocks to avoid exposing stale
1823 * data. If BH_New is set, we know that the block was newly
1824 * allocated in the above loop.
1825 */
1826 bh = head;
1827 block_start = 0;
1828 do {
1829 block_end = block_start+blocksize;
1830 if (block_end <= from)
1831 goto next_bh;
1832 if (block_start >= to)
1833 break;
1834 if (buffer_new(bh)) {
1835 void *kaddr;
1836
1837 clear_buffer_new(bh);
1838 kaddr = kmap_atomic(page, KM_USER0);
1839 memset(kaddr+block_start, 0, bh->b_size);
1840 kunmap_atomic(kaddr, KM_USER0);
1841 set_buffer_uptodate(bh);
1842 mark_buffer_dirty(bh);
1843 }
1844next_bh:
1845 block_start = block_end;
1846 bh = bh->b_this_page;
1847 } while (bh != head);
1848 return err;
1849}
1850
1851static int __block_commit_write(struct inode *inode, struct page *page,
1852 unsigned from, unsigned to)
1853{
1854 unsigned block_start, block_end;
1855 int partial = 0;
1856 unsigned blocksize;
1857 struct buffer_head *bh, *head;
1858
1859 blocksize = 1 << inode->i_blkbits;
1860
1861 for(bh = head = page_buffers(page), block_start = 0;
1862 bh != head || !block_start;
1863 block_start=block_end, bh = bh->b_this_page) {
1864 block_end = block_start + blocksize;
1865 if (block_end <= from || block_start >= to) {
1866 if (!buffer_uptodate(bh))
1867 partial = 1;
1868 } else {
1869 set_buffer_uptodate(bh);
1870 mark_buffer_dirty(bh);
1871 }
1872 }
1873
1874 /*
1875 * If this is a partial write which happened to make all buffers
1876 * uptodate then we can optimize away a bogus readpage() for
1877 * the next read(). Here we 'discover' whether the page went
1878 * uptodate as a result of this (potentially partial) write.
1879 */
1880 if (!partial)
1881 SetPageUptodate(page);
1882 return 0;
1883}
1884
1885/*
1886 * Generic "read page" function for block devices that have the normal
1887 * get_block functionality. This covers most of the block device filesystems.
1888 * Reads the page asynchronously --- the unlock_buffer() and
1889 * set/clear_buffer_uptodate() functions propagate buffer state into the
1890 * page struct once IO has completed.
1891 */
1892int block_read_full_page(struct page *page, get_block_t *get_block)
1893{
1894 struct inode *inode = page->mapping->host;
1895 sector_t iblock, lblock;
1896 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
1897 unsigned int blocksize;
1898 int nr, i;
1899 int fully_mapped = 1;
1900
cd7619d6 1901 BUG_ON(!PageLocked(page));
1da177e4
LT
1902 blocksize = 1 << inode->i_blkbits;
1903 if (!page_has_buffers(page))
1904 create_empty_buffers(page, blocksize, 0);
1905 head = page_buffers(page);
1906
1907 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1908 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
1909 bh = head;
1910 nr = 0;
1911 i = 0;
1912
1913 do {
1914 if (buffer_uptodate(bh))
1915 continue;
1916
1917 if (!buffer_mapped(bh)) {
c64610ba
AM
1918 int err = 0;
1919
1da177e4
LT
1920 fully_mapped = 0;
1921 if (iblock < lblock) {
b0cf2321 1922 WARN_ON(bh->b_size != blocksize);
c64610ba
AM
1923 err = get_block(inode, iblock, bh, 0);
1924 if (err)
1da177e4
LT
1925 SetPageError(page);
1926 }
1927 if (!buffer_mapped(bh)) {
1928 void *kaddr = kmap_atomic(page, KM_USER0);
1929 memset(kaddr + i * blocksize, 0, blocksize);
1930 flush_dcache_page(page);
1931 kunmap_atomic(kaddr, KM_USER0);
c64610ba
AM
1932 if (!err)
1933 set_buffer_uptodate(bh);
1da177e4
LT
1934 continue;
1935 }
1936 /*
1937 * get_block() might have updated the buffer
1938 * synchronously
1939 */
1940 if (buffer_uptodate(bh))
1941 continue;
1942 }
1943 arr[nr++] = bh;
1944 } while (i++, iblock++, (bh = bh->b_this_page) != head);
1945
1946 if (fully_mapped)
1947 SetPageMappedToDisk(page);
1948
1949 if (!nr) {
1950 /*
1951 * All buffers are uptodate - we can set the page uptodate
1952 * as well. But not if get_block() returned an error.
1953 */
1954 if (!PageError(page))
1955 SetPageUptodate(page);
1956 unlock_page(page);
1957 return 0;
1958 }
1959
1960 /* Stage two: lock the buffers */
1961 for (i = 0; i < nr; i++) {
1962 bh = arr[i];
1963 lock_buffer(bh);
1964 mark_buffer_async_read(bh);
1965 }
1966
1967 /*
1968 * Stage 3: start the IO. Check for uptodateness
1969 * inside the buffer lock in case another process reading
1970 * the underlying blockdev brought it uptodate (the sct fix).
1971 */
1972 for (i = 0; i < nr; i++) {
1973 bh = arr[i];
1974 if (buffer_uptodate(bh))
1975 end_buffer_async_read(bh, 1);
1976 else
1977 submit_bh(READ, bh);
1978 }
1979 return 0;
1980}
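/*
 * Illustrative sketch, not part of the original file: a filesystem's
 * ->readpage method usually just wraps block_read_full_page() with its
 * own block-mapping routine.  "myfs_get_block" and "myfs_readpage" are
 * hypothetical names used only for this and the following sketches.
 */
static int myfs_get_block(struct inode *inode, sector_t iblock,
                          struct buffer_head *bh_result, int create);

static int myfs_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, myfs_get_block);
}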
1981
1982/* utility function for filesystems that need to do work on expanding
1983 * truncates. Uses prepare/commit_write to allow the filesystem to
1984 * deal with the hole.
1985 */
05eb0b51
OH
1986static int __generic_cont_expand(struct inode *inode, loff_t size,
1987 pgoff_t index, unsigned int offset)
1da177e4
LT
1988{
1989 struct address_space *mapping = inode->i_mapping;
1990 struct page *page;
05eb0b51 1991 unsigned long limit;
1da177e4
LT
1992 int err;
1993
1994 err = -EFBIG;
1995 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
1996 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
1997 send_sig(SIGXFSZ, current, 0);
1998 goto out;
1999 }
2000 if (size > inode->i_sb->s_maxbytes)
2001 goto out;
2002
1da177e4
LT
2003 err = -ENOMEM;
2004 page = grab_cache_page(mapping, index);
2005 if (!page)
2006 goto out;
2007 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
05eb0b51
OH
2008 if (err) {
2009 /*
2010 * ->prepare_write() may have instantiated a few blocks
2011 * outside i_size. Trim these off again.
2012 */
2013 unlock_page(page);
2014 page_cache_release(page);
2015 vmtruncate(inode, inode->i_size);
2016 goto out;
1da177e4 2017 }
05eb0b51
OH
2018
2019 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2020
1da177e4
LT
2021 unlock_page(page);
2022 page_cache_release(page);
2023 if (err > 0)
2024 err = 0;
2025out:
2026 return err;
2027}
2028
05eb0b51
OH
2029int generic_cont_expand(struct inode *inode, loff_t size)
2030{
2031 pgoff_t index;
2032 unsigned int offset;
2033
2034 offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2035
2036 /* Ugh. In prepare/commit_write, if from==to==start of block, we
2037 * skip the prepare. Make sure we never send an offset for the start
2038 * of a block.
2039 */
2040 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2041 /* caller must handle this extra byte. */
2042 offset++;
2043 }
2044 index = size >> PAGE_CACHE_SHIFT;
2045
2046 return __generic_cont_expand(inode, size, index, offset);
2047}
2048
2049int generic_cont_expand_simple(struct inode *inode, loff_t size)
2050{
2051 loff_t pos = size - 1;
2052 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2053 unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2054
2055 /* prepare/commit_write can handle even if from==to==start of block. */
2056 return __generic_cont_expand(inode, size, index, offset);
2057}
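/*
 * Illustrative sketch, not part of the original file: a filesystem that
 * cannot represent holes might grow a file from its ->setattr path
 * roughly like this before allowing the size change to proceed.  Error
 * handling is reduced to the essentials; "myfs_expand" is hypothetical.
 */
static int myfs_expand(struct inode *inode, loff_t new_size)
{
        if (new_size <= i_size_read(inode))
                return 0;
        return generic_cont_expand_simple(inode, new_size);
}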
2058
1da177e4
LT
2059/*
2060 * For moronic filesystems that do not allow holes in files.
2061 * We may have to extend the file.
2062 */
2063
2064int cont_prepare_write(struct page *page, unsigned offset,
2065 unsigned to, get_block_t *get_block, loff_t *bytes)
2066{
2067 struct address_space *mapping = page->mapping;
2068 struct inode *inode = mapping->host;
2069 struct page *new_page;
2070 pgoff_t pgpos;
2071 long status;
2072 unsigned zerofrom;
2073 unsigned blocksize = 1 << inode->i_blkbits;
2074 void *kaddr;
2075
2076 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2077 status = -ENOMEM;
2078 new_page = grab_cache_page(mapping, pgpos);
2079 if (!new_page)
2080 goto out;
2081 /* we might sleep */
2082 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2083 unlock_page(new_page);
2084 page_cache_release(new_page);
2085 continue;
2086 }
2087 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2088 if (zerofrom & (blocksize-1)) {
2089 *bytes |= (blocksize-1);
2090 (*bytes)++;
2091 }
2092 status = __block_prepare_write(inode, new_page, zerofrom,
2093 PAGE_CACHE_SIZE, get_block);
2094 if (status)
2095 goto out_unmap;
2096 kaddr = kmap_atomic(new_page, KM_USER0);
2097 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2098 flush_dcache_page(new_page);
2099 kunmap_atomic(kaddr, KM_USER0);
2100 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2101 unlock_page(new_page);
2102 page_cache_release(new_page);
2103 }
2104
2105 if (page->index < pgpos) {
2106 /* completely inside the area */
2107 zerofrom = offset;
2108 } else {
2109 /* page covers the boundary, find the boundary offset */
2110 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2111
2112 /* if we will expand the thing last block will be filled */
2113 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2114 *bytes |= (blocksize-1);
2115 (*bytes)++;
2116 }
2117
2118 /* starting below the boundary? Nothing to zero out */
2119 if (offset <= zerofrom)
2120 zerofrom = offset;
2121 }
2122 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2123 if (status)
2124 goto out1;
2125 if (zerofrom < offset) {
2126 kaddr = kmap_atomic(page, KM_USER0);
2127 memset(kaddr+zerofrom, 0, offset-zerofrom);
2128 flush_dcache_page(page);
2129 kunmap_atomic(kaddr, KM_USER0);
2130 __block_commit_write(inode, page, zerofrom, offset);
2131 }
2132 return 0;
2133out1:
2134 ClearPageUptodate(page);
2135 return status;
2136
2137out_unmap:
2138 ClearPageUptodate(new_page);
2139 unlock_page(new_page);
2140 page_cache_release(new_page);
2141out:
2142 return status;
2143}
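/*
 * Illustrative sketch, not part of the original file: a hole-less
 * filesystem's ->prepare_write typically wraps cont_prepare_write(),
 * handing it a pointer to the filesystem's own record of how many bytes
 * of the file are really allocated so far.  "myfs_allocated_bytes" and
 * "myfs_get_block" are hypothetical helpers assumed for this example.
 */
static loff_t *myfs_allocated_bytes(struct inode *inode);   /* hypothetical */

static int myfs_cont_prepare_write(struct file *file, struct page *page,
                                   unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;

        return cont_prepare_write(page, from, to, myfs_get_block,
                                  myfs_allocated_bytes(inode));
}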
2144
2145int block_prepare_write(struct page *page, unsigned from, unsigned to,
2146 get_block_t *get_block)
2147{
2148 struct inode *inode = page->mapping->host;
2149 int err = __block_prepare_write(inode, page, from, to, get_block);
2150 if (err)
2151 ClearPageUptodate(page);
2152 return err;
2153}
2154
2155int block_commit_write(struct page *page, unsigned from, unsigned to)
2156{
2157 struct inode *inode = page->mapping->host;
2158 __block_commit_write(inode,page,from,to);
2159 return 0;
2160}
2161
2162int generic_commit_write(struct file *file, struct page *page,
2163 unsigned from, unsigned to)
2164{
2165 struct inode *inode = page->mapping->host;
2166 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2167 __block_commit_write(inode,page,from,to);
2168 /*
2169 * No need to use i_size_read() here, the i_size
1b1dcc1b 2170 * cannot change under us because we hold i_mutex.
1da177e4
LT
2171 */
2172 if (pos > inode->i_size) {
2173 i_size_write(inode, pos);
2174 mark_inode_dirty(inode);
2175 }
2176 return 0;
2177}
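/*
 * Illustrative sketch, not part of the original file: how a simple
 * filesystem commonly wires the prepare/commit helpers into its
 * address_space_operations.  "myfs_prepare_write" and "myfs_aops" are
 * hypothetical; "myfs_readpage" and "myfs_get_block" are the
 * hypothetical helpers from the earlier sketch.
 */
static int myfs_prepare_write(struct file *file, struct page *page,
                              unsigned from, unsigned to)
{
        return block_prepare_write(page, from, to, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
        .readpage       = myfs_readpage,
        .prepare_write  = myfs_prepare_write,
        .commit_write   = generic_commit_write,
        .sync_page      = block_sync_page,
};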
2178
2179
2180/*
2181 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2182 * immediately, while under the page lock. So it needs a special end_io
2183 * handler which does not touch the bh after unlocking it.
2184 *
2185 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2186 * a race there is benign: unlock_buffer() only uses the bh's address for
2187 * hashing after unlocking the buffer, so it doesn't actually touch the bh
2188 * itself.
2189 */
2190static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2191{
2192 if (uptodate) {
2193 set_buffer_uptodate(bh);
2194 } else {
2195 /* This happens, due to failed READA attempts. */
2196 clear_buffer_uptodate(bh);
2197 }
2198 unlock_buffer(bh);
2199}
2200
2201/*
2202 * On entry, the page is fully not uptodate.
2203 * On exit the page is fully uptodate in the areas outside (from,to)
2204 */
2205int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2206 get_block_t *get_block)
2207{
2208 struct inode *inode = page->mapping->host;
2209 const unsigned blkbits = inode->i_blkbits;
2210 const unsigned blocksize = 1 << blkbits;
2211 struct buffer_head map_bh;
2212 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2213 unsigned block_in_page;
2214 unsigned block_start;
2215 sector_t block_in_file;
2216 char *kaddr;
2217 int nr_reads = 0;
2218 int i;
2219 int ret = 0;
2220 int is_mapped_to_disk = 1;
2221 int dirtied_it = 0;
2222
2223 if (PageMappedToDisk(page))
2224 return 0;
2225
2226 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2227 map_bh.b_page = page;
2228
2229 /*
2230 * We loop across all blocks in the page, whether or not they are
2231 * part of the affected region. This is so we can discover if the
2232 * page is fully mapped-to-disk.
2233 */
2234 for (block_start = 0, block_in_page = 0;
2235 block_start < PAGE_CACHE_SIZE;
2236 block_in_page++, block_start += blocksize) {
2237 unsigned block_end = block_start + blocksize;
2238 int create;
2239
2240 map_bh.b_state = 0;
2241 create = 1;
2242 if (block_start >= to)
2243 create = 0;
b0cf2321 2244 map_bh.b_size = blocksize;
1da177e4
LT
2245 ret = get_block(inode, block_in_file + block_in_page,
2246 &map_bh, create);
2247 if (ret)
2248 goto failed;
2249 if (!buffer_mapped(&map_bh))
2250 is_mapped_to_disk = 0;
2251 if (buffer_new(&map_bh))
2252 unmap_underlying_metadata(map_bh.b_bdev,
2253 map_bh.b_blocknr);
2254 if (PageUptodate(page))
2255 continue;
2256 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2257 kaddr = kmap_atomic(page, KM_USER0);
2258 if (block_start < from) {
2259 memset(kaddr+block_start, 0, from-block_start);
2260 dirtied_it = 1;
2261 }
2262 if (block_end > to) {
2263 memset(kaddr + to, 0, block_end - to);
2264 dirtied_it = 1;
2265 }
2266 flush_dcache_page(page);
2267 kunmap_atomic(kaddr, KM_USER0);
2268 continue;
2269 }
2270 if (buffer_uptodate(&map_bh))
2271 continue; /* reiserfs does this */
2272 if (block_start < from || block_end > to) {
2273 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2274
2275 if (!bh) {
2276 ret = -ENOMEM;
2277 goto failed;
2278 }
2279 bh->b_state = map_bh.b_state;
2280 atomic_set(&bh->b_count, 0);
2281 bh->b_this_page = NULL;
2282 bh->b_page = page;
2283 bh->b_blocknr = map_bh.b_blocknr;
2284 bh->b_size = blocksize;
2285 bh->b_data = (char *)(long)block_start;
2286 bh->b_bdev = map_bh.b_bdev;
2287 bh->b_private = NULL;
2288 read_bh[nr_reads++] = bh;
2289 }
2290 }
2291
2292 if (nr_reads) {
2293 struct buffer_head *bh;
2294
2295 /*
2296 * The page is locked, so these buffers are protected from
2297 * any VM or truncate activity. Hence we don't need to care
2298 * for the buffer_head refcounts.
2299 */
2300 for (i = 0; i < nr_reads; i++) {
2301 bh = read_bh[i];
2302 lock_buffer(bh);
2303 bh->b_end_io = end_buffer_read_nobh;
2304 submit_bh(READ, bh);
2305 }
2306 for (i = 0; i < nr_reads; i++) {
2307 bh = read_bh[i];
2308 wait_on_buffer(bh);
2309 if (!buffer_uptodate(bh))
2310 ret = -EIO;
2311 free_buffer_head(bh);
2312 read_bh[i] = NULL;
2313 }
2314 if (ret)
2315 goto failed;
2316 }
2317
2318 if (is_mapped_to_disk)
2319 SetPageMappedToDisk(page);
2320 SetPageUptodate(page);
2321
2322 /*
2323 * Setting the page dirty here isn't necessary for the prepare_write
2324 * function - commit_write will do that. But if/when this function is
2325 * used within the pagefault handler to ensure that all mmapped pages
2326 * have backing space in the filesystem, we will need to dirty the page
2327 * if its contents were altered.
2328 */
2329 if (dirtied_it)
2330 set_page_dirty(page);
2331
2332 return 0;
2333
2334failed:
2335 for (i = 0; i < nr_reads; i++) {
2336 if (read_bh[i])
2337 free_buffer_head(read_bh[i]);
2338 }
2339
2340 /*
2341 * Error recovery is pretty slack. Clear the page and mark it dirty
2342 * so we'll later zero out any blocks which _were_ allocated.
2343 */
2344 kaddr = kmap_atomic(page, KM_USER0);
2345 memset(kaddr, 0, PAGE_CACHE_SIZE);
2346 kunmap_atomic(kaddr, KM_USER0);
2347 SetPageUptodate(page);
2348 set_page_dirty(page);
2349 return ret;
2350}
2351EXPORT_SYMBOL(nobh_prepare_write);
2352
2353int nobh_commit_write(struct file *file, struct page *page,
2354 unsigned from, unsigned to)
2355{
2356 struct inode *inode = page->mapping->host;
2357 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2358
2359 set_page_dirty(page);
2360 if (pos > inode->i_size) {
2361 i_size_write(inode, pos);
2362 mark_inode_dirty(inode);
2363 }
2364 return 0;
2365}
2366EXPORT_SYMBOL(nobh_commit_write);
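/*
 * Illustrative sketch, not part of the original file: a filesystem
 * opting for the nobh variants (as ext2 does with its "nobh" mount
 * option) wires them up much like the buffer_head-based helpers.
 * "myfs_nobh_prepare_write" is a hypothetical wrapper around the
 * hypothetical myfs_get_block from the earlier sketch.
 */
static int myfs_nobh_prepare_write(struct file *file, struct page *page,
                                   unsigned from, unsigned to)
{
        return nobh_prepare_write(page, from, to, myfs_get_block);
}
/* ...with .prepare_write = myfs_nobh_prepare_write and
 * .commit_write = nobh_commit_write in the address_space_operations. */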
2367
2368/*
2369 * nobh_writepage() - based on block_write_full_page() except
2370 * that it tries to operate without attaching bufferheads to
2371 * the page.
2372 */
2373int nobh_writepage(struct page *page, get_block_t *get_block,
2374 struct writeback_control *wbc)
2375{
2376 struct inode * const inode = page->mapping->host;
2377 loff_t i_size = i_size_read(inode);
2378 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2379 unsigned offset;
2380 void *kaddr;
2381 int ret;
2382
2383 /* Is the page fully inside i_size? */
2384 if (page->index < end_index)
2385 goto out;
2386
2387 /* Is the page fully outside i_size? (truncate in progress) */
2388 offset = i_size & (PAGE_CACHE_SIZE-1);
2389 if (page->index >= end_index+1 || !offset) {
2390 /*
2391 * The page may have dirty, unmapped buffers. For example,
2392 * they may have been added in ext3_writepage(). Make them
2393 * freeable here, so the page does not leak.
2394 */
2395#if 0
2396 /* Not really sure about this - do we need this ? */
2397 if (page->mapping->a_ops->invalidatepage)
2398 page->mapping->a_ops->invalidatepage(page, offset);
2399#endif
2400 unlock_page(page);
2401 return 0; /* don't care */
2402 }
2403
2404 /*
2405 * The page straddles i_size. It must be zeroed out on each and every
2406 * writepage invocation because it may be mmapped. "A file is mapped
2407 * in multiples of the page size. For a file that is not a multiple of
2408 * the page size, the remaining memory is zeroed when mapped, and
2409 * writes to that region are not written out to the file."
2410 */
2411 kaddr = kmap_atomic(page, KM_USER0);
2412 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2413 flush_dcache_page(page);
2414 kunmap_atomic(kaddr, KM_USER0);
2415out:
2416 ret = mpage_writepage(page, get_block, wbc);
2417 if (ret == -EAGAIN)
2418 ret = __block_write_full_page(inode, page, get_block, wbc);
2419 return ret;
2420}
2421EXPORT_SYMBOL(nobh_writepage);
2422
2423/*
2424 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2425 */
2426int nobh_truncate_page(struct address_space *mapping, loff_t from)
2427{
2428 struct inode *inode = mapping->host;
2429 unsigned blocksize = 1 << inode->i_blkbits;
2430 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2431 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2432 unsigned to;
2433 struct page *page;
f5e54d6e 2434 const struct address_space_operations *a_ops = mapping->a_ops;
1da177e4
LT
2435 char *kaddr;
2436 int ret = 0;
2437
2438 if ((offset & (blocksize - 1)) == 0)
2439 goto out;
2440
2441 ret = -ENOMEM;
2442 page = grab_cache_page(mapping, index);
2443 if (!page)
2444 goto out;
2445
2446 to = (offset + blocksize) & ~(blocksize - 1);
2447 ret = a_ops->prepare_write(NULL, page, offset, to);
2448 if (ret == 0) {
2449 kaddr = kmap_atomic(page, KM_USER0);
2450 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2451 flush_dcache_page(page);
2452 kunmap_atomic(kaddr, KM_USER0);
2453 set_page_dirty(page);
2454 }
2455 unlock_page(page);
2456 page_cache_release(page);
2457out:
2458 return ret;
2459}
2460EXPORT_SYMBOL(nobh_truncate_page);
2461
2462int block_truncate_page(struct address_space *mapping,
2463 loff_t from, get_block_t *get_block)
2464{
2465 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2466 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2467 unsigned blocksize;
54b21a79 2468 sector_t iblock;
1da177e4
LT
2469 unsigned length, pos;
2470 struct inode *inode = mapping->host;
2471 struct page *page;
2472 struct buffer_head *bh;
2473 void *kaddr;
2474 int err;
2475
2476 blocksize = 1 << inode->i_blkbits;
2477 length = offset & (blocksize - 1);
2478
2479 /* Block boundary? Nothing to do */
2480 if (!length)
2481 return 0;
2482
2483 length = blocksize - length;
54b21a79 2484 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4
LT
2485
2486 page = grab_cache_page(mapping, index);
2487 err = -ENOMEM;
2488 if (!page)
2489 goto out;
2490
2491 if (!page_has_buffers(page))
2492 create_empty_buffers(page, blocksize, 0);
2493
2494 /* Find the buffer that contains "offset" */
2495 bh = page_buffers(page);
2496 pos = blocksize;
2497 while (offset >= pos) {
2498 bh = bh->b_this_page;
2499 iblock++;
2500 pos += blocksize;
2501 }
2502
2503 err = 0;
2504 if (!buffer_mapped(bh)) {
b0cf2321 2505 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
2506 err = get_block(inode, iblock, bh, 0);
2507 if (err)
2508 goto unlock;
2509 /* unmapped? It's a hole - nothing to do */
2510 if (!buffer_mapped(bh))
2511 goto unlock;
2512 }
2513
2514 /* Ok, it's mapped. Make sure it's up-to-date */
2515 if (PageUptodate(page))
2516 set_buffer_uptodate(bh);
2517
2518 if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2519 err = -EIO;
2520 ll_rw_block(READ, 1, &bh);
2521 wait_on_buffer(bh);
2522 /* Uhhuh. Read error. Complain and punt. */
2523 if (!buffer_uptodate(bh))
2524 goto unlock;
2525 }
2526
2527 kaddr = kmap_atomic(page, KM_USER0);
2528 memset(kaddr + offset, 0, length);
2529 flush_dcache_page(page);
2530 kunmap_atomic(kaddr, KM_USER0);
2531
2532 mark_buffer_dirty(bh);
2533 err = 0;
2534
2535unlock:
2536 unlock_page(page);
2537 page_cache_release(page);
2538out:
2539 return err;
2540}
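/*
 * Illustrative sketch, not part of the original file: a filesystem's
 * truncate path typically zeroes the tail of the new last block with
 * block_truncate_page() before releasing the blocks beyond the new
 * i_size.  "myfs_truncate" and "myfs_get_block" are hypothetical.
 */
static void myfs_truncate(struct inode *inode)
{
        block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
        /* ... then free the on-disk blocks past the new i_size ... */
}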
2541
2542/*
2543 * The generic ->writepage function for buffer-backed address_spaces
2544 */
2545int block_write_full_page(struct page *page, get_block_t *get_block,
2546 struct writeback_control *wbc)
2547{
2548 struct inode * const inode = page->mapping->host;
2549 loff_t i_size = i_size_read(inode);
2550 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2551 unsigned offset;
2552 void *kaddr;
2553
2554 /* Is the page fully inside i_size? */
2555 if (page->index < end_index)
2556 return __block_write_full_page(inode, page, get_block, wbc);
2557
2558 /* Is the page fully outside i_size? (truncate in progress) */
2559 offset = i_size & (PAGE_CACHE_SIZE-1);
2560 if (page->index >= end_index+1 || !offset) {
2561 /*
2562 * The page may have dirty, unmapped buffers. For example,
2563 * they may have been added in ext3_writepage(). Make them
2564 * freeable here, so the page does not leak.
2565 */
aaa4059b 2566 do_invalidatepage(page, 0);
1da177e4
LT
2567 unlock_page(page);
2568 return 0; /* don't care */
2569 }
2570
2571 /*
2572 * The page straddles i_size. It must be zeroed out on each and every
2573 * writepage invocation because it may be mmapped. "A file is mapped
2574 * in multiples of the page size. For a file that is not a multiple of
2575 * the page size, the remaining memory is zeroed when mapped, and
2576 * writes to that region are not written out to the file."
2577 */
2578 kaddr = kmap_atomic(page, KM_USER0);
2579 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2580 flush_dcache_page(page);
2581 kunmap_atomic(kaddr, KM_USER0);
2582 return __block_write_full_page(inode, page, get_block, wbc);
2583}
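/*
 * Illustrative sketch, not part of the original file: the matching
 * ->writepage wrapper.  "myfs_writepage" is hypothetical and uses the
 * hypothetical myfs_get_block from the earlier sketch.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, myfs_get_block, wbc);
}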
2584
2585sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2586 get_block_t *get_block)
2587{
2588 struct buffer_head tmp;
2589 struct inode *inode = mapping->host;
2590 tmp.b_state = 0;
2591 tmp.b_blocknr = 0;
b0cf2321 2592 tmp.b_size = 1 << inode->i_blkbits;
1da177e4
LT
2593 get_block(inode, block, &tmp, 0);
2594 return tmp.b_blocknr;
2595}
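/*
 * Illustrative sketch, not part of the original file: the usual ->bmap
 * implementation (what the FIBMAP ioctl ultimately calls) is a one-line
 * wrapper.  "myfs_bmap" is a hypothetical name.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, myfs_get_block);
}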
2596
2597static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2598{
2599 struct buffer_head *bh = bio->bi_private;
2600
2601 if (bio->bi_size)
2602 return 1;
2603
2604 if (err == -EOPNOTSUPP) {
2605 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2606 set_bit(BH_Eopnotsupp, &bh->b_state);
2607 }
2608
2609 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2610 bio_put(bio);
2611 return 0;
2612}
2613
2614int submit_bh(int rw, struct buffer_head * bh)
2615{
2616 struct bio *bio;
2617 int ret = 0;
2618
2619 BUG_ON(!buffer_locked(bh));
2620 BUG_ON(!buffer_mapped(bh));
2621 BUG_ON(!bh->b_end_io);
2622
2623 if (buffer_ordered(bh) && (rw == WRITE))
2624 rw = WRITE_BARRIER;
2625
2626 /*
2627 * Only clear out a write error when rewriting, should this
2628 * include WRITE_SYNC as well?
2629 */
2630 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2631 clear_buffer_write_io_error(bh);
2632
2633 /*
2634 * from here on down, it's all bio -- do the initial mapping,
2635 * submit_bio -> generic_make_request may further map this bio around
2636 */
2637 bio = bio_alloc(GFP_NOIO, 1);
2638
2639 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2640 bio->bi_bdev = bh->b_bdev;
2641 bio->bi_io_vec[0].bv_page = bh->b_page;
2642 bio->bi_io_vec[0].bv_len = bh->b_size;
2643 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2644
2645 bio->bi_vcnt = 1;
2646 bio->bi_idx = 0;
2647 bio->bi_size = bh->b_size;
2648
2649 bio->bi_end_io = end_bio_bh_io_sync;
2650 bio->bi_private = bh;
2651
2652 bio_get(bio);
2653 submit_bio(rw, bio);
2654
2655 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2656 ret = -EOPNOTSUPP;
2657
2658 bio_put(bio);
2659 return ret;
2660}
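/*
 * Illustrative sketch, not part of the original file: the canonical
 * pattern for driving a single buffer through submit_bh() by hand -
 * lock it, install a completion handler, submit, then wait.  This is
 * roughly what ll_rw_block() and sync_dirty_buffer() below do for you;
 * "read_one_bh" is a hypothetical helper.
 */
static int read_one_bh(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return 0;
        }
        get_bh(bh);
        bh->b_end_io = end_buffer_read_sync;
        submit_bh(READ, bh);
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
}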
2661
2662/**
2663 * ll_rw_block: low-level access to block devices (DEPRECATED)
a7662236 2664 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
1da177e4
LT
2665 * @nr: number of &struct buffer_heads in the array
2666 * @bhs: array of pointers to &struct buffer_head
2667 *
a7662236
JK
2668 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2669 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2670 * %SWRITE is like %WRITE, only we make sure that the *current* data in the
2671 * buffers is sent to disk. The fourth %READA option is described in the
2672 * documentation for generic_make_request(), which ll_rw_block() calls.
1da177e4
LT
2673 *
2674 * This function drops any buffer that it cannot get a lock on (with the
a7662236
JK
2675 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2676 * clean when doing a write request, and any buffer that appears to be
2677 * up-to-date when doing a read request. Further, it marks as clean buffers that
2678 * are processed for writing (the buffer cache won't assume that they are
2679 * actually clean until the buffer gets unlocked).
1da177e4
LT
2680 *
2681 * ll_rw_block sets b_end_io to a simple completion handler that marks
2682 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2683 * any waiters.
2684 *
2685 * All of the buffers must be for the same device, and must also be a
2686 * multiple of the current approved size for the device.
2687 */
2688void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2689{
2690 int i;
2691
2692 for (i = 0; i < nr; i++) {
2693 struct buffer_head *bh = bhs[i];
2694
a7662236
JK
2695 if (rw == SWRITE)
2696 lock_buffer(bh);
2697 else if (test_set_buffer_locked(bh))
1da177e4
LT
2698 continue;
2699
a7662236 2700 if (rw == WRITE || rw == SWRITE) {
1da177e4 2701 if (test_clear_buffer_dirty(bh)) {
76c3073a 2702 bh->b_end_io = end_buffer_write_sync;
e60e5c50 2703 get_bh(bh);
1da177e4
LT
2704 submit_bh(WRITE, bh);
2705 continue;
2706 }
2707 } else {
1da177e4 2708 if (!buffer_uptodate(bh)) {
76c3073a 2709 bh->b_end_io = end_buffer_read_sync;
e60e5c50 2710 get_bh(bh);
1da177e4
LT
2711 submit_bh(rw, bh);
2712 continue;
2713 }
2714 }
2715 unlock_buffer(bh);
1da177e4
LT
2716 }
2717}
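/*
 * Illustrative sketch, not part of the original file: starting reads on
 * a batch of already-mapped buffers with ll_rw_block() and then waiting
 * for them all.  "read_buffer_array" is a hypothetical helper.
 */
static int read_buffer_array(struct buffer_head *bhs[], int nr)
{
        int i, err = 0;

        ll_rw_block(READ, nr, bhs);
        for (i = 0; i < nr; i++) {
                wait_on_buffer(bhs[i]);
                if (!buffer_uptodate(bhs[i]))
                        err = -EIO;
        }
        return err;
}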
2718
2719/*
2720 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2721 * and then start new I/O and then wait upon it. The caller must have a ref on
2722 * the buffer_head.
2723 */
2724int sync_dirty_buffer(struct buffer_head *bh)
2725{
2726 int ret = 0;
2727
2728 WARN_ON(atomic_read(&bh->b_count) < 1);
2729 lock_buffer(bh);
2730 if (test_clear_buffer_dirty(bh)) {
2731 get_bh(bh);
2732 bh->b_end_io = end_buffer_write_sync;
2733 ret = submit_bh(WRITE, bh);
2734 wait_on_buffer(bh);
2735 if (buffer_eopnotsupp(bh)) {
2736 clear_buffer_eopnotsupp(bh);
2737 ret = -EOPNOTSUPP;
2738 }
2739 if (!ret && !buffer_uptodate(bh))
2740 ret = -EIO;
2741 } else {
2742 unlock_buffer(bh);
2743 }
2744 return ret;
2745}
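/*
 * Illustrative sketch, not part of the original file: the typical caller
 * already holds a reference (e.g. from sb_bread()), modifies the buffer
 * contents, marks it dirty and then forces it out with
 * sync_dirty_buffer().  "update_and_sync" is a hypothetical helper.
 */
static int update_and_sync(struct buffer_head *bh)
{
        /* ... modify bh->b_data here ... */
        mark_buffer_dirty(bh);
        return sync_dirty_buffer(bh);
}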
2746
2747/*
2748 * try_to_free_buffers() checks if all the buffers on this particular page
2749 * are unused, and releases them if so.
2750 *
2751 * Exclusion against try_to_free_buffers may be obtained by either
2752 * locking the page or by holding its mapping's private_lock.
2753 *
2754 * If the page is dirty but all the buffers are clean then we need to
2755 * be sure to mark the page clean as well. This is because the page
2756 * may be against a block device, and a later reattachment of buffers
2757 * to a dirty page will set *all* buffers dirty. Which would corrupt
2758 * filesystem data on the same device.
2759 *
2760 * The same applies to regular filesystem pages: if all the buffers are
2761 * clean then we set the page clean and proceed. To do that, we require
2762 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2763 * private_lock.
2764 *
2765 * try_to_free_buffers() is non-blocking.
2766 */
2767static inline int buffer_busy(struct buffer_head *bh)
2768{
2769 return atomic_read(&bh->b_count) |
2770 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2771}
2772
2773static int
2774drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2775{
2776 struct buffer_head *head = page_buffers(page);
2777 struct buffer_head *bh;
2778
2779 bh = head;
2780 do {
de7d5a3b 2781 if (buffer_write_io_error(bh) && page->mapping)
1da177e4
LT
2782 set_bit(AS_EIO, &page->mapping->flags);
2783 if (buffer_busy(bh))
2784 goto failed;
2785 bh = bh->b_this_page;
2786 } while (bh != head);
2787
2788 do {
2789 struct buffer_head *next = bh->b_this_page;
2790
2791 if (!list_empty(&bh->b_assoc_buffers))
2792 __remove_assoc_queue(bh);
2793 bh = next;
2794 } while (bh != head);
2795 *buffers_to_free = head;
2796 __clear_page_buffers(page);
2797 return 1;
2798failed:
2799 return 0;
2800}
2801
2802int try_to_free_buffers(struct page *page)
2803{
2804 struct address_space * const mapping = page->mapping;
2805 struct buffer_head *buffers_to_free = NULL;
2806 int ret = 0;
2807
2808 BUG_ON(!PageLocked(page));
2809 if (PageWriteback(page))
2810 return 0;
2811
2812 if (mapping == NULL) { /* can this still happen? */
2813 ret = drop_buffers(page, &buffers_to_free);
2814 goto out;
2815 }
2816
2817 spin_lock(&mapping->private_lock);
2818 ret = drop_buffers(page, &buffers_to_free);
d08b3851 2819 spin_unlock(&mapping->private_lock);
1da177e4
LT
2820 if (ret) {
2821 /*
2822 * If the filesystem writes its buffers by hand (eg ext3)
2823 * then we can have clean buffers against a dirty page. We
2824 * clean the page here; otherwise later reattachment of buffers
2825 * could encounter a non-uptodate page, which is unresolvable.
2826 * This only applies in the rare case where try_to_free_buffers
2827 * succeeds but the page is not freed.
2828 */
2829 clear_page_dirty(page);
2830 }
1da177e4
LT
2831out:
2832 if (buffers_to_free) {
2833 struct buffer_head *bh = buffers_to_free;
2834
2835 do {
2836 struct buffer_head *next = bh->b_this_page;
2837 free_buffer_head(bh);
2838 bh = next;
2839 } while (bh != buffers_to_free);
2840 }
2841 return ret;
2842}
2843EXPORT_SYMBOL(try_to_free_buffers);
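/*
 * Illustrative sketch, not part of the original file: a filesystem with
 * no private page state usually implements ->releasepage as a direct
 * call to try_to_free_buffers().  "myfs_releasepage" is hypothetical.
 */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
        return try_to_free_buffers(page);
}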
2844
3978d717 2845void block_sync_page(struct page *page)
1da177e4
LT
2846{
2847 struct address_space *mapping;
2848
2849 smp_mb();
2850 mapping = page_mapping(page);
2851 if (mapping)
2852 blk_run_backing_dev(mapping->backing_dev_info, page);
1da177e4
LT
2853}
2854
2855/*
2856 * There are no bdflush tunables left. But distributions are
2857 * still running obsolete flush daemons, so we terminate them here.
2858 *
2859 * Use of bdflush() is deprecated and will be removed in a future kernel.
2860 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
2861 */
2862asmlinkage long sys_bdflush(int func, long data)
2863{
2864 static int msg_count;
2865
2866 if (!capable(CAP_SYS_ADMIN))
2867 return -EPERM;
2868
2869 if (msg_count < 5) {
2870 msg_count++;
2871 printk(KERN_INFO
2872 "warning: process `%s' used the obsolete bdflush"
2873 " system call\n", current->comm);
2874 printk(KERN_INFO "Fix your initscripts?\n");
2875 }
2876
2877 if (func == 1)
2878 do_exit(0);
2879 return 0;
2880}
2881
2882/*
2883 * Buffer-head allocation
2884 */
2885static kmem_cache_t *bh_cachep;
2886
2887/*
2888 * Once the number of bh's in the machine exceeds this level, we start
2889 * stripping them in writeback.
2890 */
2891static int max_buffer_heads;
2892
2893int buffer_heads_over_limit;
2894
2895struct bh_accounting {
2896 int nr; /* Number of live bh's */
2897 int ratelimit; /* Limit cacheline bouncing */
2898};
2899
2900static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2901
2902static void recalc_bh_state(void)
2903{
2904 int i;
2905 int tot = 0;
2906
2907 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
2908 return;
2909 __get_cpu_var(bh_accounting).ratelimit = 0;
8a143426 2910 for_each_online_cpu(i)
1da177e4
LT
2911 tot += per_cpu(bh_accounting, i).nr;
2912 buffer_heads_over_limit = (tot > max_buffer_heads);
2913}
2914
dd0fc66f 2915struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
1da177e4
LT
2916{
2917 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
2918 if (ret) {
736c7b80 2919 get_cpu_var(bh_accounting).nr++;
1da177e4 2920 recalc_bh_state();
736c7b80 2921 put_cpu_var(bh_accounting);
1da177e4
LT
2922 }
2923 return ret;
2924}
2925EXPORT_SYMBOL(alloc_buffer_head);
2926
2927void free_buffer_head(struct buffer_head *bh)
2928{
2929 BUG_ON(!list_empty(&bh->b_assoc_buffers));
2930 kmem_cache_free(bh_cachep, bh);
736c7b80 2931 get_cpu_var(bh_accounting).nr--;
1da177e4 2932 recalc_bh_state();
736c7b80 2933 put_cpu_var(bh_accounting);
1da177e4
LT
2934}
2935EXPORT_SYMBOL(free_buffer_head);
2936
2937static void
2938init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
2939{
2940 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
2941 SLAB_CTOR_CONSTRUCTOR) {
2942 struct buffer_head * bh = (struct buffer_head *)data;
2943
2944 memset(bh, 0, sizeof(*bh));
2945 INIT_LIST_HEAD(&bh->b_assoc_buffers);
2946 }
2947}
2948
2949#ifdef CONFIG_HOTPLUG_CPU
2950static void buffer_exit_cpu(int cpu)
2951{
2952 int i;
2953 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
2954
2955 for (i = 0; i < BH_LRU_SIZE; i++) {
2956 brelse(b->bhs[i]);
2957 b->bhs[i] = NULL;
2958 }
8a143426
ED
2959 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
2960 per_cpu(bh_accounting, cpu).nr = 0;
2961 put_cpu_var(bh_accounting);
1da177e4
LT
2962}
2963
2964static int buffer_cpu_notify(struct notifier_block *self,
2965 unsigned long action, void *hcpu)
2966{
2967 if (action == CPU_DEAD)
2968 buffer_exit_cpu((unsigned long)hcpu);
2969 return NOTIFY_OK;
2970}
2971#endif /* CONFIG_HOTPLUG_CPU */
2972
2973void __init buffer_init(void)
2974{
2975 int nrpages;
2976
2977 bh_cachep = kmem_cache_create("buffer_head",
b0196009
PJ
2978 sizeof(struct buffer_head), 0,
2979 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
2980 SLAB_MEM_SPREAD),
2981 init_buffer_head,
2982 NULL);
1da177e4
LT
2983
2984 /*
2985 * Limit the bh occupancy to 10% of ZONE_NORMAL
2986 */
2987 nrpages = (nr_free_buffer_pages() * 10) / 100;
2988 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
2989 hotcpu_notifier(buffer_cpu_notify, 0);
2990}
2991
2992EXPORT_SYMBOL(__bforget);
2993EXPORT_SYMBOL(__brelse);
2994EXPORT_SYMBOL(__wait_on_buffer);
2995EXPORT_SYMBOL(block_commit_write);
2996EXPORT_SYMBOL(block_prepare_write);
2997EXPORT_SYMBOL(block_read_full_page);
2998EXPORT_SYMBOL(block_sync_page);
2999EXPORT_SYMBOL(block_truncate_page);
3000EXPORT_SYMBOL(block_write_full_page);
3001EXPORT_SYMBOL(cont_prepare_write);
1da177e4
LT
3002EXPORT_SYMBOL(end_buffer_read_sync);
3003EXPORT_SYMBOL(end_buffer_write_sync);
3004EXPORT_SYMBOL(file_fsync);
3005EXPORT_SYMBOL(fsync_bdev);
3006EXPORT_SYMBOL(generic_block_bmap);
3007EXPORT_SYMBOL(generic_commit_write);
3008EXPORT_SYMBOL(generic_cont_expand);
05eb0b51 3009EXPORT_SYMBOL(generic_cont_expand_simple);
1da177e4
LT
3010EXPORT_SYMBOL(init_buffer);
3011EXPORT_SYMBOL(invalidate_bdev);
3012EXPORT_SYMBOL(ll_rw_block);
3013EXPORT_SYMBOL(mark_buffer_dirty);
3014EXPORT_SYMBOL(submit_bh);
3015EXPORT_SYMBOL(sync_dirty_buffer);
3016EXPORT_SYMBOL(unlock_buffer);