1/*
2 * linux/fs/buffer.c
3 *
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
6
7/*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9 *
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12 *
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
15 *
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17 *
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19 */
20
21#include <linux/kernel.h>
22#include <linux/syscalls.h>
23#include <linux/fs.h>
24#include <linux/mm.h>
25#include <linux/percpu.h>
26#include <linux/slab.h>
27#include <linux/capability.h>
28#include <linux/blkdev.h>
29#include <linux/file.h>
30#include <linux/quotaops.h>
31#include <linux/highmem.h>
32#include <linux/module.h>
33#include <linux/writeback.h>
34#include <linux/hash.h>
35#include <linux/suspend.h>
36#include <linux/buffer_head.h>
37#include <linux/task_io_accounting_ops.h>
38#include <linux/bio.h>
39#include <linux/notifier.h>
40#include <linux/cpu.h>
41#include <linux/bitops.h>
42#include <linux/mpage.h>
43#include <linux/bit_spinlock.h>
44
45static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46
47#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48
49inline void
50init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51{
52 bh->b_end_io = handler;
53 bh->b_private = private;
54}
55
56static int sync_buffer(void *word)
57{
58 struct block_device *bd;
59 struct buffer_head *bh
60 = container_of(word, struct buffer_head, b_state);
61
62 smp_mb();
63 bd = bh->b_bdev;
64 if (bd)
65 blk_run_address_space(bd->bd_inode->i_mapping);
66 io_schedule();
67 return 0;
68}
69
70void __lock_buffer(struct buffer_head *bh)
71{
72 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73 TASK_UNINTERRUPTIBLE);
74}
75EXPORT_SYMBOL(__lock_buffer);
76
77void unlock_buffer(struct buffer_head *bh)
78{
79 clear_bit_unlock(BH_Lock, &bh->b_state);
80 smp_mb__after_clear_bit();
81 wake_up_bit(&bh->b_state, BH_Lock);
82}
83
84/*
85 * Block until a buffer comes unlocked. This doesn't stop it
86 * from becoming locked again - you have to lock it yourself
87 * if you want to preserve its state.
88 */
89void __wait_on_buffer(struct buffer_head * bh)
90{
91 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
92}
93
94static void
95__clear_page_buffers(struct page *page)
96{
97 ClearPagePrivate(page);
98 set_page_private(page, 0);
99 page_cache_release(page);
100}
101
102
103static int quiet_error(struct buffer_head *bh)
104{
105 if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
106 return 0;
107 return 1;
108}
109
110
111static void buffer_io_error(struct buffer_head *bh)
112{
113 char b[BDEVNAME_SIZE];
114 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
115 bdevname(bh->b_bdev, b),
116 (unsigned long long)bh->b_blocknr);
117}
118
119/*
120 * End-of-IO handler helper function which does not touch the bh after
121 * unlocking it.
122 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
123 * a race there is benign: unlock_buffer() only use the bh's address for
124 * hashing after unlocking the buffer, so it doesn't actually touch the bh
125 * itself.
126 */
127static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
128{
129 if (uptodate) {
130 set_buffer_uptodate(bh);
131 } else {
132 /* This happens, due to failed READA attempts. */
133 clear_buffer_uptodate(bh);
134 }
135 unlock_buffer(bh);
136}
137
138/*
139 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
140 * unlock the buffer. This is what ll_rw_block uses too.
141 */
142void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
143{
144 __end_buffer_read_notouch(bh, uptodate);
145 put_bh(bh);
146}
147
148void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
149{
150 char b[BDEVNAME_SIZE];
151
152 if (uptodate) {
153 set_buffer_uptodate(bh);
154 } else {
155 if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
156 buffer_io_error(bh);
157 printk(KERN_WARNING "lost page write due to "
158 "I/O error on %s\n",
159 bdevname(bh->b_bdev, b));
160 }
161 set_buffer_write_io_error(bh);
162 clear_buffer_uptodate(bh);
163 }
164 unlock_buffer(bh);
165 put_bh(bh);
166}
167
168/*
169 * Various filesystems appear to want __find_get_block to be non-blocking.
170 * But it's the page lock which protects the buffers. To get around this,
171 * we get exclusion from try_to_free_buffers with the blockdev mapping's
172 * private_lock.
173 *
174 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
175 * may be quite high. This code could TryLock the page, and if that
176 * succeeds, there is no need to take private_lock. (But if
177 * private_lock is contended then so is mapping->tree_lock).
178 */
179static struct buffer_head *
180__find_get_block_slow(struct block_device *bdev, sector_t block)
181{
182 struct inode *bd_inode = bdev->bd_inode;
183 struct address_space *bd_mapping = bd_inode->i_mapping;
184 struct buffer_head *ret = NULL;
185 pgoff_t index;
186 struct buffer_head *bh;
187 struct buffer_head *head;
188 struct page *page;
189 int all_mapped = 1;
190
191 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
192 page = find_get_page(bd_mapping, index);
193 if (!page)
194 goto out;
195
196 spin_lock(&bd_mapping->private_lock);
197 if (!page_has_buffers(page))
198 goto out_unlock;
199 head = page_buffers(page);
200 bh = head;
201 do {
202 if (!buffer_mapped(bh))
203 all_mapped = 0;
204 else if (bh->b_blocknr == block) {
205 ret = bh;
206 get_bh(bh);
207 goto out_unlock;
208 }
209 bh = bh->b_this_page;
210 } while (bh != head);
211
212 /* we might be here because some of the buffers on this page are
213 * not mapped. This is due to various races between
214 * file io on the block device and getblk. It gets dealt with
215 * elsewhere, don't buffer_error if we had some unmapped buffers
216 */
217 if (all_mapped) {
218 printk("__find_get_block_slow() failed. "
219 "block=%llu, b_blocknr=%llu\n",
220 (unsigned long long)block,
221 (unsigned long long)bh->b_blocknr);
222 printk("b_state=0x%08lx, b_size=%zu\n",
223 bh->b_state, bh->b_size);
224 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
225 }
226out_unlock:
227 spin_unlock(&bd_mapping->private_lock);
228 page_cache_release(page);
229out:
230 return ret;
231}
232
233/* If invalidate_buffers() will trash dirty buffers, it means some kind
234 of fs corruption is going on. Trashing dirty data always imply losing
235 information that was supposed to be just stored on the physical layer
236 by the user.
237
238 Thus invalidate_buffers in general usage is not allowed to trash
239 dirty buffers. For example ioctl(FLSBLKBUF) expects dirty data to
240 be preserved. These buffers are simply skipped.
241
242 We also skip buffers which are still in use. For example this can
243 happen if a userspace program is reading the block device.
244
245 NOTE: In the case where the user removed a removable-media-disk even if
246 there's still dirty data not synced on disk (due to a bug in the device driver
247 or due to an error of the user), by not destroying the dirty buffers we could
248 generate corruption also on the next media inserted, thus a parameter is
249 necessary to handle this case in the most safe way possible (trying
250 to not corrupt also the new disk inserted with the data belonging to
251 the old now corrupted disk). Also for the ramdisk the natural thing
252 to do in order to release the ramdisk memory is to destroy dirty buffers.
253
254 These are two special cases. Normal usage implies that the device driver
255 issues a sync on the device (without waiting for I/O completion) and
256 then an invalidate_buffers call that doesn't trash dirty buffers.
257
258 For handling cache coherency with the blkdev pagecache the 'update' case
259 has been introduced. It is needed to re-read from disk any pinned
260 buffer. NOTE: re-reading from disk is destructive so we can do it only
261 when we assume nobody is changing the buffercache under our I/O and when
262 we think the disk contains more recent information than the buffercache.
263 The update == 1 pass marks the buffers we need to update, the update == 2
264 pass does the actual I/O. */
265void invalidate_bdev(struct block_device *bdev)
266{
267 struct address_space *mapping = bdev->bd_inode->i_mapping;
268
269 if (mapping->nrpages == 0)
270 return;
271
272 invalidate_bh_lrus();
273 invalidate_mapping_pages(mapping, 0, -1);
274}
275
276/*
277 * Kick pdflush then try to free up some ZONE_NORMAL memory.
278 */
279static void free_more_memory(void)
280{
281 struct zone *zone;
282 int nid;
283
284 wakeup_pdflush(1024);
285 yield();
286
287 for_each_online_node(nid) {
288 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
289 gfp_zone(GFP_NOFS), NULL,
290 &zone);
291 if (zone)
292 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
293 GFP_NOFS, NULL);
294 }
295}
296
297/*
298 * I/O completion handler for block_read_full_page() - pages
299 * which come unlocked at the end of I/O.
300 */
301static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
302{
303 unsigned long flags;
304 struct buffer_head *first;
305 struct buffer_head *tmp;
306 struct page *page;
307 int page_uptodate = 1;
308
309 BUG_ON(!buffer_async_read(bh));
310
311 page = bh->b_page;
312 if (uptodate) {
313 set_buffer_uptodate(bh);
314 } else {
315 clear_buffer_uptodate(bh);
316 if (!quiet_error(bh))
317 buffer_io_error(bh);
318 SetPageError(page);
319 }
320
321 /*
322 * Be _very_ careful from here on. Bad things can happen if
323 * two buffer heads end IO at almost the same time and both
324 * decide that the page is now completely done.
325 */
326 first = page_buffers(page);
327 local_irq_save(flags);
328 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
329 clear_buffer_async_read(bh);
330 unlock_buffer(bh);
331 tmp = bh;
332 do {
333 if (!buffer_uptodate(tmp))
334 page_uptodate = 0;
335 if (buffer_async_read(tmp)) {
336 BUG_ON(!buffer_locked(tmp));
337 goto still_busy;
338 }
339 tmp = tmp->b_this_page;
340 } while (tmp != bh);
341 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
342 local_irq_restore(flags);
343
344 /*
345 * If none of the buffers had errors and they are all
346 * uptodate then we can set the page uptodate.
347 */
348 if (page_uptodate && !PageError(page))
349 SetPageUptodate(page);
350 unlock_page(page);
351 return;
352
353still_busy:
354 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
355 local_irq_restore(flags);
356 return;
357}
358
359/*
360 * Completion handler for block_write_full_page() - pages which are unlocked
361 * during I/O, and which have PageWriteback cleared upon I/O completion.
362 */
363static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
364{
365 char b[BDEVNAME_SIZE];
366 unsigned long flags;
367 struct buffer_head *first;
368 struct buffer_head *tmp;
369 struct page *page;
370
371 BUG_ON(!buffer_async_write(bh));
372
373 page = bh->b_page;
374 if (uptodate) {
375 set_buffer_uptodate(bh);
376 } else {
377 if (!quiet_error(bh)) {
378 buffer_io_error(bh);
379 printk(KERN_WARNING "lost page write due to "
380 "I/O error on %s\n",
381 bdevname(bh->b_bdev, b));
382 }
383 set_bit(AS_EIO, &page->mapping->flags);
384 set_buffer_write_io_error(bh);
385 clear_buffer_uptodate(bh);
386 SetPageError(page);
387 }
388
389 first = page_buffers(page);
390 local_irq_save(flags);
391 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
392
393 clear_buffer_async_write(bh);
394 unlock_buffer(bh);
395 tmp = bh->b_this_page;
396 while (tmp != bh) {
397 if (buffer_async_write(tmp)) {
398 BUG_ON(!buffer_locked(tmp));
399 goto still_busy;
400 }
401 tmp = tmp->b_this_page;
402 }
403 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
404 local_irq_restore(flags);
405 end_page_writeback(page);
406 return;
407
408still_busy:
409 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
410 local_irq_restore(flags);
411 return;
412}
413
414/*
415 * If a page's buffers are under async readin (end_buffer_async_read
416 * completion) then there is a possibility that another thread of
417 * control could lock one of the buffers after it has completed
418 * but while some of the other buffers have not completed. This
419 * locked buffer would confuse end_buffer_async_read() into not unlocking
420 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
421 * that this buffer is not under async I/O.
422 *
423 * The page comes unlocked when it has no locked buffer_async buffers
424 * left.
425 *
426 * PageLocked prevents anyone starting new async I/O reads any of
427 * the buffers.
428 *
429 * PageWriteback is used to prevent simultaneous writeout of the same
430 * page.
431 *
432 * PageLocked prevents anyone from starting writeback of a page which is
433 * under read I/O (PageWriteback is only ever set against a locked page).
434 */
435static void mark_buffer_async_read(struct buffer_head *bh)
436{
437 bh->b_end_io = end_buffer_async_read;
438 set_buffer_async_read(bh);
439}
440
441void mark_buffer_async_write(struct buffer_head *bh)
442{
443 bh->b_end_io = end_buffer_async_write;
444 set_buffer_async_write(bh);
445}
446EXPORT_SYMBOL(mark_buffer_async_write);
447
448
449/*
450 * fs/buffer.c contains helper functions for buffer-backed address space's
451 * fsync functions. A common requirement for buffer-based filesystems is
452 * that certain data from the backing blockdev needs to be written out for
453 * a successful fsync(). For example, ext2 indirect blocks need to be
454 * written back and waited upon before fsync() returns.
455 *
456 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
457 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
458 * management of a list of dependent buffers at ->i_mapping->private_list.
459 *
460 * Locking is a little subtle: try_to_free_buffers() will remove buffers
461 * from their controlling inode's queue when they are being freed. But
462 * try_to_free_buffers() will be operating against the *blockdev* mapping
463 * at the time, not against the S_ISREG file which depends on those buffers.
464 * So the locking for private_list is via the private_lock in the address_space
465 * which backs the buffers. Which is different from the address_space
466 * against which the buffers are listed. So for a particular address_space,
467 * mapping->private_lock does *not* protect mapping->private_list! In fact,
468 * mapping->private_list will always be protected by the backing blockdev's
469 * ->private_lock.
470 *
471 * Which introduces a requirement: all buffers on an address_space's
472 * ->private_list must be from the same address_space: the blockdev's.
473 *
474 * address_spaces which do not place buffers at ->private_list via these
475 * utility functions are free to use private_lock and private_list for
476 * whatever they want. The only requirement is that list_empty(private_list)
477 * be true at clear_inode() time.
478 *
479 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
480 * filesystems should do that. invalidate_inode_buffers() should just go
481 * BUG_ON(!list_empty).
482 *
483 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
484 * take an address_space, not an inode. And it should be called
485 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
486 * queued up.
487 *
488 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
489 * list if it is already on a list. Because if the buffer is on a list,
490 * it *must* already be on the right one. If not, the filesystem is being
491 * silly. This will save a ton of locking. But first we have to ensure
492 * that buffers are taken *off* the old inode's list when they are freed
493 * (presumably in truncate). That requires careful auditing of all
494 * filesystems (do it inside bforget()). It could also be done by bringing
495 * b_inode back.
496 */
497
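/*
 * Example (not part of the original file): a minimal sketch of how a
 * buffer-based filesystem might use the helpers described above.  Metadata
 * buffers are attached to the inode's ->private_list with
 * mark_buffer_dirty_inode() as they are dirtied, and the fsync method then
 * writes and waits on them with sync_mapping_buffers().  The "myfs_" name
 * is hypothetical; the prototype assumes this era's (file, dentry,
 * datasync) fsync signature.
 */
static int myfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;

	/* Write out and wait upon the buffers queued on ->private_list */
	return sync_mapping_buffers(inode->i_mapping);
}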
498/*
499 * The buffer's backing address_space's private_lock must be held
500 */
501static void __remove_assoc_queue(struct buffer_head *bh)
502{
503 list_del_init(&bh->b_assoc_buffers);
504 WARN_ON(!bh->b_assoc_map);
505 if (buffer_write_io_error(bh))
506 set_bit(AS_EIO, &bh->b_assoc_map->flags);
507 bh->b_assoc_map = NULL;
508}
509
510int inode_has_buffers(struct inode *inode)
511{
512 return !list_empty(&inode->i_data.private_list);
513}
514
515/*
516 * osync is designed to support O_SYNC io. It waits synchronously for
517 * all already-submitted IO to complete, but does not queue any new
518 * writes to the disk.
519 *
520 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
521 * you dirty the buffers, and then use osync_inode_buffers to wait for
522 * completion. Any other dirty buffers which are not yet queued for
523 * write will not be flushed to disk by the osync.
524 */
525static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
526{
527 struct buffer_head *bh;
528 struct list_head *p;
529 int err = 0;
530
531 spin_lock(lock);
532repeat:
533 list_for_each_prev(p, list) {
534 bh = BH_ENTRY(p);
535 if (buffer_locked(bh)) {
536 get_bh(bh);
537 spin_unlock(lock);
538 wait_on_buffer(bh);
539 if (!buffer_uptodate(bh))
540 err = -EIO;
541 brelse(bh);
542 spin_lock(lock);
543 goto repeat;
544 }
545 }
546 spin_unlock(lock);
547 return err;
548}
549
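/*
 * Example (not part of the original file): the submit-then-wait pattern
 * described in the osync comment above, sketched for a caller that has
 * just dirtied a set of buffers.  The writes are queued with ll_rw_block()
 * and only then waited upon; "bhs" and "nr" are hypothetical.
 */
static int myfs_write_buffers_sync(struct buffer_head **bhs, int nr)
{
	int i, err = 0;

	ll_rw_block(WRITE, nr, bhs);		/* queue all the writes */
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);		/* wait for completion */
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	return err;
}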
550void do_thaw_all(unsigned long unused)
551{
552 struct super_block *sb;
553 char b[BDEVNAME_SIZE];
554
555 spin_lock(&sb_lock);
556restart:
557 list_for_each_entry(sb, &super_blocks, s_list) {
558 sb->s_count++;
559 spin_unlock(&sb_lock);
560 down_read(&sb->s_umount);
561 while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
562 printk(KERN_WARNING "Emergency Thaw on %s\n",
563 bdevname(sb->s_bdev, b));
564 up_read(&sb->s_umount);
565 spin_lock(&sb_lock);
566 if (__put_super_and_need_restart(sb))
567 goto restart;
568 }
569 spin_unlock(&sb_lock);
570 printk(KERN_WARNING "Emergency Thaw complete\n");
571}
572
573/**
574 * emergency_thaw_all -- forcibly thaw every frozen filesystem
575 *
576 * Used for emergency unfreeze of all filesystems via SysRq
577 */
578void emergency_thaw_all(void)
579{
580 pdflush_operation(do_thaw_all, 0);
581}
582
583/**
584 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
585 * @mapping: the mapping which wants those buffers written
586 *
587 * Starts I/O against the buffers at mapping->private_list, and waits upon
588 * that I/O.
589 *
590 * Basically, this is a convenience function for fsync().
591 * @mapping is a file or directory which needs those buffers to be written for
592 * a successful fsync().
593 */
594int sync_mapping_buffers(struct address_space *mapping)
595{
596 struct address_space *buffer_mapping = mapping->assoc_mapping;
597
598 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
599 return 0;
600
601 return fsync_buffers_list(&buffer_mapping->private_lock,
602 &mapping->private_list);
603}
604EXPORT_SYMBOL(sync_mapping_buffers);
605
606/*
607 * Called when we've recently written block `bblock', and it is known that
608 * `bblock' was for a buffer_boundary() buffer. This means that the block at
609 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
610 * dirty, schedule it for IO. So that indirects merge nicely with their data.
611 */
612void write_boundary_block(struct block_device *bdev,
613 sector_t bblock, unsigned blocksize)
614{
615 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
616 if (bh) {
617 if (buffer_dirty(bh))
618 ll_rw_block(WRITE, 1, &bh);
619 put_bh(bh);
620 }
621}
622
623void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
624{
625 struct address_space *mapping = inode->i_mapping;
626 struct address_space *buffer_mapping = bh->b_page->mapping;
627
628 mark_buffer_dirty(bh);
629 if (!mapping->assoc_mapping) {
630 mapping->assoc_mapping = buffer_mapping;
631 } else {
632 BUG_ON(mapping->assoc_mapping != buffer_mapping);
633 }
634 if (!bh->b_assoc_map) {
635 spin_lock(&buffer_mapping->private_lock);
636 list_move_tail(&bh->b_assoc_buffers,
637 &mapping->private_list);
638 bh->b_assoc_map = mapping;
639 spin_unlock(&buffer_mapping->private_lock);
640 }
641}
642EXPORT_SYMBOL(mark_buffer_dirty_inode);
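/*
 * Example (not part of the original file): the caller side of
 * mark_buffer_dirty_inode().  A filesystem that has just updated an
 * indirect block belonging to "inode" queues it on the inode's
 * ->private_list so that sync_mapping_buffers() picks it up at fsync
 * time.  "myfs_dirty_indirect" is hypothetical.
 */
static void myfs_dirty_indirect(struct inode *inode, struct buffer_head *bh)
{
	mark_buffer_dirty_inode(bh, inode);
}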
643
644/*
645 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
646 * dirty.
647 *
648 * If warn is true, then emit a warning if the page is not uptodate and has
649 * not been truncated.
650 */
651static void __set_page_dirty(struct page *page,
652 struct address_space *mapping, int warn)
653{
654 spin_lock_irq(&mapping->tree_lock);
655 if (page->mapping) { /* Race with truncate? */
656 WARN_ON_ONCE(warn && !PageUptodate(page));
657 account_page_dirtied(page, mapping);
658 radix_tree_tag_set(&mapping->page_tree,
659 page_index(page), PAGECACHE_TAG_DIRTY);
660 }
661 spin_unlock_irq(&mapping->tree_lock);
662 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
663}
664
665/*
666 * Add a page to the dirty page list.
667 *
668 * It is a sad fact of life that this function is called from several places
669 * deeply under spinlocking. It may not sleep.
670 *
671 * If the page has buffers, the uptodate buffers are set dirty, to preserve
672 * dirty-state coherency between the page and the buffers. If the page does
673 * not have buffers then when they are later attached they will all be set
674 * dirty.
675 *
676 * The buffers are dirtied before the page is dirtied. There's a small race
677 * window in which a writepage caller may see the page cleanness but not the
678 * buffer dirtiness. That's fine. If this code were to set the page dirty
679 * before the buffers, a concurrent writepage caller could clear the page dirty
680 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
681 * page on the dirty page list.
682 *
683 * We use private_lock to lock against try_to_free_buffers while using the
684 * page's buffer list. Also use this to protect against clean buffers being
685 * added to the page after it was set dirty.
686 *
687 * FIXME: may need to call ->reservepage here as well. That's rather up to the
688 * address_space though.
689 */
690int __set_page_dirty_buffers(struct page *page)
691{
692 int newly_dirty;
693 struct address_space *mapping = page_mapping(page);
694
695 if (unlikely(!mapping))
696 return !TestSetPageDirty(page);
697
698 spin_lock(&mapping->private_lock);
699 if (page_has_buffers(page)) {
700 struct buffer_head *head = page_buffers(page);
701 struct buffer_head *bh = head;
702
703 do {
704 set_buffer_dirty(bh);
705 bh = bh->b_this_page;
706 } while (bh != head);
707 }
708 newly_dirty = !TestSetPageDirty(page);
709 spin_unlock(&mapping->private_lock);
710
711 if (newly_dirty)
712 __set_page_dirty(page, mapping, 1);
713 return newly_dirty;
714}
715EXPORT_SYMBOL(__set_page_dirty_buffers);
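/*
 * Example (not part of the original file): __set_page_dirty_buffers() is
 * the usual ->set_page_dirty implementation for buffer-backed files, so a
 * filesystem can reference it directly from its address_space_operations.
 * The aops instance below is a hypothetical fragment.
 */
static const struct address_space_operations myfs_aops_sketch = {
	.set_page_dirty	= __set_page_dirty_buffers,
};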
716
717/*
718 * Write out and wait upon a list of buffers.
719 *
720 * We have conflicting pressures: we want to make sure that all
721 * initially dirty buffers get waited on, but that any subsequently
722 * dirtied buffers don't. After all, we don't want fsync to last
723 * forever if somebody is actively writing to the file.
724 *
725 * Do this in two main stages: first we copy dirty buffers to a
726 * temporary inode list, queueing the writes as we go. Then we clean
727 * up, waiting for those writes to complete.
728 *
729 * During this second stage, any subsequent updates to the file may end
730 * up refiling the buffer on the original inode's dirty list again, so
731 * there is a chance we will end up with a buffer queued for write but
732 * not yet completed on that list. So, as a final cleanup we go through
733 * the osync code to catch these locked, dirty buffers without requeuing
734 * any newly dirty buffers for write.
735 */
736static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
737{
738 struct buffer_head *bh;
739 struct list_head tmp;
740 struct address_space *mapping, *prev_mapping = NULL;
741 int err = 0, err2;
742
743 INIT_LIST_HEAD(&tmp);
744
745 spin_lock(lock);
746 while (!list_empty(list)) {
747 bh = BH_ENTRY(list->next);
748 mapping = bh->b_assoc_map;
749 __remove_assoc_queue(bh);
750 /* Avoid race with mark_buffer_dirty_inode() which does
751 * a lockless check and we rely on seeing the dirty bit */
752 smp_mb();
753 if (buffer_dirty(bh) || buffer_locked(bh)) {
754 list_add(&bh->b_assoc_buffers, &tmp);
755 bh->b_assoc_map = mapping;
756 if (buffer_dirty(bh)) {
757 get_bh(bh);
758 spin_unlock(lock);
759 /*
760 * Ensure any pending I/O completes so that
761 * ll_rw_block() actually writes the current
762 * contents - it is a noop if I/O is still in
763 * flight on potentially older contents.
764 */
765 ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
766
767 /*
768 * Kick off IO for the previous mapping. Note
769 * that we will not run the very last mapping,
770 * wait_on_buffer() will do that for us
771 * through sync_buffer().
772 */
773 if (prev_mapping && prev_mapping != mapping)
774 blk_run_address_space(prev_mapping);
775 prev_mapping = mapping;
776
777 brelse(bh);
778 spin_lock(lock);
779 }
780 }
781 }
782
783 while (!list_empty(&tmp)) {
784 bh = BH_ENTRY(tmp.prev);
785 get_bh(bh);
786 mapping = bh->b_assoc_map;
787 __remove_assoc_queue(bh);
788 /* Avoid race with mark_buffer_dirty_inode() which does
789 * a lockless check and we rely on seeing the dirty bit */
790 smp_mb();
791 if (buffer_dirty(bh)) {
792 list_add(&bh->b_assoc_buffers,
793 &mapping->private_list);
794 bh->b_assoc_map = mapping;
795 }
796 spin_unlock(lock);
797 wait_on_buffer(bh);
798 if (!buffer_uptodate(bh))
799 err = -EIO;
800 brelse(bh);
801 spin_lock(lock);
802 }
803
804 spin_unlock(lock);
805 err2 = osync_buffers_list(lock, list);
806 if (err)
807 return err;
808 else
809 return err2;
810}
811
812/*
813 * Invalidate any and all dirty buffers on a given inode. We are
814 * probably unmounting the fs, but that doesn't mean we have already
815 * done a sync(). Just drop the buffers from the inode list.
816 *
817 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
818 * assumes that all the buffers are against the blockdev. Not true
819 * for reiserfs.
820 */
821void invalidate_inode_buffers(struct inode *inode)
822{
823 if (inode_has_buffers(inode)) {
824 struct address_space *mapping = &inode->i_data;
825 struct list_head *list = &mapping->private_list;
826 struct address_space *buffer_mapping = mapping->assoc_mapping;
827
828 spin_lock(&buffer_mapping->private_lock);
829 while (!list_empty(list))
830 __remove_assoc_queue(BH_ENTRY(list->next));
831 spin_unlock(&buffer_mapping->private_lock);
832 }
833}
834EXPORT_SYMBOL(invalidate_inode_buffers);
835
836/*
837 * Remove any clean buffers from the inode's buffer list. This is called
838 * when we're trying to free the inode itself. Those buffers can pin it.
839 *
840 * Returns true if all buffers were removed.
841 */
842int remove_inode_buffers(struct inode *inode)
843{
844 int ret = 1;
845
846 if (inode_has_buffers(inode)) {
847 struct address_space *mapping = &inode->i_data;
848 struct list_head *list = &mapping->private_list;
849 struct address_space *buffer_mapping = mapping->assoc_mapping;
850
851 spin_lock(&buffer_mapping->private_lock);
852 while (!list_empty(list)) {
853 struct buffer_head *bh = BH_ENTRY(list->next);
854 if (buffer_dirty(bh)) {
855 ret = 0;
856 break;
857 }
858 __remove_assoc_queue(bh);
859 }
860 spin_unlock(&buffer_mapping->private_lock);
861 }
862 return ret;
863}
864
865/*
866 * Create the appropriate buffers when given a page for data area and
867 * the size of each buffer.. Use the bh->b_this_page linked list to
868 * follow the buffers created. Return NULL if unable to create more
869 * buffers.
870 *
871 * The retry flag is used to differentiate async IO (paging, swapping)
872 * which may not fail from ordinary buffer allocations.
873 */
874struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
875 int retry)
876{
877 struct buffer_head *bh, *head;
878 long offset;
879
880try_again:
881 head = NULL;
882 offset = PAGE_SIZE;
883 while ((offset -= size) >= 0) {
884 bh = alloc_buffer_head(GFP_NOFS);
885 if (!bh)
886 goto no_grow;
887
888 bh->b_bdev = NULL;
889 bh->b_this_page = head;
890 bh->b_blocknr = -1;
891 head = bh;
892
893 bh->b_state = 0;
894 atomic_set(&bh->b_count, 0);
895 bh->b_private = NULL;
896 bh->b_size = size;
897
898 /* Link the buffer to its page */
899 set_bh_page(bh, page, offset);
900
901 init_buffer(bh, NULL, NULL);
902 }
903 return head;
904/*
905 * In case anything failed, we just free everything we got.
906 */
907no_grow:
908 if (head) {
909 do {
910 bh = head;
911 head = head->b_this_page;
912 free_buffer_head(bh);
913 } while (head);
914 }
915
916 /*
917 * Return failure for non-async IO requests. Async IO requests
918 * are not allowed to fail, so we have to wait until buffer heads
919 * become available. But we don't want tasks sleeping with
920 * partially complete buffers, so all were released above.
921 */
922 if (!retry)
923 return NULL;
924
925 /* We're _really_ low on memory. Now we just
926 * wait for old buffer heads to become free due to
927 * finishing IO. Since this is an async request and
928 * the reserve list is empty, we're sure there are
929 * async buffer heads in use.
930 */
931 free_more_memory();
932 goto try_again;
933}
934EXPORT_SYMBOL_GPL(alloc_page_buffers);
935
936static inline void
937link_dev_buffers(struct page *page, struct buffer_head *head)
938{
939 struct buffer_head *bh, *tail;
940
941 bh = head;
942 do {
943 tail = bh;
944 bh = bh->b_this_page;
945 } while (bh);
946 tail->b_this_page = head;
947 attach_page_buffers(page, head);
948}
949
950/*
951 * Initialise the state of a blockdev page's buffers.
952 */
953static void
954init_page_buffers(struct page *page, struct block_device *bdev,
955 sector_t block, int size)
956{
957 struct buffer_head *head = page_buffers(page);
958 struct buffer_head *bh = head;
959 int uptodate = PageUptodate(page);
960
961 do {
962 if (!buffer_mapped(bh)) {
963 init_buffer(bh, NULL, NULL);
964 bh->b_bdev = bdev;
965 bh->b_blocknr = block;
966 if (uptodate)
967 set_buffer_uptodate(bh);
968 set_buffer_mapped(bh);
969 }
970 block++;
971 bh = bh->b_this_page;
972 } while (bh != head);
973}
974
975/*
976 * Create the page-cache page that contains the requested block.
977 *
978 * This is used purely for blockdev mappings.
979 */
980static struct page *
981grow_dev_page(struct block_device *bdev, sector_t block,
982 pgoff_t index, int size)
983{
984 struct inode *inode = bdev->bd_inode;
985 struct page *page;
986 struct buffer_head *bh;
987
988 page = find_or_create_page(inode->i_mapping, index,
989 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
990 if (!page)
991 return NULL;
992
993 BUG_ON(!PageLocked(page));
994
995 if (page_has_buffers(page)) {
996 bh = page_buffers(page);
997 if (bh->b_size == size) {
998 init_page_buffers(page, bdev, block, size);
999 return page;
1000 }
1001 if (!try_to_free_buffers(page))
1002 goto failed;
1003 }
1004
1005 /*
1006 * Allocate some buffers for this page
1007 */
1008 bh = alloc_page_buffers(page, size, 0);
1009 if (!bh)
1010 goto failed;
1011
1012 /*
1013 * Link the page to the buffers and initialise them. Take the
1014 * lock to be atomic wrt __find_get_block(), which does not
1015 * run under the page lock.
1016 */
1017 spin_lock(&inode->i_mapping->private_lock);
1018 link_dev_buffers(page, bh);
1019 init_page_buffers(page, bdev, block, size);
1020 spin_unlock(&inode->i_mapping->private_lock);
1021 return page;
1022
1023failed:
1024 BUG();
1025 unlock_page(page);
1026 page_cache_release(page);
1027 return NULL;
1028}
1029
1030/*
1031 * Create buffers for the specified block device block's page. If
1032 * that page was dirty, the buffers are set dirty also.
1033 */
1034static int
1035grow_buffers(struct block_device *bdev, sector_t block, int size)
1036{
1037 struct page *page;
1038 pgoff_t index;
1039 int sizebits;
1040
1041 sizebits = -1;
1042 do {
1043 sizebits++;
1044 } while ((size << sizebits) < PAGE_SIZE);
1045
1046 index = block >> sizebits;
1047
1048 /*
1049 * Check for a block which wants to lie outside our maximum possible
1050 * pagecache index. (this comparison is done using sector_t types).
1051 */
1052 if (unlikely(index != block >> sizebits)) {
1053 char b[BDEVNAME_SIZE];
1054
1055 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1056 "device %s\n",
1057 __func__, (unsigned long long)block,
1058 bdevname(bdev, b));
1059 return -EIO;
1060 }
1061 block = index << sizebits;
1062 /* Create a page with the proper size buffers.. */
1063 page = grow_dev_page(bdev, block, index, size);
1064 if (!page)
1065 return 0;
1066 unlock_page(page);
1067 page_cache_release(page);
1068 return 1;
1069}
1070
1071static struct buffer_head *
1072__getblk_slow(struct block_device *bdev, sector_t block, int size)
1073{
1074 /* Size must be multiple of hard sectorsize */
1075 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1076 (size < 512 || size > PAGE_SIZE))) {
1077 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1078 size);
1079 printk(KERN_ERR "hardsect size: %d\n",
1080 bdev_hardsect_size(bdev));
1081
1082 dump_stack();
1083 return NULL;
1084 }
1085
1086 for (;;) {
1087 struct buffer_head * bh;
1088 int ret;
1089
1090 bh = __find_get_block(bdev, block, size);
1091 if (bh)
1092 return bh;
1093
1094 ret = grow_buffers(bdev, block, size);
1095 if (ret < 0)
1096 return NULL;
1097 if (ret == 0)
1098 free_more_memory();
1099 }
1100}
1101
1102/*
1103 * The relationship between dirty buffers and dirty pages:
1104 *
1105 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1106 * the page is tagged dirty in its radix tree.
1107 *
1108 * At all times, the dirtiness of the buffers represents the dirtiness of
1109 * subsections of the page. If the page has buffers, the page dirty bit is
1110 * merely a hint about the true dirty state.
1111 *
1112 * When a page is set dirty in its entirety, all its buffers are marked dirty
1113 * (if the page has buffers).
1114 *
1115 * When a buffer is marked dirty, its page is dirtied, but the page's other
1116 * buffers are not.
1117 *
1118 * Also. When blockdev buffers are explicitly read with bread(), they
1119 * individually become uptodate. But their backing page remains not
1120 * uptodate - even if all of its buffers are uptodate. A subsequent
1121 * block_read_full_page() against that page will discover all the uptodate
1122 * buffers, will set the page uptodate and will perform no I/O.
1123 */
1124
1125/**
1126 * mark_buffer_dirty - mark a buffer_head as needing writeout
1127 * @bh: the buffer_head to mark dirty
1128 *
1129 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1130 * backing page dirty, then tag the page as dirty in its address_space's radix
1131 * tree and then attach the address_space's inode to its superblock's dirty
1132 * inode list.
1133 *
1134 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1135 * mapping->tree_lock and the global inode_lock.
1136 */
1137void mark_buffer_dirty(struct buffer_head *bh)
1138{
1139 WARN_ON_ONCE(!buffer_uptodate(bh));
1140
1141 /*
1142 * Very *carefully* optimize the it-is-already-dirty case.
1143 *
1144 * Don't let the final "is it dirty" escape to before we
1145 * perhaps modified the buffer.
1146 */
1147 if (buffer_dirty(bh)) {
1148 smp_mb();
1149 if (buffer_dirty(bh))
1150 return;
1151 }
1152
1153 if (!test_set_buffer_dirty(bh)) {
1154 struct page *page = bh->b_page;
1155 if (!TestSetPageDirty(page))
1156 __set_page_dirty(page, page_mapping(page), 0);
1157 }
1158}
1159
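/*
 * Example (not part of the original file): the typical read-modify-dirty
 * cycle built on the buffer cache.  The block is read via sb_bread(),
 * changed in memory, marked dirty so writeback picks it up later, and
 * then released.  "sb", "blocknr" and the byte written are hypothetical.
 */
static int myfs_touch_block(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh = sb_bread(sb, blocknr);

	if (!bh)
		return -EIO;
	bh->b_data[0] = 0;		/* modify the cached copy */
	mark_buffer_dirty(bh);		/* queue it for writeback */
	brelse(bh);
	return 0;
}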
1160/*
1161 * Decrement a buffer_head's reference count. If all buffers against a page
1162 * have zero reference count, are clean and unlocked, and if the page is clean
1163 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1164 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1165 * a page but it ends up not being freed, and buffers may later be reattached).
1166 */
1167void __brelse(struct buffer_head * buf)
1168{
1169 if (atomic_read(&buf->b_count)) {
1170 put_bh(buf);
1171 return;
1172 }
1173 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1174}
1175
1176/*
1177 * bforget() is like brelse(), except it discards any
1178 * potentially dirty data.
1179 */
1180void __bforget(struct buffer_head *bh)
1181{
1182 clear_buffer_dirty(bh);
1183 if (bh->b_assoc_map) {
1184 struct address_space *buffer_mapping = bh->b_page->mapping;
1185
1186 spin_lock(&buffer_mapping->private_lock);
1187 list_del_init(&bh->b_assoc_buffers);
1188 bh->b_assoc_map = NULL;
1189 spin_unlock(&buffer_mapping->private_lock);
1190 }
1191 __brelse(bh);
1192}
1193
1194static struct buffer_head *__bread_slow(struct buffer_head *bh)
1195{
1196 lock_buffer(bh);
1197 if (buffer_uptodate(bh)) {
1198 unlock_buffer(bh);
1199 return bh;
1200 } else {
1201 get_bh(bh);
1202 bh->b_end_io = end_buffer_read_sync;
1203 submit_bh(READ, bh);
1204 wait_on_buffer(bh);
1205 if (buffer_uptodate(bh))
1206 return bh;
1207 }
1208 brelse(bh);
1209 return NULL;
1210}
1211
1212/*
1213 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1214 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1215 * refcount elevated by one when they're in an LRU. A buffer can only appear
1216 * once in a particular CPU's LRU. A single buffer can be present in multiple
1217 * CPU's LRUs at the same time.
1218 *
1219 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1220 * sb_find_get_block().
1221 *
1222 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1223 * a local interrupt disable for that.
1224 */
1225
1226#define BH_LRU_SIZE 8
1227
1228struct bh_lru {
1229 struct buffer_head *bhs[BH_LRU_SIZE];
1230};
1231
1232static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1233
1234#ifdef CONFIG_SMP
1235#define bh_lru_lock() local_irq_disable()
1236#define bh_lru_unlock() local_irq_enable()
1237#else
1238#define bh_lru_lock() preempt_disable()
1239#define bh_lru_unlock() preempt_enable()
1240#endif
1241
1242static inline void check_irqs_on(void)
1243{
1244#ifdef irqs_disabled
1245 BUG_ON(irqs_disabled());
1246#endif
1247}
1248
1249/*
1250 * The LRU management algorithm is dopey-but-simple. Sorry.
1251 */
1252static void bh_lru_install(struct buffer_head *bh)
1253{
1254 struct buffer_head *evictee = NULL;
1255 struct bh_lru *lru;
1256
1257 check_irqs_on();
1258 bh_lru_lock();
1259 lru = &__get_cpu_var(bh_lrus);
1260 if (lru->bhs[0] != bh) {
1261 struct buffer_head *bhs[BH_LRU_SIZE];
1262 int in;
1263 int out = 0;
1264
1265 get_bh(bh);
1266 bhs[out++] = bh;
1267 for (in = 0; in < BH_LRU_SIZE; in++) {
1268 struct buffer_head *bh2 = lru->bhs[in];
1269
1270 if (bh2 == bh) {
1271 __brelse(bh2);
1272 } else {
1273 if (out >= BH_LRU_SIZE) {
1274 BUG_ON(evictee != NULL);
1275 evictee = bh2;
1276 } else {
1277 bhs[out++] = bh2;
1278 }
1279 }
1280 }
1281 while (out < BH_LRU_SIZE)
1282 bhs[out++] = NULL;
1283 memcpy(lru->bhs, bhs, sizeof(bhs));
1284 }
1285 bh_lru_unlock();
1286
1287 if (evictee)
1288 __brelse(evictee);
1289}
1290
1291/*
1292 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1293 */
1294static struct buffer_head *
1295lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1296{
1297 struct buffer_head *ret = NULL;
1298 struct bh_lru *lru;
1299 unsigned int i;
1300
1301 check_irqs_on();
1302 bh_lru_lock();
1303 lru = &__get_cpu_var(bh_lrus);
1304 for (i = 0; i < BH_LRU_SIZE; i++) {
1305 struct buffer_head *bh = lru->bhs[i];
1306
1307 if (bh && bh->b_bdev == bdev &&
1308 bh->b_blocknr == block && bh->b_size == size) {
1309 if (i) {
1310 while (i) {
1311 lru->bhs[i] = lru->bhs[i - 1];
1312 i--;
1313 }
1314 lru->bhs[0] = bh;
1315 }
1316 get_bh(bh);
1317 ret = bh;
1318 break;
1319 }
1320 }
1321 bh_lru_unlock();
1322 return ret;
1323}
1324
1325/*
1326 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1327 * it in the LRU and mark it as accessed. If it is not present then return
1328 * NULL
1329 */
1330struct buffer_head *
1331__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1332{
1333 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1334
1335 if (bh == NULL) {
1336 bh = __find_get_block_slow(bdev, block);
1337 if (bh)
1338 bh_lru_install(bh);
1339 }
1340 if (bh)
1341 touch_buffer(bh);
1342 return bh;
1343}
1344EXPORT_SYMBOL(__find_get_block);
1345
1346/*
1347 * __getblk will locate (and, if necessary, create) the buffer_head
1348 * which corresponds to the passed block_device, block and size. The
1349 * returned buffer has its reference count incremented.
1350 *
1351 * __getblk() cannot fail - it just keeps trying. If you pass it an
1352 * illegal block number, __getblk() will happily return a buffer_head
1353 * which represents the non-existent block. Very weird.
1354 *
1355 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1356 * attempt is failing. FIXME, perhaps?
1357 */
1358struct buffer_head *
1359__getblk(struct block_device *bdev, sector_t block, unsigned size)
1360{
1361 struct buffer_head *bh = __find_get_block(bdev, block, size);
1362
1363 might_sleep();
1364 if (bh == NULL)
1365 bh = __getblk_slow(bdev, block, size);
1366 return bh;
1367}
1368EXPORT_SYMBOL(__getblk);
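/*
 * Example (not part of the original file): because __getblk() never does
 * I/O, a caller that needs the block's current contents must check
 * buffer_uptodate() and read the block in itself - which is essentially
 * what __bread() below wraps up.  A sketch with hypothetical callers:
 */
static struct buffer_head *myfs_get_and_read(struct block_device *bdev,
					     sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (bh && !buffer_uptodate(bh)) {
		ll_rw_block(READ, 1, &bh);	/* submit the read */
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {	/* I/O error */
			brelse(bh);
			return NULL;
		}
	}
	return bh;
}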
1369
1370/*
1371 * Do async read-ahead on a buffer..
1372 */
1373void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1374{
1375 struct buffer_head *bh = __getblk(bdev, block, size);
1376 if (likely(bh)) {
1377 ll_rw_block(READA, 1, &bh);
1378 brelse(bh);
1379 }
1380}
1381EXPORT_SYMBOL(__breadahead);
1382
1383/**
1384 * __bread() - reads a specified block and returns the bh
1385 * @bdev: the block_device to read from
1386 * @block: number of block
1387 * @size: size (in bytes) to read
1388 *
1389 * Reads a specified block, and returns buffer head that contains it.
1390 * It returns NULL if the block was unreadable.
1391 */
1392struct buffer_head *
1393__bread(struct block_device *bdev, sector_t block, unsigned size)
1394{
1395 struct buffer_head *bh = __getblk(bdev, block, size);
1396
1397 if (likely(bh) && !buffer_uptodate(bh))
1398 bh = __bread_slow(bh);
1399 return bh;
1400}
1401EXPORT_SYMBOL(__bread);
1402
1403/*
1404 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1405 * This doesn't race because it runs in each cpu either in irq
1406 * or with preempt disabled.
1407 */
1408static void invalidate_bh_lru(void *arg)
1409{
1410 struct bh_lru *b = &get_cpu_var(bh_lrus);
1411 int i;
1412
1413 for (i = 0; i < BH_LRU_SIZE; i++) {
1414 brelse(b->bhs[i]);
1415 b->bhs[i] = NULL;
1416 }
1417 put_cpu_var(bh_lrus);
1418}
1419
1420void invalidate_bh_lrus(void)
1421{
1422 on_each_cpu(invalidate_bh_lru, NULL, 1);
1423}
1424EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1425
1426void set_bh_page(struct buffer_head *bh,
1427 struct page *page, unsigned long offset)
1428{
1429 bh->b_page = page;
1430 BUG_ON(offset >= PAGE_SIZE);
1431 if (PageHighMem(page))
1432 /*
1433 * This catches illegal uses and preserves the offset:
1434 */
1435 bh->b_data = (char *)(0 + offset);
1436 else
1437 bh->b_data = page_address(page) + offset;
1438}
1439EXPORT_SYMBOL(set_bh_page);
1440
1441/*
1442 * Called when truncating a buffer on a page completely.
1443 */
1444static void discard_buffer(struct buffer_head * bh)
1445{
1446 lock_buffer(bh);
1447 clear_buffer_dirty(bh);
1448 bh->b_bdev = NULL;
1449 clear_buffer_mapped(bh);
1450 clear_buffer_req(bh);
1451 clear_buffer_new(bh);
1452 clear_buffer_delay(bh);
1453 clear_buffer_unwritten(bh);
1454 unlock_buffer(bh);
1455}
1456
1457/**
1458 * block_invalidatepage - invalidate part of all of a buffer-backed page
1459 *
1460 * @page: the page which is affected
1461 * @offset: the index of the truncation point
1462 *
1463 * block_invalidatepage() is called when all or part of the page has become
1464 * invalidated by a truncate operation.
1465 *
1466 * block_invalidatepage() does not have to release all buffers, but it must
1467 * ensure that no dirty buffer is left outside @offset and that no I/O
1468 * is underway against any of the blocks which are outside the truncation
1469 * point. Because the caller is about to free (and possibly reuse) those
1470 * blocks on-disk.
1471 */
1472void block_invalidatepage(struct page *page, unsigned long offset)
1473{
1474 struct buffer_head *head, *bh, *next;
1475 unsigned int curr_off = 0;
1476
1477 BUG_ON(!PageLocked(page));
1478 if (!page_has_buffers(page))
1479 goto out;
1480
1481 head = page_buffers(page);
1482 bh = head;
1483 do {
1484 unsigned int next_off = curr_off + bh->b_size;
1485 next = bh->b_this_page;
1486
1487 /*
1488 * is this block fully invalidated?
1489 */
1490 if (offset <= curr_off)
1491 discard_buffer(bh);
1492 curr_off = next_off;
1493 bh = next;
1494 } while (bh != head);
1495
1496 /*
1497 * We release buffers only if the entire page is being invalidated.
1498 * The get_block cached value has been unconditionally invalidated,
1499 * so real IO is not possible anymore.
1500 */
1501 if (offset == 0)
1502 try_to_release_page(page, 0);
1503out:
1504 return;
1505}
1506EXPORT_SYMBOL(block_invalidatepage);
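/*
 * Example (not part of the original file): filesystems whose data pages
 * are plain buffer-backed pages can point ->invalidatepage straight at
 * block_invalidatepage() in their address_space_operations.  Hypothetical
 * fragment:
 */
static const struct address_space_operations myfs_data_aops_sketch = {
	.invalidatepage	= block_invalidatepage,
};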
1507
1508/*
1509 * We attach and possibly dirty the buffers atomically wrt
1510 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1511 * is already excluded via the page lock.
1512 */
1513void create_empty_buffers(struct page *page,
1514 unsigned long blocksize, unsigned long b_state)
1515{
1516 struct buffer_head *bh, *head, *tail;
1517
1518 head = alloc_page_buffers(page, blocksize, 1);
1519 bh = head;
1520 do {
1521 bh->b_state |= b_state;
1522 tail = bh;
1523 bh = bh->b_this_page;
1524 } while (bh);
1525 tail->b_this_page = head;
1526
1527 spin_lock(&page->mapping->private_lock);
1528 if (PageUptodate(page) || PageDirty(page)) {
1529 bh = head;
1530 do {
1531 if (PageDirty(page))
1532 set_buffer_dirty(bh);
1533 if (PageUptodate(page))
1534 set_buffer_uptodate(bh);
1535 bh = bh->b_this_page;
1536 } while (bh != head);
1537 }
1538 attach_page_buffers(page, head);
1539 spin_unlock(&page->mapping->private_lock);
1540}
1541EXPORT_SYMBOL(create_empty_buffers);
1542
1543/*
1544 * We are taking a block for data and we don't want any output from any
1545 * buffer-cache aliases starting from return from that function and
1546 * until the moment when something will explicitly mark the buffer
1547 * dirty (hopefully that will not happen until we will free that block ;-)
1548 * We don't even need to mark it not-uptodate - nobody can expect
1550 * anything from a newly allocated buffer anyway. We used to use
1550 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1551 * don't want to mark the alias unmapped, for example - it would confuse
1552 * anyone who might pick it with bread() afterwards...
1553 *
1554 * Also.. Note that bforget() doesn't lock the buffer. So there can
1555 * be writeout I/O going on against recently-freed buffers. We don't
1556 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1557 * only if we really need to. That happens here.
1558 */
1559void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1560{
1561 struct buffer_head *old_bh;
1562
1563 might_sleep();
1564
1565 old_bh = __find_get_block_slow(bdev, block);
1566 if (old_bh) {
1567 clear_buffer_dirty(old_bh);
1568 wait_on_buffer(old_bh);
1569 clear_buffer_req(old_bh);
1570 __brelse(old_bh);
1571 }
1572}
1573EXPORT_SYMBOL(unmap_underlying_metadata);
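/*
 * Example (not part of the original file): the pattern the generic write
 * helpers below apply.  When get_block() hands back a freshly allocated
 * block (BH_New set), any stale buffer_head the block device mapping still
 * holds for that block number is neutralised before the new data buffer is
 * used.  "myfs_handle_new_buffer" is hypothetical.
 */
static void myfs_handle_new_buffer(struct buffer_head *bh)
{
	if (buffer_new(bh))
		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
}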
1574
1575/*
1576 * NOTE! All mapped/uptodate combinations are valid:
1577 *
1578 * Mapped Uptodate Meaning
1579 *
1580 * No No "unknown" - must do get_block()
1581 * No Yes "hole" - zero-filled
1582 * Yes No "allocated" - allocated on disk, not read in
1583 * Yes Yes "valid" - allocated and up-to-date in memory.
1584 *
1585 * "Dirty" is valid only with the last case (mapped+uptodate).
1586 */
1587
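/*
 * Example (not part of the original file): the shape of the get_block_t
 * callback the helpers below are built around.  A real implementation
 * looks up (and, when "create" is set, allocates) the on-disk block
 * backing "iblock" and fills in the buffer_head state per the table
 * above, usually via map_bh().  The identity mapping here is purely
 * hypothetical.
 */
static int myfs_get_block_sketch(struct inode *inode, sector_t iblock,
				 struct buffer_head *bh_result, int create)
{
	/* Pretend file block "iblock" lives at disk block "iblock". */
	map_bh(bh_result, inode->i_sb, iblock);
	return 0;
}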
1588/*
1589 * While block_write_full_page is writing back the dirty buffers under
1590 * the page lock, whoever dirtied the buffers may decide to clean them
1591 * again at any time. We handle that by only looking at the buffer
1592 * state inside lock_buffer().
1593 *
1594 * If block_write_full_page() is called for regular writeback
1595 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1596 * locked buffer. This only can happen if someone has written the buffer
1597 * directly, with submit_bh(). At the address_space level PageWriteback
1598 * prevents this contention from occurring.
1599 *
1600 * If block_write_full_page() is called with wbc->sync_mode ==
1601 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
1602 * causes the writes to be flagged as synchronous writes, but the
1603 * block device queue will NOT be unplugged, since usually many pages
1604 * will be pushed out before the higher-level caller actually
1605 * waits for the writes to be completed. The various wait functions,
1606 * such as wait_on_writeback_range() will ultimately call sync_page()
1607 * which will ultimately call blk_run_backing_dev(), which will end up
1608 * unplugging the device queue.
1609 */
1610static int __block_write_full_page(struct inode *inode, struct page *page,
1611 get_block_t *get_block, struct writeback_control *wbc)
1612{
1613 int err;
1614 sector_t block;
1615 sector_t last_block;
1616 struct buffer_head *bh, *head;
1617 const unsigned blocksize = 1 << inode->i_blkbits;
1618 int nr_underway = 0;
1619 int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1620 WRITE_SYNC_PLUG : WRITE);
1621
1622 BUG_ON(!PageLocked(page));
1623
1624 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1625
1626 if (!page_has_buffers(page)) {
1627 create_empty_buffers(page, blocksize,
1628 (1 << BH_Dirty)|(1 << BH_Uptodate));
1629 }
1630
1631 /*
1632 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1633 * here, and the (potentially unmapped) buffers may become dirty at
1634 * any time. If a buffer becomes dirty here after we've inspected it
1635 * then we just miss that fact, and the page stays dirty.
1636 *
1637 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1638 * handle that here by just cleaning them.
1639 */
1640
1641 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1642 head = page_buffers(page);
1643 bh = head;
1644
1645 /*
1646 * Get all the dirty buffers mapped to disk addresses and
1647 * handle any aliases from the underlying blockdev's mapping.
1648 */
1649 do {
1650 if (block > last_block) {
1651 /*
1652 * mapped buffers outside i_size will occur, because
1653 * this page can be outside i_size when there is a
1654 * truncate in progress.
1655 */
1656 /*
1657 * The buffer was zeroed by block_write_full_page()
1658 */
1659 clear_buffer_dirty(bh);
1660 set_buffer_uptodate(bh);
1661 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1662 buffer_dirty(bh)) {
1663 WARN_ON(bh->b_size != blocksize);
1664 err = get_block(inode, block, bh, 1);
1665 if (err)
1666 goto recover;
1667 clear_buffer_delay(bh);
1668 if (buffer_new(bh)) {
1669 /* blockdev mappings never come here */
1670 clear_buffer_new(bh);
1671 unmap_underlying_metadata(bh->b_bdev,
1672 bh->b_blocknr);
1673 }
1674 }
1675 bh = bh->b_this_page;
1676 block++;
1677 } while (bh != head);
1678
1679 do {
1680 if (!buffer_mapped(bh))
1681 continue;
1682 /*
1683 * If it's a fully non-blocking write attempt and we cannot
1684 * lock the buffer then redirty the page. Note that this can
1685 * potentially cause a busy-wait loop from pdflush and kswapd
1686 * activity, but those code paths have their own higher-level
1687 * throttling.
1688 */
1689 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1690 lock_buffer(bh);
1691 } else if (!trylock_buffer(bh)) {
1692 redirty_page_for_writepage(wbc, page);
1693 continue;
1694 }
1695 if (test_clear_buffer_dirty(bh)) {
1696 mark_buffer_async_write(bh);
1697 } else {
1698 unlock_buffer(bh);
1699 }
1700 } while ((bh = bh->b_this_page) != head);
1701
1702 /*
1703 * The page and its buffers are protected by PageWriteback(), so we can
1704 * drop the bh refcounts early.
1705 */
1706 BUG_ON(PageWriteback(page));
1707 set_page_writeback(page);
1708
1709 do {
1710 struct buffer_head *next = bh->b_this_page;
1711 if (buffer_async_write(bh)) {
1712 submit_bh(write_op, bh);
1713 nr_underway++;
1714 }
1715 bh = next;
1716 } while (bh != head);
1717 unlock_page(page);
1718
1719 err = 0;
1720done:
1721 if (nr_underway == 0) {
1722 /*
1723 * The page was marked dirty, but the buffers were
1724 * clean. Someone wrote them back by hand with
1725 * ll_rw_block/submit_bh. A rare case.
1726 */
1727 end_page_writeback(page);
1728
1729 /*
1730 * The page and buffer_heads can be released at any time from
1731 * here on.
1732 */
1733 }
1734 return err;
1735
1736recover:
1737 /*
1738 * ENOSPC, or some other error. We may already have added some
1739 * blocks to the file, so we need to write these out to avoid
1740 * exposing stale data.
1741 * The page is currently locked and not marked for writeback
1742 */
1743 bh = head;
1744 /* Recovery: lock and submit the mapped buffers */
1745 do {
1746 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1747 !buffer_delay(bh)) {
1748 lock_buffer(bh);
1749 mark_buffer_async_write(bh);
1750 } else {
1751 /*
1752 * The buffer may have been set dirty during
1753 * attachment to a dirty page.
1754 */
1755 clear_buffer_dirty(bh);
1756 }
1757 } while ((bh = bh->b_this_page) != head);
1758 SetPageError(page);
1759 BUG_ON(PageWriteback(page));
1760 mapping_set_error(page->mapping, err);
1761 set_page_writeback(page);
1762 do {
1763 struct buffer_head *next = bh->b_this_page;
1764 if (buffer_async_write(bh)) {
1765 clear_buffer_dirty(bh);
a64c8610 1766 submit_bh(write_op, bh);
1da177e4
LT
1767 nr_underway++;
1768 }
1da177e4
LT
1769 bh = next;
1770 } while (bh != head);
ffda9d30 1771 unlock_page(page);
1da177e4
LT
1772 goto done;
1773}
1774
afddba49
NP
1775/*
1776 * If a page has any new buffers, zero them out here, and mark them uptodate
1777 * and dirty so they'll be written out (in order to prevent uninitialised
1778 * block data from leaking). And clear the new bit.
1779 */
1780void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1781{
1782 unsigned int block_start, block_end;
1783 struct buffer_head *head, *bh;
1784
1785 BUG_ON(!PageLocked(page));
1786 if (!page_has_buffers(page))
1787 return;
1788
1789 bh = head = page_buffers(page);
1790 block_start = 0;
1791 do {
1792 block_end = block_start + bh->b_size;
1793
1794 if (buffer_new(bh)) {
1795 if (block_end > from && block_start < to) {
1796 if (!PageUptodate(page)) {
1797 unsigned start, size;
1798
1799 start = max(from, block_start);
1800 size = min(to, block_end) - start;
1801
eebd2aa3 1802 zero_user(page, start, size);
afddba49
NP
1803 set_buffer_uptodate(bh);
1804 }
1805
1806 clear_buffer_new(bh);
1807 mark_buffer_dirty(bh);
1808 }
1809 }
1810
1811 block_start = block_end;
1812 bh = bh->b_this_page;
1813 } while (bh != head);
1814}
1815EXPORT_SYMBOL(page_zero_new_buffers);
1816
1da177e4
LT
1817static int __block_prepare_write(struct inode *inode, struct page *page,
1818 unsigned from, unsigned to, get_block_t *get_block)
1819{
1820 unsigned block_start, block_end;
1821 sector_t block;
1822 int err = 0;
1823 unsigned blocksize, bbits;
1824 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1825
1826 BUG_ON(!PageLocked(page));
1827 BUG_ON(from > PAGE_CACHE_SIZE);
1828 BUG_ON(to > PAGE_CACHE_SIZE);
1829 BUG_ON(from > to);
1830
1831 blocksize = 1 << inode->i_blkbits;
1832 if (!page_has_buffers(page))
1833 create_empty_buffers(page, blocksize, 0);
1834 head = page_buffers(page);
1835
1836 bbits = inode->i_blkbits;
1837 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1838
1839 for(bh = head, block_start = 0; bh != head || !block_start;
1840 block++, block_start=block_end, bh = bh->b_this_page) {
1841 block_end = block_start + blocksize;
1842 if (block_end <= from || block_start >= to) {
1843 if (PageUptodate(page)) {
1844 if (!buffer_uptodate(bh))
1845 set_buffer_uptodate(bh);
1846 }
1847 continue;
1848 }
1849 if (buffer_new(bh))
1850 clear_buffer_new(bh);
1851 if (!buffer_mapped(bh)) {
b0cf2321 1852 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
1853 err = get_block(inode, block, bh, 1);
1854 if (err)
f3ddbdc6 1855 break;
1da177e4 1856 if (buffer_new(bh)) {
1da177e4
LT
1857 unmap_underlying_metadata(bh->b_bdev,
1858 bh->b_blocknr);
1859 if (PageUptodate(page)) {
637aff46 1860 clear_buffer_new(bh);
1da177e4 1861 set_buffer_uptodate(bh);
637aff46 1862 mark_buffer_dirty(bh);
1da177e4
LT
1863 continue;
1864 }
eebd2aa3
CL
1865 if (block_end > to || block_start < from)
1866 zero_user_segments(page,
1867 to, block_end,
1868 block_start, from);
1da177e4
LT
1869 continue;
1870 }
1871 }
1872 if (PageUptodate(page)) {
1873 if (!buffer_uptodate(bh))
1874 set_buffer_uptodate(bh);
1875 continue;
1876 }
1877 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
33a266dd 1878 !buffer_unwritten(bh) &&
1da177e4
LT
1879 (block_start < from || block_end > to)) {
1880 ll_rw_block(READ, 1, &bh);
1881 *wait_bh++=bh;
1882 }
1883 }
1884 /*
1885 * If we issued read requests - let them complete.
1886 */
1887 while(wait_bh > wait) {
1888 wait_on_buffer(*--wait_bh);
1889 if (!buffer_uptodate(*wait_bh))
f3ddbdc6 1890 err = -EIO;
1da177e4 1891 }
afddba49
NP
1892 if (unlikely(err))
1893 page_zero_new_buffers(page, from, to);
1da177e4
LT
1894 return err;
1895}
1896
1897static int __block_commit_write(struct inode *inode, struct page *page,
1898 unsigned from, unsigned to)
1899{
1900 unsigned block_start, block_end;
1901 int partial = 0;
1902 unsigned blocksize;
1903 struct buffer_head *bh, *head;
1904
1905 blocksize = 1 << inode->i_blkbits;
1906
1907 for(bh = head = page_buffers(page), block_start = 0;
1908 bh != head || !block_start;
1909 block_start=block_end, bh = bh->b_this_page) {
1910 block_end = block_start + blocksize;
1911 if (block_end <= from || block_start >= to) {
1912 if (!buffer_uptodate(bh))
1913 partial = 1;
1914 } else {
1915 set_buffer_uptodate(bh);
1916 mark_buffer_dirty(bh);
1917 }
afddba49 1918 clear_buffer_new(bh);
1da177e4
LT
1919 }
1920
1921 /*
1922 * If this is a partial write which happened to make all buffers
1923 * uptodate then we can optimize away a bogus readpage() for
1924 * the next read(). Here we 'discover' whether the page went
1925 * uptodate as a result of this (potentially partial) write.
1926 */
1927 if (!partial)
1928 SetPageUptodate(page);
1929 return 0;
1930}
1931
afddba49
NP
1932/*
1933 * block_write_begin takes care of the basic task of block allocation and
1934 * bringing partial write blocks uptodate first.
1935 *
1936 * If *pagep is not NULL, then block_write_begin uses the locked page
1937 * at *pagep rather than allocating its own. In this case, the page will
1938 * not be unlocked or deallocated on failure.
1939 */
1940int block_write_begin(struct file *file, struct address_space *mapping,
1941 loff_t pos, unsigned len, unsigned flags,
1942 struct page **pagep, void **fsdata,
1943 get_block_t *get_block)
1944{
1945 struct inode *inode = mapping->host;
1946 int status = 0;
1947 struct page *page;
1948 pgoff_t index;
1949 unsigned start, end;
1950 int ownpage = 0;
1951
1952 index = pos >> PAGE_CACHE_SHIFT;
1953 start = pos & (PAGE_CACHE_SIZE - 1);
1954 end = start + len;
1955
1956 page = *pagep;
1957 if (page == NULL) {
1958 ownpage = 1;
54566b2c 1959 page = grab_cache_page_write_begin(mapping, index, flags);
afddba49
NP
1960 if (!page) {
1961 status = -ENOMEM;
1962 goto out;
1963 }
1964 *pagep = page;
1965 } else
1966 BUG_ON(!PageLocked(page));
1967
1968 status = __block_prepare_write(inode, page, start, end, get_block);
1969 if (unlikely(status)) {
1970 ClearPageUptodate(page);
1971
1972 if (ownpage) {
1973 unlock_page(page);
1974 page_cache_release(page);
1975 *pagep = NULL;
1976
1977 /*
1978 * prepare_write() may have instantiated a few blocks
 1979			 * outside i_size. Trim these off again. We don't need
 1980			 * i_size_read() because we hold i_mutex.
1981 */
1982 if (pos + len > inode->i_size)
1983 vmtruncate(inode, inode->i_size);
1984 }
afddba49
NP
1985 }
1986
1987out:
1988 return status;
1989}
1990EXPORT_SYMBOL(block_write_begin);
1991
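/*
 * Illustrative sketch (not part of this file): a buffer-backed filesystem's
 * ->write_begin can usually just delegate to block_write_begin() with its own
 * get_block callback.  "myfs_write_begin" and "myfs_get_block" are
 * hypothetical names used only for this example.
 */
#if 0	/* example only, not built */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;	/* let block_write_begin allocate and lock the page */
	return block_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, myfs_get_block);
}
#endif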
1992int block_write_end(struct file *file, struct address_space *mapping,
1993 loff_t pos, unsigned len, unsigned copied,
1994 struct page *page, void *fsdata)
1995{
1996 struct inode *inode = mapping->host;
1997 unsigned start;
1998
1999 start = pos & (PAGE_CACHE_SIZE - 1);
2000
2001 if (unlikely(copied < len)) {
2002 /*
2003 * The buffers that were written will now be uptodate, so we
2004 * don't have to worry about a readpage reading them and
2005 * overwriting a partial write. However if we have encountered
2006 * a short write and only partially written into a buffer, it
2007 * will not be marked uptodate, so a readpage might come in and
2008 * destroy our partial write.
2009 *
2010 * Do the simplest thing, and just treat any short write to a
2011 * non uptodate page as a zero-length write, and force the
2012 * caller to redo the whole thing.
2013 */
2014 if (!PageUptodate(page))
2015 copied = 0;
2016
2017 page_zero_new_buffers(page, start+copied, start+len);
2018 }
2019 flush_dcache_page(page);
2020
2021 /* This could be a short (even 0-length) commit */
2022 __block_commit_write(inode, page, start, start+copied);
2023
2024 return copied;
2025}
2026EXPORT_SYMBOL(block_write_end);
2027
2028int generic_write_end(struct file *file, struct address_space *mapping,
2029 loff_t pos, unsigned len, unsigned copied,
2030 struct page *page, void *fsdata)
2031{
2032 struct inode *inode = mapping->host;
c7d206b3 2033 int i_size_changed = 0;
afddba49
NP
2034
2035 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2036
2037 /*
2038	 * No need to use i_size_read() here; i_size
2039	 * cannot change under us because we hold i_mutex.
2040 *
2041 * But it's important to update i_size while still holding page lock:
2042 * page writeout could otherwise come in and zero beyond i_size.
2043 */
2044 if (pos+copied > inode->i_size) {
2045 i_size_write(inode, pos+copied);
c7d206b3 2046 i_size_changed = 1;
afddba49
NP
2047 }
2048
2049 unlock_page(page);
2050 page_cache_release(page);
2051
c7d206b3
JK
2052 /*
2053 * Don't mark the inode dirty under page lock. First, it unnecessarily
2054 * makes the holding time of page lock longer. Second, it forces lock
2055 * ordering of page lock and transaction start for journaling
2056 * filesystems.
2057 */
2058 if (i_size_changed)
2059 mark_inode_dirty(inode);
2060
afddba49
NP
2061 return copied;
2062}
2063EXPORT_SYMBOL(generic_write_end);
2064
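/*
 * Illustrative sketch (not part of this file): the matching ->write_end for
 * the hypothetical myfs_write_begin() above is typically nothing more than a
 * call to generic_write_end(), which commits the copied bytes and updates
 * i_size.  Hypothetical myfs_* names.
 */
#if 0	/* example only, not built */
static int myfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	return generic_write_end(file, mapping, pos, len, copied, page, fsdata);
}
#endif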
8ab22b9a
HH
2065/*
2066 * block_is_partially_uptodate checks whether buffers within a page are
2067 * uptodate or not.
2068 *
2069 * Returns true if all buffers which correspond to a file portion
2070 * we want to read are uptodate.
2071 */
2072int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2073 unsigned long from)
2074{
2075 struct inode *inode = page->mapping->host;
2076 unsigned block_start, block_end, blocksize;
2077 unsigned to;
2078 struct buffer_head *bh, *head;
2079 int ret = 1;
2080
2081 if (!page_has_buffers(page))
2082 return 0;
2083
2084 blocksize = 1 << inode->i_blkbits;
2085 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2086 to = from + to;
2087 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2088 return 0;
2089
2090 head = page_buffers(page);
2091 bh = head;
2092 block_start = 0;
2093 do {
2094 block_end = block_start + blocksize;
2095 if (block_end > from && block_start < to) {
2096 if (!buffer_uptodate(bh)) {
2097 ret = 0;
2098 break;
2099 }
2100 if (block_end >= to)
2101 break;
2102 }
2103 block_start = block_end;
2104 bh = bh->b_this_page;
2105 } while (bh != head);
2106
2107 return ret;
2108}
2109EXPORT_SYMBOL(block_is_partially_uptodate);
2110
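/*
 * Illustrative sketch (not part of this file): a buffer-backed filesystem can
 * plug this helper straight into its address_space_operations, so that a read
 * entirely covered by uptodate buffers does not force a full readpage.
 * "myfs_aops" and the other myfs_* entries are hypothetical.
 */
#if 0	/* example only, not built */
static const struct address_space_operations myfs_aops = {
	.readpage		= myfs_readpage,
	.write_begin		= myfs_write_begin,
	.write_end		= myfs_write_end,
	.is_partially_uptodate	= block_is_partially_uptodate,
};
#endif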
1da177e4
LT
2111/*
2112 * Generic "read page" function for block devices that have the normal
2113 * get_block functionality. This covers most of the block-device-based filesystems.
2114 * Reads the page asynchronously --- the unlock_buffer() and
2115 * set/clear_buffer_uptodate() functions propagate buffer state into the
2116 * page struct once IO has completed.
2117 */
2118int block_read_full_page(struct page *page, get_block_t *get_block)
2119{
2120 struct inode *inode = page->mapping->host;
2121 sector_t iblock, lblock;
2122 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2123 unsigned int blocksize;
2124 int nr, i;
2125 int fully_mapped = 1;
2126
cd7619d6 2127 BUG_ON(!PageLocked(page));
1da177e4
LT
2128 blocksize = 1 << inode->i_blkbits;
2129 if (!page_has_buffers(page))
2130 create_empty_buffers(page, blocksize, 0);
2131 head = page_buffers(page);
2132
2133 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2134 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2135 bh = head;
2136 nr = 0;
2137 i = 0;
2138
2139 do {
2140 if (buffer_uptodate(bh))
2141 continue;
2142
2143 if (!buffer_mapped(bh)) {
c64610ba
AM
2144 int err = 0;
2145
1da177e4
LT
2146 fully_mapped = 0;
2147 if (iblock < lblock) {
b0cf2321 2148 WARN_ON(bh->b_size != blocksize);
c64610ba
AM
2149 err = get_block(inode, iblock, bh, 0);
2150 if (err)
1da177e4
LT
2151 SetPageError(page);
2152 }
2153 if (!buffer_mapped(bh)) {
eebd2aa3 2154 zero_user(page, i * blocksize, blocksize);
c64610ba
AM
2155 if (!err)
2156 set_buffer_uptodate(bh);
1da177e4
LT
2157 continue;
2158 }
2159 /*
2160 * get_block() might have updated the buffer
2161 * synchronously
2162 */
2163 if (buffer_uptodate(bh))
2164 continue;
2165 }
2166 arr[nr++] = bh;
2167 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2168
2169 if (fully_mapped)
2170 SetPageMappedToDisk(page);
2171
2172 if (!nr) {
2173 /*
2174 * All buffers are uptodate - we can set the page uptodate
2175 * as well. But not if get_block() returned an error.
2176 */
2177 if (!PageError(page))
2178 SetPageUptodate(page);
2179 unlock_page(page);
2180 return 0;
2181 }
2182
2183 /* Stage two: lock the buffers */
2184 for (i = 0; i < nr; i++) {
2185 bh = arr[i];
2186 lock_buffer(bh);
2187 mark_buffer_async_read(bh);
2188 }
2189
2190 /*
2191 * Stage 3: start the IO. Check for uptodateness
2192 * inside the buffer lock in case another process reading
2193 * the underlying blockdev brought it uptodate (the sct fix).
2194 */
2195 for (i = 0; i < nr; i++) {
2196 bh = arr[i];
2197 if (buffer_uptodate(bh))
2198 end_buffer_async_read(bh, 1);
2199 else
2200 submit_bh(READ, bh);
2201 }
2202 return 0;
2203}
2204
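/*
 * Illustrative sketch (not part of this file): a typical ->readpage for a
 * buffer-backed filesystem is a one-line wrapper around
 * block_read_full_page().  "myfs_readpage" and "myfs_get_block" are
 * hypothetical names.
 */
#if 0	/* example only, not built */
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}
#endif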
2205/* utility function for filesystems that need to do work on expanding
89e10787 2206 * truncates. Uses filesystem pagecache writes to allow the filesystem to
1da177e4
LT
2207 * deal with the hole.
2208 */
89e10787 2209int generic_cont_expand_simple(struct inode *inode, loff_t size)
1da177e4
LT
2210{
2211 struct address_space *mapping = inode->i_mapping;
2212 struct page *page;
89e10787 2213 void *fsdata;
05eb0b51 2214 unsigned long limit;
1da177e4
LT
2215 int err;
2216
2217 err = -EFBIG;
2218 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2219 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2220 send_sig(SIGXFSZ, current, 0);
2221 goto out;
2222 }
2223 if (size > inode->i_sb->s_maxbytes)
2224 goto out;
2225
89e10787
NP
2226 err = pagecache_write_begin(NULL, mapping, size, 0,
2227 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2228 &page, &fsdata);
2229 if (err)
05eb0b51 2230 goto out;
05eb0b51 2231
89e10787
NP
2232 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2233 BUG_ON(err > 0);
05eb0b51 2234
1da177e4
LT
2235out:
2236 return err;
2237}
2238
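/*
 * Illustrative sketch (not part of this file): a filesystem that cannot
 * represent holes can call generic_cont_expand_simple() from its setattr
 * path when the requested size grows the file.  Hedged example with
 * hypothetical myfs_* names; permission checks and error paths trimmed.
 */
#if 0	/* example only, not built */
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err = 0;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size)
		err = generic_cont_expand_simple(inode, attr->ia_size);
	if (!err)
		err = inode_setattr(inode, attr);
	return err;
}
#endif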
f1e3af72
AB
2239static int cont_expand_zero(struct file *file, struct address_space *mapping,
2240 loff_t pos, loff_t *bytes)
1da177e4 2241{
1da177e4 2242 struct inode *inode = mapping->host;
1da177e4 2243 unsigned blocksize = 1 << inode->i_blkbits;
89e10787
NP
2244 struct page *page;
2245 void *fsdata;
2246 pgoff_t index, curidx;
2247 loff_t curpos;
2248 unsigned zerofrom, offset, len;
2249 int err = 0;
1da177e4 2250
89e10787
NP
2251 index = pos >> PAGE_CACHE_SHIFT;
2252 offset = pos & ~PAGE_CACHE_MASK;
2253
2254 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2255 zerofrom = curpos & ~PAGE_CACHE_MASK;
1da177e4
LT
2256 if (zerofrom & (blocksize-1)) {
2257 *bytes |= (blocksize-1);
2258 (*bytes)++;
2259 }
89e10787 2260 len = PAGE_CACHE_SIZE - zerofrom;
1da177e4 2261
89e10787
NP
2262 err = pagecache_write_begin(file, mapping, curpos, len,
2263 AOP_FLAG_UNINTERRUPTIBLE,
2264 &page, &fsdata);
2265 if (err)
2266 goto out;
eebd2aa3 2267 zero_user(page, zerofrom, len);
89e10787
NP
2268 err = pagecache_write_end(file, mapping, curpos, len, len,
2269 page, fsdata);
2270 if (err < 0)
2271 goto out;
2272 BUG_ON(err != len);
2273 err = 0;
061e9746
OH
2274
2275 balance_dirty_pages_ratelimited(mapping);
89e10787 2276 }
1da177e4 2277
89e10787
NP
2278 /* page covers the boundary, find the boundary offset */
2279 if (index == curidx) {
2280 zerofrom = curpos & ~PAGE_CACHE_MASK;
1da177e4 2281		/* if we expand the file, the last block will be filled */
89e10787
NP
2282 if (offset <= zerofrom) {
2283 goto out;
2284 }
2285 if (zerofrom & (blocksize-1)) {
1da177e4
LT
2286 *bytes |= (blocksize-1);
2287 (*bytes)++;
2288 }
89e10787 2289 len = offset - zerofrom;
1da177e4 2290
89e10787
NP
2291 err = pagecache_write_begin(file, mapping, curpos, len,
2292 AOP_FLAG_UNINTERRUPTIBLE,
2293 &page, &fsdata);
2294 if (err)
2295 goto out;
eebd2aa3 2296 zero_user(page, zerofrom, len);
89e10787
NP
2297 err = pagecache_write_end(file, mapping, curpos, len, len,
2298 page, fsdata);
2299 if (err < 0)
2300 goto out;
2301 BUG_ON(err != len);
2302 err = 0;
1da177e4 2303 }
89e10787
NP
2304out:
2305 return err;
2306}
2307
2308/*
2309 * For moronic filesystems that do not allow holes in files.
2310 * We may have to extend the file.
2311 */
2312int cont_write_begin(struct file *file, struct address_space *mapping,
2313 loff_t pos, unsigned len, unsigned flags,
2314 struct page **pagep, void **fsdata,
2315 get_block_t *get_block, loff_t *bytes)
2316{
2317 struct inode *inode = mapping->host;
2318 unsigned blocksize = 1 << inode->i_blkbits;
2319 unsigned zerofrom;
2320 int err;
2321
2322 err = cont_expand_zero(file, mapping, pos, bytes);
2323 if (err)
2324 goto out;
2325
2326 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2327 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2328 *bytes |= (blocksize-1);
2329 (*bytes)++;
1da177e4 2330 }
1da177e4 2331
89e10787
NP
2332 *pagep = NULL;
2333 err = block_write_begin(file, mapping, pos, len,
2334 flags, pagep, fsdata, get_block);
1da177e4 2335out:
89e10787 2336 return err;
1da177e4
LT
2337}
2338
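/*
 * Illustrative sketch (not part of this file): cont_write_begin() wants a
 * pointer to the filesystem's record of how far the file has been zeroed out
 * on disk (FAT keeps this in mmu_private).  The "zeroed_upto" field, MYFS_I()
 * and the other myfs_* names below are hypothetical.
 */
#if 0	/* example only, not built */
static int myfs_cont_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block,
				&MYFS_I(mapping->host)->zeroed_upto);
}
#endif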
2339int block_prepare_write(struct page *page, unsigned from, unsigned to,
2340 get_block_t *get_block)
2341{
2342 struct inode *inode = page->mapping->host;
2343 int err = __block_prepare_write(inode, page, from, to, get_block);
2344 if (err)
2345 ClearPageUptodate(page);
2346 return err;
2347}
2348
2349int block_commit_write(struct page *page, unsigned from, unsigned to)
2350{
2351 struct inode *inode = page->mapping->host;
2352 __block_commit_write(inode,page,from,to);
2353 return 0;
2354}
2355
54171690
DC
2356/*
2357 * block_page_mkwrite() is not allowed to change the file size as it gets
2358 * called from a page fault handler when a page is first dirtied. Hence we must
2359 * be careful to check for EOF conditions here. We set the page up correctly
2360 * for a written page which means we get ENOSPC checking when writing into
2361 * holes and correct delalloc and unwritten extent mapping on filesystems that
2362 * support these features.
2363 *
2364 * We are not allowed to take the i_mutex here so we have to play games to
2365 * protect against truncate races as the page could now be beyond EOF. Because
2366 * vmtruncate() writes the inode size before removing pages, once we have the
2367 * page lock we can determine safely if the page is beyond EOF. If it is not
2368 * beyond EOF, then the page is guaranteed safe against truncation until we
2369 * unlock the page.
2370 */
2371int
c2ec175c 2372block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
54171690
DC
2373 get_block_t get_block)
2374{
c2ec175c 2375 struct page *page = vmf->page;
54171690
DC
2376 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2377 unsigned long end;
2378 loff_t size;
56a76f82 2379 int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
54171690
DC
2380
2381 lock_page(page);
2382 size = i_size_read(inode);
2383 if ((page->mapping != inode->i_mapping) ||
18336338 2384 (page_offset(page) > size)) {
54171690
DC
2385 /* page got truncated out from underneath us */
2386 goto out_unlock;
2387 }
2388
2389 /* page is wholly or partially inside EOF */
2390 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2391 end = size & ~PAGE_CACHE_MASK;
2392 else
2393 end = PAGE_CACHE_SIZE;
2394
2395 ret = block_prepare_write(page, 0, end, get_block);
2396 if (!ret)
2397 ret = block_commit_write(page, 0, end);
2398
56a76f82
NP
2399 if (unlikely(ret)) {
2400 if (ret == -ENOMEM)
2401 ret = VM_FAULT_OOM;
2402 else /* -ENOSPC, -EIO, etc */
2403 ret = VM_FAULT_SIGBUS;
2404 }
c2ec175c 2405
56a76f82 2406out_unlock:
54171690
DC
2407 unlock_page(page);
2408 return ret;
2409}
1da177e4
LT
2410
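/*
 * Illustrative sketch (not part of this file): a filesystem hooks the helper
 * above into its vm_operations so that the first write fault on a shared
 * mapping allocates blocks and gets proper ENOSPC/EIO handling.  Hypothetical
 * myfs_* names.
 */
#if 0	/* example only, not built */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, myfs_get_block);
}

static struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};
#endif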
2411/*
03158cd7 2412 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
1da177e4
LT
2413 * immediately, while under the page lock. So it needs a special end_io
2414 * handler which does not touch the bh after unlocking it.
1da177e4
LT
2415 */
2416static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2417{
68671f35 2418 __end_buffer_read_notouch(bh, uptodate);
1da177e4
LT
2419}
2420
03158cd7
NP
2421/*
2422 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2423 * the page (converting it to circular linked list and taking care of page
2424 * dirty races).
2425 */
2426static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2427{
2428 struct buffer_head *bh;
2429
2430 BUG_ON(!PageLocked(page));
2431
2432 spin_lock(&page->mapping->private_lock);
2433 bh = head;
2434 do {
2435 if (PageDirty(page))
2436 set_buffer_dirty(bh);
2437 if (!bh->b_this_page)
2438 bh->b_this_page = head;
2439 bh = bh->b_this_page;
2440 } while (bh != head);
2441 attach_page_buffers(page, head);
2442 spin_unlock(&page->mapping->private_lock);
2443}
2444
1da177e4
LT
2445/*
2446 * On entry, the page is not uptodate at all.
2447 * On exit, the page is fully uptodate in the areas outside (from,to).
2448 */
03158cd7
NP
2449int nobh_write_begin(struct file *file, struct address_space *mapping,
2450 loff_t pos, unsigned len, unsigned flags,
2451 struct page **pagep, void **fsdata,
1da177e4
LT
2452 get_block_t *get_block)
2453{
03158cd7 2454 struct inode *inode = mapping->host;
1da177e4
LT
2455 const unsigned blkbits = inode->i_blkbits;
2456 const unsigned blocksize = 1 << blkbits;
a4b0672d 2457 struct buffer_head *head, *bh;
03158cd7
NP
2458 struct page *page;
2459 pgoff_t index;
2460 unsigned from, to;
1da177e4 2461 unsigned block_in_page;
a4b0672d 2462 unsigned block_start, block_end;
1da177e4 2463 sector_t block_in_file;
1da177e4 2464 int nr_reads = 0;
1da177e4
LT
2465 int ret = 0;
2466 int is_mapped_to_disk = 1;
1da177e4 2467
03158cd7
NP
2468 index = pos >> PAGE_CACHE_SHIFT;
2469 from = pos & (PAGE_CACHE_SIZE - 1);
2470 to = from + len;
2471
54566b2c 2472 page = grab_cache_page_write_begin(mapping, index, flags);
03158cd7
NP
2473 if (!page)
2474 return -ENOMEM;
2475 *pagep = page;
2476 *fsdata = NULL;
2477
2478 if (page_has_buffers(page)) {
2479 unlock_page(page);
2480 page_cache_release(page);
2481 *pagep = NULL;
2482 return block_write_begin(file, mapping, pos, len, flags, pagep,
2483 fsdata, get_block);
2484 }
a4b0672d 2485
1da177e4
LT
2486 if (PageMappedToDisk(page))
2487 return 0;
2488
a4b0672d
NP
2489 /*
2490 * Allocate buffers so that we can keep track of state, and potentially
2491 * attach them to the page if an error occurs. In the common case of
2492 * no error, they will just be freed again without ever being attached
2493 * to the page (which is all OK, because we're under the page lock).
2494 *
2495 * Be careful: the buffer linked list is a NULL terminated one, rather
2496 * than the circular one we're used to.
2497 */
2498 head = alloc_page_buffers(page, blocksize, 0);
03158cd7
NP
2499 if (!head) {
2500 ret = -ENOMEM;
2501 goto out_release;
2502 }
a4b0672d 2503
1da177e4 2504 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
1da177e4
LT
2505
2506 /*
2507 * We loop across all blocks in the page, whether or not they are
2508 * part of the affected region. This is so we can discover if the
2509 * page is fully mapped-to-disk.
2510 */
a4b0672d 2511 for (block_start = 0, block_in_page = 0, bh = head;
1da177e4 2512 block_start < PAGE_CACHE_SIZE;
a4b0672d 2513 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
1da177e4
LT
2514 int create;
2515
a4b0672d
NP
2516 block_end = block_start + blocksize;
2517 bh->b_state = 0;
1da177e4
LT
2518 create = 1;
2519 if (block_start >= to)
2520 create = 0;
2521 ret = get_block(inode, block_in_file + block_in_page,
a4b0672d 2522 bh, create);
1da177e4
LT
2523 if (ret)
2524 goto failed;
a4b0672d 2525 if (!buffer_mapped(bh))
1da177e4 2526 is_mapped_to_disk = 0;
a4b0672d
NP
2527 if (buffer_new(bh))
2528 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2529 if (PageUptodate(page)) {
2530 set_buffer_uptodate(bh);
1da177e4 2531 continue;
a4b0672d
NP
2532 }
2533 if (buffer_new(bh) || !buffer_mapped(bh)) {
eebd2aa3
CL
2534 zero_user_segments(page, block_start, from,
2535 to, block_end);
1da177e4
LT
2536 continue;
2537 }
a4b0672d 2538 if (buffer_uptodate(bh))
1da177e4
LT
2539 continue; /* reiserfs does this */
2540 if (block_start < from || block_end > to) {
a4b0672d
NP
2541 lock_buffer(bh);
2542 bh->b_end_io = end_buffer_read_nobh;
2543 submit_bh(READ, bh);
2544 nr_reads++;
1da177e4
LT
2545 }
2546 }
2547
2548 if (nr_reads) {
1da177e4
LT
2549 /*
2550 * The page is locked, so these buffers are protected from
2551 * any VM or truncate activity. Hence we don't need to care
2552 * for the buffer_head refcounts.
2553 */
a4b0672d 2554 for (bh = head; bh; bh = bh->b_this_page) {
1da177e4
LT
2555 wait_on_buffer(bh);
2556 if (!buffer_uptodate(bh))
2557 ret = -EIO;
1da177e4
LT
2558 }
2559 if (ret)
2560 goto failed;
2561 }
2562
2563 if (is_mapped_to_disk)
2564 SetPageMappedToDisk(page);
1da177e4 2565
03158cd7 2566 *fsdata = head; /* to be released by nobh_write_end */
a4b0672d 2567
1da177e4
LT
2568 return 0;
2569
2570failed:
03158cd7 2571 BUG_ON(!ret);
1da177e4 2572 /*
a4b0672d
NP
2573 * Error recovery is a bit difficult. We need to zero out blocks that
2574 * were newly allocated, and dirty them to ensure they get written out.
2575 * Buffers need to be attached to the page at this point, otherwise
2576 * the handling of potential IO errors during writeout would be hard
2577 * (could try doing synchronous writeout, but what if that fails too?)
1da177e4 2578 */
03158cd7
NP
2579 attach_nobh_buffers(page, head);
2580 page_zero_new_buffers(page, from, to);
a4b0672d 2581
03158cd7
NP
2582out_release:
2583 unlock_page(page);
2584 page_cache_release(page);
2585 *pagep = NULL;
a4b0672d 2586
03158cd7
NP
2587 if (pos + len > inode->i_size)
2588 vmtruncate(inode, inode->i_size);
a4b0672d 2589
1da177e4
LT
2590 return ret;
2591}
03158cd7 2592EXPORT_SYMBOL(nobh_write_begin);
1da177e4 2593
03158cd7
NP
2594int nobh_write_end(struct file *file, struct address_space *mapping,
2595 loff_t pos, unsigned len, unsigned copied,
2596 struct page *page, void *fsdata)
1da177e4
LT
2597{
2598 struct inode *inode = page->mapping->host;
efdc3131 2599 struct buffer_head *head = fsdata;
03158cd7 2600 struct buffer_head *bh;
5b41e74a 2601 BUG_ON(fsdata != NULL && page_has_buffers(page));
1da177e4 2602
d4cf109f 2603 if (unlikely(copied < len) && head)
5b41e74a
DM
2604 attach_nobh_buffers(page, head);
2605 if (page_has_buffers(page))
2606 return generic_write_end(file, mapping, pos, len,
2607 copied, page, fsdata);
a4b0672d 2608
22c8ca78 2609 SetPageUptodate(page);
1da177e4 2610 set_page_dirty(page);
03158cd7
NP
2611 if (pos+copied > inode->i_size) {
2612 i_size_write(inode, pos+copied);
1da177e4
LT
2613 mark_inode_dirty(inode);
2614 }
03158cd7
NP
2615
2616 unlock_page(page);
2617 page_cache_release(page);
2618
03158cd7
NP
2619 while (head) {
2620 bh = head;
2621 head = head->b_this_page;
2622 free_buffer_head(bh);
2623 }
2624
2625 return copied;
1da177e4 2626}
03158cd7 2627EXPORT_SYMBOL(nobh_write_end);
1da177e4
LT
2628
2629/*
2630 * nobh_writepage() - based on block_write_full_page() except
2631 * that it tries to operate without attaching bufferheads to
2632 * the page.
2633 */
2634int nobh_writepage(struct page *page, get_block_t *get_block,
2635 struct writeback_control *wbc)
2636{
2637 struct inode * const inode = page->mapping->host;
2638 loff_t i_size = i_size_read(inode);
2639 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2640 unsigned offset;
1da177e4
LT
2641 int ret;
2642
2643 /* Is the page fully inside i_size? */
2644 if (page->index < end_index)
2645 goto out;
2646
2647 /* Is the page fully outside i_size? (truncate in progress) */
2648 offset = i_size & (PAGE_CACHE_SIZE-1);
2649 if (page->index >= end_index+1 || !offset) {
2650 /*
2651 * The page may have dirty, unmapped buffers. For example,
2652 * they may have been added in ext3_writepage(). Make them
2653 * freeable here, so the page does not leak.
2654 */
2655#if 0
2656 /* Not really sure about this - do we need this ? */
2657 if (page->mapping->a_ops->invalidatepage)
2658 page->mapping->a_ops->invalidatepage(page, offset);
2659#endif
2660 unlock_page(page);
2661 return 0; /* don't care */
2662 }
2663
2664 /*
2665 * The page straddles i_size. It must be zeroed out on each and every
2666 * writepage invocation because it may be mmapped. "A file is mapped
2667 * in multiples of the page size. For a file that is not a multiple of
2668 * the page size, the remaining memory is zeroed when mapped, and
2669 * writes to that region are not written out to the file."
2670 */
eebd2aa3 2671 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
1da177e4
LT
2672out:
2673 ret = mpage_writepage(page, get_block, wbc);
2674 if (ret == -EAGAIN)
2675 ret = __block_write_full_page(inode, page, get_block, wbc);
2676 return ret;
2677}
2678EXPORT_SYMBOL(nobh_writepage);
2679
03158cd7
NP
2680int nobh_truncate_page(struct address_space *mapping,
2681 loff_t from, get_block_t *get_block)
1da177e4 2682{
1da177e4
LT
2683 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2684 unsigned offset = from & (PAGE_CACHE_SIZE-1);
03158cd7
NP
2685 unsigned blocksize;
2686 sector_t iblock;
2687 unsigned length, pos;
2688 struct inode *inode = mapping->host;
1da177e4 2689 struct page *page;
03158cd7
NP
2690 struct buffer_head map_bh;
2691 int err;
1da177e4 2692
03158cd7
NP
2693 blocksize = 1 << inode->i_blkbits;
2694 length = offset & (blocksize - 1);
2695
2696 /* Block boundary? Nothing to do */
2697 if (!length)
2698 return 0;
2699
2700 length = blocksize - length;
2701 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4 2702
1da177e4 2703 page = grab_cache_page(mapping, index);
03158cd7 2704 err = -ENOMEM;
1da177e4
LT
2705 if (!page)
2706 goto out;
2707
03158cd7
NP
2708 if (page_has_buffers(page)) {
2709has_buffers:
2710 unlock_page(page);
2711 page_cache_release(page);
2712 return block_truncate_page(mapping, from, get_block);
2713 }
2714
2715 /* Find the buffer that contains "offset" */
2716 pos = blocksize;
2717 while (offset >= pos) {
2718 iblock++;
2719 pos += blocksize;
2720 }
2721
2722 err = get_block(inode, iblock, &map_bh, 0);
2723 if (err)
2724 goto unlock;
2725 /* unmapped? It's a hole - nothing to do */
2726 if (!buffer_mapped(&map_bh))
2727 goto unlock;
2728
2729 /* Ok, it's mapped. Make sure it's up-to-date */
2730 if (!PageUptodate(page)) {
2731 err = mapping->a_ops->readpage(NULL, page);
2732 if (err) {
2733 page_cache_release(page);
2734 goto out;
2735 }
2736 lock_page(page);
2737 if (!PageUptodate(page)) {
2738 err = -EIO;
2739 goto unlock;
2740 }
2741 if (page_has_buffers(page))
2742 goto has_buffers;
1da177e4 2743 }
eebd2aa3 2744 zero_user(page, offset, length);
03158cd7
NP
2745 set_page_dirty(page);
2746 err = 0;
2747
2748unlock:
1da177e4
LT
2749 unlock_page(page);
2750 page_cache_release(page);
2751out:
03158cd7 2752 return err;
1da177e4
LT
2753}
2754EXPORT_SYMBOL(nobh_truncate_page);
2755
2756int block_truncate_page(struct address_space *mapping,
2757 loff_t from, get_block_t *get_block)
2758{
2759 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2760 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2761 unsigned blocksize;
54b21a79 2762 sector_t iblock;
1da177e4
LT
2763 unsigned length, pos;
2764 struct inode *inode = mapping->host;
2765 struct page *page;
2766 struct buffer_head *bh;
1da177e4
LT
2767 int err;
2768
2769 blocksize = 1 << inode->i_blkbits;
2770 length = offset & (blocksize - 1);
2771
2772 /* Block boundary? Nothing to do */
2773 if (!length)
2774 return 0;
2775
2776 length = blocksize - length;
54b21a79 2777 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4
LT
2778
2779 page = grab_cache_page(mapping, index);
2780 err = -ENOMEM;
2781 if (!page)
2782 goto out;
2783
2784 if (!page_has_buffers(page))
2785 create_empty_buffers(page, blocksize, 0);
2786
2787 /* Find the buffer that contains "offset" */
2788 bh = page_buffers(page);
2789 pos = blocksize;
2790 while (offset >= pos) {
2791 bh = bh->b_this_page;
2792 iblock++;
2793 pos += blocksize;
2794 }
2795
2796 err = 0;
2797 if (!buffer_mapped(bh)) {
b0cf2321 2798 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
2799 err = get_block(inode, iblock, bh, 0);
2800 if (err)
2801 goto unlock;
2802 /* unmapped? It's a hole - nothing to do */
2803 if (!buffer_mapped(bh))
2804 goto unlock;
2805 }
2806
2807 /* Ok, it's mapped. Make sure it's up-to-date */
2808 if (PageUptodate(page))
2809 set_buffer_uptodate(bh);
2810
33a266dd 2811 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
1da177e4
LT
2812 err = -EIO;
2813 ll_rw_block(READ, 1, &bh);
2814 wait_on_buffer(bh);
2815 /* Uhhuh. Read error. Complain and punt. */
2816 if (!buffer_uptodate(bh))
2817 goto unlock;
2818 }
2819
eebd2aa3 2820 zero_user(page, offset, length);
1da177e4
LT
2821 mark_buffer_dirty(bh);
2822 err = 0;
2823
2824unlock:
2825 unlock_page(page);
2826 page_cache_release(page);
2827out:
2828 return err;
2829}
2830
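/*
 * Illustrative sketch (not part of this file): on a size-reducing truncate a
 * filesystem typically zeroes the tail of the new last block with
 * block_truncate_page() before releasing the blocks beyond the new EOF.
 * Hypothetical myfs_* names; the actual block freeing is omitted.
 */
#if 0	/* example only, not built */
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	/* ... release the data blocks past the new i_size ... */
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
}
#endif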
2831/*
2832 * The generic ->writepage function for buffer-backed address_spaces
2833 */
2834int block_write_full_page(struct page *page, get_block_t *get_block,
2835 struct writeback_control *wbc)
2836{
2837 struct inode * const inode = page->mapping->host;
2838 loff_t i_size = i_size_read(inode);
2839 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2840 unsigned offset;
1da177e4
LT
2841
2842 /* Is the page fully inside i_size? */
2843 if (page->index < end_index)
2844 return __block_write_full_page(inode, page, get_block, wbc);
2845
2846 /* Is the page fully outside i_size? (truncate in progress) */
2847 offset = i_size & (PAGE_CACHE_SIZE-1);
2848 if (page->index >= end_index+1 || !offset) {
2849 /*
2850 * The page may have dirty, unmapped buffers. For example,
2851 * they may have been added in ext3_writepage(). Make them
2852 * freeable here, so the page does not leak.
2853 */
aaa4059b 2854 do_invalidatepage(page, 0);
1da177e4
LT
2855 unlock_page(page);
2856 return 0; /* don't care */
2857 }
2858
2859 /*
2860 * The page straddles i_size. It must be zeroed out on each and every
2861 * writepage invocation because it may be mmapped. "A file is mapped
2862 * in multiples of the page size. For a file that is not a multiple of
2863 * the page size, the remaining memory is zeroed when mapped, and
2864 * writes to that region are not written out to the file."
2865 */
eebd2aa3 2866 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
1da177e4
LT
2867 return __block_write_full_page(inode, page, get_block, wbc);
2868}
2869
2870sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2871 get_block_t *get_block)
2872{
2873 struct buffer_head tmp;
2874 struct inode *inode = mapping->host;
2875 tmp.b_state = 0;
2876 tmp.b_blocknr = 0;
b0cf2321 2877 tmp.b_size = 1 << inode->i_blkbits;
1da177e4
LT
2878 get_block(inode, block, &tmp, 0);
2879 return tmp.b_blocknr;
2880}
2881
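/*
 * Illustrative sketch (not part of this file): ->bmap is usually just a
 * wrapper around generic_block_bmap() with the filesystem's get_block
 * callback.  Hypothetical myfs_* names.
 */
#if 0	/* example only, not built */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}
#endif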
6712ecf8 2882static void end_bio_bh_io_sync(struct bio *bio, int err)
1da177e4
LT
2883{
2884 struct buffer_head *bh = bio->bi_private;
2885
1da177e4
LT
2886 if (err == -EOPNOTSUPP) {
2887 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2888 set_bit(BH_Eopnotsupp, &bh->b_state);
2889 }
2890
08bafc03
KM
2891 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2892 set_bit(BH_Quiet, &bh->b_state);
2893
1da177e4
LT
2894 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2895 bio_put(bio);
1da177e4
LT
2896}
2897
2898int submit_bh(int rw, struct buffer_head * bh)
2899{
2900 struct bio *bio;
2901 int ret = 0;
2902
2903 BUG_ON(!buffer_locked(bh));
2904 BUG_ON(!buffer_mapped(bh));
2905 BUG_ON(!bh->b_end_io);
2906
48fd4f93
JA
2907 /*
2908 * Mask in barrier bit for a write (could be either a WRITE or a
2909	 * WRITE_SYNC).
2910 */
2911 if (buffer_ordered(bh) && (rw & WRITE))
2912 rw |= WRITE_BARRIER;
1da177e4
LT
2913
2914 /*
48fd4f93 2915 * Only clear out a write error when rewriting
1da177e4 2916 */
48fd4f93 2917 if (test_set_buffer_req(bh) && (rw & WRITE))
1da177e4
LT
2918 clear_buffer_write_io_error(bh);
2919
2920 /*
2921 * from here on down, it's all bio -- do the initial mapping,
2922 * submit_bio -> generic_make_request may further map this bio around
2923 */
2924 bio = bio_alloc(GFP_NOIO, 1);
2925
2926 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2927 bio->bi_bdev = bh->b_bdev;
2928 bio->bi_io_vec[0].bv_page = bh->b_page;
2929 bio->bi_io_vec[0].bv_len = bh->b_size;
2930 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2931
2932 bio->bi_vcnt = 1;
2933 bio->bi_idx = 0;
2934 bio->bi_size = bh->b_size;
2935
2936 bio->bi_end_io = end_bio_bh_io_sync;
2937 bio->bi_private = bh;
2938
2939 bio_get(bio);
2940 submit_bio(rw, bio);
2941
2942 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2943 ret = -EOPNOTSUPP;
2944
2945 bio_put(bio);
2946 return ret;
2947}
2948
2949/**
2950 * ll_rw_block: low-level access to block devices (DEPRECATED)
a7662236 2951 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
1da177e4
LT
2952 * @nr: number of &struct buffer_heads in the array
2953 * @bhs: array of pointers to &struct buffer_head
2954 *
a7662236
JK
2955 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2956 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2957 * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
2958 * are sent to disk. The fourth %READA option is described in the documentation
2959 * for generic_make_request() which ll_rw_block() calls.
1da177e4
LT
2960 *
2961 * This function drops any buffer that it cannot get a lock on (with the
a7662236
JK
2962 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2963 * clean when doing a write request, and any buffer that appears to be
2964 * up-to-date when doing a read request. Further it marks as clean the buffers that
2965 * are processed for writing (the buffer cache won't assume that they are
2966 * actually clean until the buffer gets unlocked).
1da177e4
LT
2967 *
2968 * ll_rw_block sets b_end_io to simple completion handler that marks
2969 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2970 * any waiters.
2971 *
2972 * All of the buffers must be for the same device, and must also be a
2973 * multiple of the current approved size for the device.
2974 */
2975void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2976{
2977 int i;
2978
2979 for (i = 0; i < nr; i++) {
2980 struct buffer_head *bh = bhs[i];
2981
9cf6b720 2982 if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
a7662236 2983 lock_buffer(bh);
ca5de404 2984 else if (!trylock_buffer(bh))
1da177e4
LT
2985 continue;
2986
9cf6b720
JA
2987 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
2988 rw == SWRITE_SYNC_PLUG) {
1da177e4 2989 if (test_clear_buffer_dirty(bh)) {
76c3073a 2990 bh->b_end_io = end_buffer_write_sync;
e60e5c50 2991 get_bh(bh);
18ce3751
JA
2992 if (rw == SWRITE_SYNC)
2993 submit_bh(WRITE_SYNC, bh);
2994 else
2995 submit_bh(WRITE, bh);
1da177e4
LT
2996 continue;
2997 }
2998 } else {
1da177e4 2999 if (!buffer_uptodate(bh)) {
76c3073a 3000 bh->b_end_io = end_buffer_read_sync;
e60e5c50 3001 get_bh(bh);
1da177e4
LT
3002 submit_bh(rw, bh);
3003 continue;
3004 }
3005 }
3006 unlock_buffer(bh);
1da177e4
LT
3007 }
3008}
3009
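/*
 * Illustrative sketch (not part of this file): the classic (now discouraged)
 * ll_rw_block() pattern is to kick off reads on a batch of buffers and then
 * wait for each one.  "sb", "blk1" and "blk2" are hypothetical; ll_rw_block()
 * silently skips buffers that are already uptodate, which is why the waits
 * below return immediately for them.
 */
#if 0	/* example only, not built */
	struct buffer_head *bhs[2];
	int i, err = 0;

	bhs[0] = sb_getblk(sb, blk1);
	bhs[1] = sb_getblk(sb, blk2);
	ll_rw_block(READ, 2, bhs);
	for (i = 0; i < 2; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
		brelse(bhs[i]);
	}
#endif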
3010/*
3011 * For a data-integrity writeout, we need to wait upon any in-progress I/O
3012 * and then start new I/O and then wait upon it. The caller must have a ref on
3013 * the buffer_head.
3014 */
3015int sync_dirty_buffer(struct buffer_head *bh)
3016{
3017 int ret = 0;
3018
3019 WARN_ON(atomic_read(&bh->b_count) < 1);
3020 lock_buffer(bh);
3021 if (test_clear_buffer_dirty(bh)) {
3022 get_bh(bh);
3023 bh->b_end_io = end_buffer_write_sync;
1aa2a7cc 3024 ret = submit_bh(WRITE_SYNC, bh);
1da177e4
LT
3025 wait_on_buffer(bh);
3026 if (buffer_eopnotsupp(bh)) {
3027 clear_buffer_eopnotsupp(bh);
3028 ret = -EOPNOTSUPP;
3029 }
3030 if (!ret && !buffer_uptodate(bh))
3031 ret = -EIO;
3032 } else {
3033 unlock_buffer(bh);
3034 }
3035 return ret;
3036}
3037
3038/*
3039 * try_to_free_buffers() checks if all the buffers on this particular page
3040 * are unused, and releases them if so.
3041 *
3042 * Exclusion against try_to_free_buffers may be obtained by either
3043 * locking the page or by holding its mapping's private_lock.
3044 *
3045 * If the page is dirty but all the buffers are clean then we need to
3046 * be sure to mark the page clean as well. This is because the page
3047 * may be against a block device, and a later reattachment of buffers
3048 * to a dirty page will set *all* buffers dirty. Which would corrupt
3049 * filesystem data on the same device.
3050 *
3051 * The same applies to regular filesystem pages: if all the buffers are
3052 * clean then we set the page clean and proceed. To do that, we require
3053 * total exclusion from __set_page_dirty_buffers(). That is obtained with
3054 * private_lock.
3055 *
3056 * try_to_free_buffers() is non-blocking.
3057 */
3058static inline int buffer_busy(struct buffer_head *bh)
3059{
3060 return atomic_read(&bh->b_count) |
3061 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3062}
3063
3064static int
3065drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3066{
3067 struct buffer_head *head = page_buffers(page);
3068 struct buffer_head *bh;
3069
3070 bh = head;
3071 do {
de7d5a3b 3072 if (buffer_write_io_error(bh) && page->mapping)
1da177e4
LT
3073 set_bit(AS_EIO, &page->mapping->flags);
3074 if (buffer_busy(bh))
3075 goto failed;
3076 bh = bh->b_this_page;
3077 } while (bh != head);
3078
3079 do {
3080 struct buffer_head *next = bh->b_this_page;
3081
535ee2fb 3082 if (bh->b_assoc_map)
1da177e4
LT
3083 __remove_assoc_queue(bh);
3084 bh = next;
3085 } while (bh != head);
3086 *buffers_to_free = head;
3087 __clear_page_buffers(page);
3088 return 1;
3089failed:
3090 return 0;
3091}
3092
3093int try_to_free_buffers(struct page *page)
3094{
3095 struct address_space * const mapping = page->mapping;
3096 struct buffer_head *buffers_to_free = NULL;
3097 int ret = 0;
3098
3099 BUG_ON(!PageLocked(page));
ecdfc978 3100 if (PageWriteback(page))
1da177e4
LT
3101 return 0;
3102
3103 if (mapping == NULL) { /* can this still happen? */
3104 ret = drop_buffers(page, &buffers_to_free);
3105 goto out;
3106 }
3107
3108 spin_lock(&mapping->private_lock);
3109 ret = drop_buffers(page, &buffers_to_free);
ecdfc978
LT
3110
3111 /*
3112 * If the filesystem writes its buffers by hand (eg ext3)
3113 * then we can have clean buffers against a dirty page. We
3114 * clean the page here; otherwise the VM will never notice
3115 * that the filesystem did any IO at all.
3116 *
3117 * Also, during truncate, discard_buffer will have marked all
3118 * the page's buffers clean. We discover that here and clean
3119 * the page also.
87df7241
NP
3120 *
3121 * private_lock must be held over this entire operation in order
3122 * to synchronise against __set_page_dirty_buffers and prevent the
3123 * dirty bit from being lost.
ecdfc978
LT
3124 */
3125 if (ret)
3126 cancel_dirty_page(page, PAGE_CACHE_SIZE);
87df7241 3127 spin_unlock(&mapping->private_lock);
1da177e4
LT
3128out:
3129 if (buffers_to_free) {
3130 struct buffer_head *bh = buffers_to_free;
3131
3132 do {
3133 struct buffer_head *next = bh->b_this_page;
3134 free_buffer_head(bh);
3135 bh = next;
3136 } while (bh != buffers_to_free);
3137 }
3138 return ret;
3139}
3140EXPORT_SYMBOL(try_to_free_buffers);
3141
3978d717 3142void block_sync_page(struct page *page)
1da177e4
LT
3143{
3144 struct address_space *mapping;
3145
3146 smp_mb();
3147 mapping = page_mapping(page);
3148 if (mapping)
3149 blk_run_backing_dev(mapping->backing_dev_info, page);
1da177e4
LT
3150}
3151
3152/*
3153 * There are no bdflush tunables left. But distributions are
3154 * still running obsolete flush daemons, so we terminate them here.
3155 *
3156 * Use of bdflush() is deprecated and will be removed in a future kernel.
3157 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3158 */
bdc480e3 3159SYSCALL_DEFINE2(bdflush, int, func, long, data)
1da177e4
LT
3160{
3161 static int msg_count;
3162
3163 if (!capable(CAP_SYS_ADMIN))
3164 return -EPERM;
3165
3166 if (msg_count < 5) {
3167 msg_count++;
3168 printk(KERN_INFO
3169 "warning: process `%s' used the obsolete bdflush"
3170 " system call\n", current->comm);
3171 printk(KERN_INFO "Fix your initscripts?\n");
3172 }
3173
3174 if (func == 1)
3175 do_exit(0);
3176 return 0;
3177}
3178
3179/*
3180 * Buffer-head allocation
3181 */
e18b890b 3182static struct kmem_cache *bh_cachep;
1da177e4
LT
3183
3184/*
3185 * Once the number of bh's in the machine exceeds this level, we start
3186 * stripping them in writeback.
3187 */
3188static int max_buffer_heads;
3189
3190int buffer_heads_over_limit;
3191
3192struct bh_accounting {
3193 int nr; /* Number of live bh's */
3194 int ratelimit; /* Limit cacheline bouncing */
3195};
3196
3197static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3198
3199static void recalc_bh_state(void)
3200{
3201 int i;
3202 int tot = 0;
3203
3204 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3205 return;
3206 __get_cpu_var(bh_accounting).ratelimit = 0;
8a143426 3207 for_each_online_cpu(i)
1da177e4
LT
3208 tot += per_cpu(bh_accounting, i).nr;
3209 buffer_heads_over_limit = (tot > max_buffer_heads);
3210}
3211
dd0fc66f 3212struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
1da177e4 3213{
488514d1 3214 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
1da177e4 3215 if (ret) {
a35afb83 3216 INIT_LIST_HEAD(&ret->b_assoc_buffers);
736c7b80 3217 get_cpu_var(bh_accounting).nr++;
1da177e4 3218 recalc_bh_state();
736c7b80 3219 put_cpu_var(bh_accounting);
1da177e4
LT
3220 }
3221 return ret;
3222}
3223EXPORT_SYMBOL(alloc_buffer_head);
3224
3225void free_buffer_head(struct buffer_head *bh)
3226{
3227 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3228 kmem_cache_free(bh_cachep, bh);
736c7b80 3229 get_cpu_var(bh_accounting).nr--;
1da177e4 3230 recalc_bh_state();
736c7b80 3231 put_cpu_var(bh_accounting);
1da177e4
LT
3232}
3233EXPORT_SYMBOL(free_buffer_head);
3234
1da177e4
LT
3235static void buffer_exit_cpu(int cpu)
3236{
3237 int i;
3238 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3239
3240 for (i = 0; i < BH_LRU_SIZE; i++) {
3241 brelse(b->bhs[i]);
3242 b->bhs[i] = NULL;
3243 }
8a143426
ED
3244 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3245 per_cpu(bh_accounting, cpu).nr = 0;
3246 put_cpu_var(bh_accounting);
1da177e4
LT
3247}
3248
3249static int buffer_cpu_notify(struct notifier_block *self,
3250 unsigned long action, void *hcpu)
3251{
8bb78442 3252 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1da177e4
LT
3253 buffer_exit_cpu((unsigned long)hcpu);
3254 return NOTIFY_OK;
3255}
1da177e4 3256
389d1b08 3257/**
a6b91919 3258 * bh_uptodate_or_lock - Test whether the buffer is uptodate
389d1b08
AK
3259 * @bh: struct buffer_head
3260 *
 3261 * Return true if the buffer is up-to-date; otherwise return false
 3262 * with the buffer locked.
3263 */
3264int bh_uptodate_or_lock(struct buffer_head *bh)
3265{
3266 if (!buffer_uptodate(bh)) {
3267 lock_buffer(bh);
3268 if (!buffer_uptodate(bh))
3269 return 0;
3270 unlock_buffer(bh);
3271 }
3272 return 1;
3273}
3274EXPORT_SYMBOL(bh_uptodate_or_lock);
3275
3276/**
a6b91919 3277 * bh_submit_read - Submit a locked buffer for reading
389d1b08
AK
3278 * @bh: struct buffer_head
3279 *
3280 * Returns zero on success and -EIO on error.
3281 */
3282int bh_submit_read(struct buffer_head *bh)
3283{
3284 BUG_ON(!buffer_locked(bh));
3285
3286 if (buffer_uptodate(bh)) {
3287 unlock_buffer(bh);
3288 return 0;
3289 }
3290
3291 get_bh(bh);
3292 bh->b_end_io = end_buffer_read_sync;
3293 submit_bh(READ, bh);
3294 wait_on_buffer(bh);
3295 if (buffer_uptodate(bh))
3296 return 0;
3297 return -EIO;
3298}
3299EXPORT_SYMBOL(bh_submit_read);
3300
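/*
 * Illustrative sketch (not part of this file): the two helpers above are
 * meant to be used together, avoiding the lock/submit/wait boilerplate when
 * the buffer is already uptodate.  "bh" is a hypothetical buffer_head the
 * caller holds a reference on.
 */
#if 0	/* example only, not built */
	if (!bh_uptodate_or_lock(bh)) {
		/* buffer was not uptodate and is now locked: read it in */
		if (bh_submit_read(bh))
			return -EIO;
	}
	/* bh is uptodate and unlocked here */
#endif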
b98938c3 3301static void
51cc5068 3302init_buffer_head(void *data)
b98938c3
CL
3303{
3304 struct buffer_head *bh = data;
3305
3306 memset(bh, 0, sizeof(*bh));
3307 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3308}
3309
1da177e4
LT
3310void __init buffer_init(void)
3311{
3312 int nrpages;
3313
b98938c3
CL
3314 bh_cachep = kmem_cache_create("buffer_head",
3315 sizeof(struct buffer_head), 0,
3316 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3317 SLAB_MEM_SPREAD),
3318 init_buffer_head);
1da177e4
LT
3319
3320 /*
3321 * Limit the bh occupancy to 10% of ZONE_NORMAL
3322 */
3323 nrpages = (nr_free_buffer_pages() * 10) / 100;
3324 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3325 hotcpu_notifier(buffer_cpu_notify, 0);
3326}
3327
3328EXPORT_SYMBOL(__bforget);
3329EXPORT_SYMBOL(__brelse);
3330EXPORT_SYMBOL(__wait_on_buffer);
3331EXPORT_SYMBOL(block_commit_write);
3332EXPORT_SYMBOL(block_prepare_write);
54171690 3333EXPORT_SYMBOL(block_page_mkwrite);
1da177e4
LT
3334EXPORT_SYMBOL(block_read_full_page);
3335EXPORT_SYMBOL(block_sync_page);
3336EXPORT_SYMBOL(block_truncate_page);
3337EXPORT_SYMBOL(block_write_full_page);
89e10787 3338EXPORT_SYMBOL(cont_write_begin);
1da177e4
LT
3339EXPORT_SYMBOL(end_buffer_read_sync);
3340EXPORT_SYMBOL(end_buffer_write_sync);
3341EXPORT_SYMBOL(file_fsync);
1da177e4 3342EXPORT_SYMBOL(generic_block_bmap);
05eb0b51 3343EXPORT_SYMBOL(generic_cont_expand_simple);
1da177e4
LT
3344EXPORT_SYMBOL(init_buffer);
3345EXPORT_SYMBOL(invalidate_bdev);
3346EXPORT_SYMBOL(ll_rw_block);
3347EXPORT_SYMBOL(mark_buffer_dirty);
3348EXPORT_SYMBOL(submit_bh);
3349EXPORT_SYMBOL(sync_dirty_buffer);
3350EXPORT_SYMBOL(unlock_buffer);