git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - fs/buffer.c
Add WRITE_SYNC_PLUG and SWRITE_SYNC_PLUG
1/*
2 * linux/fs/buffer.c
3 *
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
6
7/*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9 *
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12 *
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
15 *
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17 *
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19 */
20
21#include <linux/kernel.h>
22#include <linux/syscalls.h>
23#include <linux/fs.h>
24#include <linux/mm.h>
25#include <linux/percpu.h>
26#include <linux/slab.h>
27#include <linux/capability.h>
28#include <linux/blkdev.h>
29#include <linux/file.h>
30#include <linux/quotaops.h>
31#include <linux/highmem.h>
32#include <linux/module.h>
33#include <linux/writeback.h>
34#include <linux/hash.h>
35#include <linux/suspend.h>
36#include <linux/buffer_head.h>
37#include <linux/task_io_accounting_ops.h>
38#include <linux/bio.h>
39#include <linux/notifier.h>
40#include <linux/cpu.h>
41#include <linux/bitops.h>
42#include <linux/mpage.h>
43#include <linux/bit_spinlock.h>
44
45static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46
47#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48
49inline void
50init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51{
52 bh->b_end_io = handler;
53 bh->b_private = private;
54}
55
56static int sync_buffer(void *word)
57{
58 struct block_device *bd;
59 struct buffer_head *bh
60 = container_of(word, struct buffer_head, b_state);
61
62 smp_mb();
63 bd = bh->b_bdev;
64 if (bd)
65 blk_run_address_space(bd->bd_inode->i_mapping);
66 io_schedule();
67 return 0;
68}
69
70void __lock_buffer(struct buffer_head *bh)
71{
72 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73 TASK_UNINTERRUPTIBLE);
74}
75EXPORT_SYMBOL(__lock_buffer);
76
77void unlock_buffer(struct buffer_head *bh)
78{
79 clear_bit_unlock(BH_Lock, &bh->b_state);
80 smp_mb__after_clear_bit();
81 wake_up_bit(&bh->b_state, BH_Lock);
82}
83
84/*
85 * Block until a buffer comes unlocked. This doesn't stop it
86 * from becoming locked again - you have to lock it yourself
87 * if you want to preserve its state.
88 */
89void __wait_on_buffer(struct buffer_head * bh)
90{
91 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
92}
93
94static void
95__clear_page_buffers(struct page *page)
96{
97 ClearPagePrivate(page);
98 set_page_private(page, 0);
99 page_cache_release(page);
100}
101
102
103static int quiet_error(struct buffer_head *bh)
104{
105 if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
106 return 0;
107 return 1;
108}
109
110
111static void buffer_io_error(struct buffer_head *bh)
112{
113 char b[BDEVNAME_SIZE];
114 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
115 bdevname(bh->b_bdev, b),
116 (unsigned long long)bh->b_blocknr);
117}
118
119/*
120 * End-of-IO handler helper function which does not touch the bh after
121 * unlocking it.
122 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
123 * a race there is benign: unlock_buffer() only uses the bh's address for
124 * hashing after unlocking the buffer, so it doesn't actually touch the bh
125 * itself.
126 */
127static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
128{
129 if (uptodate) {
130 set_buffer_uptodate(bh);
131 } else {
132 /* This happens, due to failed READA attempts. */
133 clear_buffer_uptodate(bh);
134 }
135 unlock_buffer(bh);
136}
137
138/*
139 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
140 * unlock the buffer. This is what ll_rw_block uses too.
141 */
142void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
143{
144 __end_buffer_read_notouch(bh, uptodate);
145 put_bh(bh);
146}
147
148void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
149{
150 char b[BDEVNAME_SIZE];
151
152 if (uptodate) {
153 set_buffer_uptodate(bh);
154 } else {
155 if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
156 buffer_io_error(bh);
157 printk(KERN_WARNING "lost page write due to "
158 "I/O error on %s\n",
159 bdevname(bh->b_bdev, b));
160 }
161 set_buffer_write_io_error(bh);
162 clear_buffer_uptodate(bh);
163 }
164 unlock_buffer(bh);
165 put_bh(bh);
166}
167
168/*
169 * Various filesystems appear to want __find_get_block to be non-blocking.
170 * But it's the page lock which protects the buffers. To get around this,
171 * we get exclusion from try_to_free_buffers with the blockdev mapping's
172 * private_lock.
173 *
174 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
175 * may be quite high. This code could TryLock the page, and if that
176 * succeeds, there is no need to take private_lock. (But if
177 * private_lock is contended then so is mapping->tree_lock).
178 */
179static struct buffer_head *
180__find_get_block_slow(struct block_device *bdev, sector_t block)
181{
182 struct inode *bd_inode = bdev->bd_inode;
183 struct address_space *bd_mapping = bd_inode->i_mapping;
184 struct buffer_head *ret = NULL;
185 pgoff_t index;
186 struct buffer_head *bh;
187 struct buffer_head *head;
188 struct page *page;
189 int all_mapped = 1;
190
191 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
192 page = find_get_page(bd_mapping, index);
193 if (!page)
194 goto out;
195
196 spin_lock(&bd_mapping->private_lock);
197 if (!page_has_buffers(page))
198 goto out_unlock;
199 head = page_buffers(page);
200 bh = head;
201 do {
202 if (!buffer_mapped(bh))
203 all_mapped = 0;
204 else if (bh->b_blocknr == block) {
205 ret = bh;
206 get_bh(bh);
207 goto out_unlock;
208 }
209 bh = bh->b_this_page;
210 } while (bh != head);
211
212 /* we might be here because some of the buffers on this page are
213 * not mapped. This is due to various races between
214 * file io on the block device and getblk. It gets dealt with
215 * elsewhere, don't buffer_error if we had some unmapped buffers
216 */
217 if (all_mapped) {
218 printk("__find_get_block_slow() failed. "
219 "block=%llu, b_blocknr=%llu\n",
220 (unsigned long long)block,
221 (unsigned long long)bh->b_blocknr);
222 printk("b_state=0x%08lx, b_size=%zu\n",
223 bh->b_state, bh->b_size);
224 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
225 }
226out_unlock:
227 spin_unlock(&bd_mapping->private_lock);
228 page_cache_release(page);
229out:
230 return ret;
231}
232
233/* If invalidate_buffers() will trash dirty buffers, it means some kind
234 of fs corruption is going on. Trashing dirty data always imply losing
235 information that was supposed to be just stored on the physical layer
236 by the user.
237
238 Thus invalidate_buffers in general usage is not allowed to trash
239 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
240 be preserved. These buffers are simply skipped.
241
242 We also skip buffers which are still in use. For example this can
243 happen if a userspace program is reading the block device.
244
245 NOTE: In the case where the user removed a removable-media disk while
246 there was still dirty data not synced to disk (due to a bug in the device
247 driver or to an error by the user), not destroying the dirty buffers could
248 also corrupt the next media inserted; thus a parameter is
249 necessary to handle this case in the safest way possible (trying
250 not to also corrupt the newly inserted disk with data belonging to
251 the old, now-corrupted disk). Also, for the ramdisk the natural thing
252 to do in order to release the ramdisk memory is to destroy dirty buffers.
253
254 These are two special cases. Normal usage implies that the device driver
255 issues a sync on the device (without waiting for I/O completion) and
256 then an invalidate_buffers call that doesn't trash dirty buffers.
257
258 For handling cache coherency with the blkdev pagecache the 'update' case
259 has been introduced. It is needed to re-read from disk any pinned
260 buffer. NOTE: re-reading from disk is destructive so we can do it only
261 when we assume nobody is changing the buffercache under our I/O and when
262 we think the disk contains more recent information than the buffercache.
263 The update == 1 pass marks the buffers we need to update, the update == 2
264 pass does the actual I/O. */
265void invalidate_bdev(struct block_device *bdev)
266{
267 struct address_space *mapping = bdev->bd_inode->i_mapping;
268
269 if (mapping->nrpages == 0)
270 return;
271
272 invalidate_bh_lrus();
273 invalidate_mapping_pages(mapping, 0, -1);
274}
275
276/*
277 * Kick pdflush then try to free up some ZONE_NORMAL memory.
278 */
279static void free_more_memory(void)
280{
281 struct zone *zone;
282 int nid;
283
284 wakeup_pdflush(1024);
285 yield();
286
287 for_each_online_node(nid) {
288 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
289 gfp_zone(GFP_NOFS), NULL,
290 &zone);
291 if (zone)
292 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
293 GFP_NOFS, NULL);
294 }
295}
296
297/*
298 * I/O completion handler for block_read_full_page() - pages
299 * which come unlocked at the end of I/O.
300 */
301static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
302{
303 unsigned long flags;
304 struct buffer_head *first;
305 struct buffer_head *tmp;
306 struct page *page;
307 int page_uptodate = 1;
308
309 BUG_ON(!buffer_async_read(bh));
310
311 page = bh->b_page;
312 if (uptodate) {
313 set_buffer_uptodate(bh);
314 } else {
315 clear_buffer_uptodate(bh);
316 if (!quiet_error(bh))
317 buffer_io_error(bh);
318 SetPageError(page);
319 }
320
321 /*
322 * Be _very_ careful from here on. Bad things can happen if
323 * two buffer heads end IO at almost the same time and both
324 * decide that the page is now completely done.
325 */
326 first = page_buffers(page);
327 local_irq_save(flags);
328 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
329 clear_buffer_async_read(bh);
330 unlock_buffer(bh);
331 tmp = bh;
332 do {
333 if (!buffer_uptodate(tmp))
334 page_uptodate = 0;
335 if (buffer_async_read(tmp)) {
336 BUG_ON(!buffer_locked(tmp));
337 goto still_busy;
338 }
339 tmp = tmp->b_this_page;
340 } while (tmp != bh);
341 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
342 local_irq_restore(flags);
343
344 /*
345 * If none of the buffers had errors and they are all
346 * uptodate then we can set the page uptodate.
347 */
348 if (page_uptodate && !PageError(page))
349 SetPageUptodate(page);
350 unlock_page(page);
351 return;
352
353still_busy:
354 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
355 local_irq_restore(flags);
356 return;
357}
358
359/*
360 * Completion handler for block_write_full_page() - pages which are unlocked
361 * during I/O, and which have PageWriteback cleared upon I/O completion.
362 */
363static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
364{
365 char b[BDEVNAME_SIZE];
366 unsigned long flags;
367 struct buffer_head *first;
368 struct buffer_head *tmp;
369 struct page *page;
370
371 BUG_ON(!buffer_async_write(bh));
372
373 page = bh->b_page;
374 if (uptodate) {
375 set_buffer_uptodate(bh);
376 } else {
377 if (!quiet_error(bh)) {
378 buffer_io_error(bh);
379 printk(KERN_WARNING "lost page write due to "
380 "I/O error on %s\n",
381 bdevname(bh->b_bdev, b));
382 }
383 set_bit(AS_EIO, &page->mapping->flags);
384 set_buffer_write_io_error(bh);
385 clear_buffer_uptodate(bh);
386 SetPageError(page);
387 }
388
389 first = page_buffers(page);
390 local_irq_save(flags);
391 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
392
393 clear_buffer_async_write(bh);
394 unlock_buffer(bh);
395 tmp = bh->b_this_page;
396 while (tmp != bh) {
397 if (buffer_async_write(tmp)) {
398 BUG_ON(!buffer_locked(tmp));
399 goto still_busy;
400 }
401 tmp = tmp->b_this_page;
402 }
403 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
404 local_irq_restore(flags);
405 end_page_writeback(page);
406 return;
407
408still_busy:
409 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
410 local_irq_restore(flags);
411 return;
412}
413
414/*
415 * If a page's buffers are under async read-in (end_buffer_async_read
416 * completion) then there is a possibility that another thread of
417 * control could lock one of the buffers after it has completed
418 * but while some of the other buffers have not completed. This
419 * locked buffer would confuse end_buffer_async_read() into not unlocking
420 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
421 * that this buffer is not under async I/O.
422 *
423 * The page comes unlocked when it has no locked buffer_async buffers
424 * left.
425 *
426 * PageLocked prevents anyone starting new async I/O reads any of
427 * the buffers.
428 *
429 * PageWriteback is used to prevent simultaneous writeout of the same
430 * page.
431 *
432 * PageLocked prevents anyone from starting writeback of a page which is
433 * under read I/O (PageWriteback is only ever set against a locked page).
434 */
435static void mark_buffer_async_read(struct buffer_head *bh)
436{
437 bh->b_end_io = end_buffer_async_read;
438 set_buffer_async_read(bh);
439}
440
441void mark_buffer_async_write(struct buffer_head *bh)
442{
443 bh->b_end_io = end_buffer_async_write;
444 set_buffer_async_write(bh);
445}
446EXPORT_SYMBOL(mark_buffer_async_write);
447
448
449/*
450 * fs/buffer.c contains helper functions for buffer-backed address space's
451 * fsync functions. A common requirement for buffer-based filesystems is
452 * that certain data from the backing blockdev needs to be written out for
453 * a successful fsync(). For example, ext2 indirect blocks need to be
454 * written back and waited upon before fsync() returns.
455 *
456 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
457 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
458 * management of a list of dependent buffers at ->i_mapping->private_list.
459 *
460 * Locking is a little subtle: try_to_free_buffers() will remove buffers
461 * from their controlling inode's queue when they are being freed. But
462 * try_to_free_buffers() will be operating against the *blockdev* mapping
463 * at the time, not against the S_ISREG file which depends on those buffers.
464 * So the locking for private_list is via the private_lock in the address_space
465 * which backs the buffers. Which is different from the address_space
466 * against which the buffers are listed. So for a particular address_space,
467 * mapping->private_lock does *not* protect mapping->private_list! In fact,
468 * mapping->private_list will always be protected by the backing blockdev's
469 * ->private_lock.
470 *
471 * Which introduces a requirement: all buffers on an address_space's
472 * ->private_list must be from the same address_space: the blockdev's.
473 *
474 * address_spaces which do not place buffers at ->private_list via these
475 * utility functions are free to use private_lock and private_list for
476 * whatever they want. The only requirement is that list_empty(private_list)
477 * be true at clear_inode() time.
478 *
479 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
480 * filesystems should do that. invalidate_inode_buffers() should just go
481 * BUG_ON(!list_empty).
482 *
483 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
484 * take an address_space, not an inode. And it should be called
485 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
486 * queued up.
487 *
488 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
489 * list if it is already on a list. Because if the buffer is on a list,
490 * it *must* already be on the right one. If not, the filesystem is being
491 * silly. This will save a ton of locking. But first we have to ensure
492 * that buffers are taken *off* the old inode's list when they are freed
493 * (presumably in truncate). That requires careful auditing of all
494 * filesystems (do it inside bforget()). It could also be done by bringing
495 * b_inode back.
496 */
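As an illustration of the pattern described above (a minimal sketch, not part of buffer.c; the example_* names are hypothetical): a filesystem queues a metadata buffer on the data inode's ->private_list with mark_buffer_dirty_inode(), and its ->fsync() later writes and waits on that list through sync_mapping_buffers().

static void example_dirty_indirect(struct buffer_head *bh, struct inode *inode)
{
	/* dirty the indirect block and queue it on inode->i_mapping->private_list */
	mark_buffer_dirty_inode(bh, inode);
}

static int example_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;

	/* write out and wait upon the "associated" metadata buffers */
	return sync_mapping_buffers(inode->i_mapping);
}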
497
498/*
499 * The buffer's backing address_space's private_lock must be held
500 */
501static void __remove_assoc_queue(struct buffer_head *bh)
502{
503 list_del_init(&bh->b_assoc_buffers);
504 WARN_ON(!bh->b_assoc_map);
505 if (buffer_write_io_error(bh))
506 set_bit(AS_EIO, &bh->b_assoc_map->flags);
507 bh->b_assoc_map = NULL;
508}
509
510int inode_has_buffers(struct inode *inode)
511{
512 return !list_empty(&inode->i_data.private_list);
513}
514
515/*
516 * osync is designed to support O_SYNC io. It waits synchronously for
517 * all already-submitted IO to complete, but does not queue any new
518 * writes to the disk.
519 *
520 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
521 * you dirty the buffers, and then use osync_inode_buffers to wait for
522 * completion. Any other dirty buffers which are not yet queued for
523 * write will not be flushed to disk by the osync.
524 */
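A rough caller-side sketch of the protocol described above (illustrative only, using just mark_buffer_dirty() and ll_rw_block() from this file; the wait side is what the osync pass below provides):

	/* for each buffer modified by an O_SYNC write: */
	mark_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);	/* queue the write immediately */
	/*
	 * ... once every buffer of the request has been queued, a single
	 * osync pass waits for the already-submitted I/O to complete
	 * without queueing any new writes.
	 */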
525static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
526{
527 struct buffer_head *bh;
528 struct list_head *p;
529 int err = 0;
530
531 spin_lock(lock);
532repeat:
533 list_for_each_prev(p, list) {
534 bh = BH_ENTRY(p);
535 if (buffer_locked(bh)) {
536 get_bh(bh);
537 spin_unlock(lock);
538 wait_on_buffer(bh);
539 if (!buffer_uptodate(bh))
540 err = -EIO;
541 brelse(bh);
542 spin_lock(lock);
543 goto repeat;
544 }
545 }
546 spin_unlock(lock);
547 return err;
548}
549
550void do_thaw_all(unsigned long unused)
551{
552 struct super_block *sb;
553 char b[BDEVNAME_SIZE];
554
555 spin_lock(&sb_lock);
556restart:
557 list_for_each_entry(sb, &super_blocks, s_list) {
558 sb->s_count++;
559 spin_unlock(&sb_lock);
560 down_read(&sb->s_umount);
561 while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
562 printk(KERN_WARNING "Emergency Thaw on %s\n",
563 bdevname(sb->s_bdev, b));
564 up_read(&sb->s_umount);
565 spin_lock(&sb_lock);
566 if (__put_super_and_need_restart(sb))
567 goto restart;
568 }
569 spin_unlock(&sb_lock);
570 printk(KERN_WARNING "Emergency Thaw complete\n");
571}
572
573/**
574 * emergency_thaw_all -- forcibly thaw every frozen filesystem
575 *
576 * Used for emergency unfreeze of all filesystems via SysRq
577 */
578void emergency_thaw_all(void)
579{
580 pdflush_operation(do_thaw_all, 0);
581}
582
583/**
584 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
585 * @mapping: the mapping which wants those buffers written
586 *
587 * Starts I/O against the buffers at mapping->private_list, and waits upon
588 * that I/O.
589 *
590 * Basically, this is a convenience function for fsync().
591 * @mapping is a file or directory which needs those buffers to be written for
592 * a successful fsync().
593 */
594int sync_mapping_buffers(struct address_space *mapping)
595{
596 struct address_space *buffer_mapping = mapping->assoc_mapping;
597
598 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
599 return 0;
600
601 return fsync_buffers_list(&buffer_mapping->private_lock,
602 &mapping->private_list);
603}
604EXPORT_SYMBOL(sync_mapping_buffers);
605
606/*
607 * Called when we've recently written block `bblock', and it is known that
608 * `bblock' was for a buffer_boundary() buffer. This means that the block at
609 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
610 * dirty, schedule it for IO. So that indirects merge nicely with their data.
611 */
612void write_boundary_block(struct block_device *bdev,
613 sector_t bblock, unsigned blocksize)
614{
615 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
616 if (bh) {
617 if (buffer_dirty(bh))
618 ll_rw_block(WRITE, 1, &bh);
619 put_bh(bh);
620 }
621}
622
623void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
624{
625 struct address_space *mapping = inode->i_mapping;
626 struct address_space *buffer_mapping = bh->b_page->mapping;
627
628 mark_buffer_dirty(bh);
629 if (!mapping->assoc_mapping) {
630 mapping->assoc_mapping = buffer_mapping;
631 } else {
632 BUG_ON(mapping->assoc_mapping != buffer_mapping);
633 }
634 if (!bh->b_assoc_map) {
635 spin_lock(&buffer_mapping->private_lock);
636 list_move_tail(&bh->b_assoc_buffers,
637 &mapping->private_list);
638 bh->b_assoc_map = mapping;
639 spin_unlock(&buffer_mapping->private_lock);
640 }
641}
642EXPORT_SYMBOL(mark_buffer_dirty_inode);
643
644/*
645 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
646 * dirty.
647 *
648 * If warn is true, then emit a warning if the page is not uptodate and has
649 * not been truncated.
650 */
651static void __set_page_dirty(struct page *page,
652 struct address_space *mapping, int warn)
653{
654 spin_lock_irq(&mapping->tree_lock);
655 if (page->mapping) { /* Race with truncate? */
656 WARN_ON_ONCE(warn && !PageUptodate(page));
657 account_page_dirtied(page, mapping);
658 radix_tree_tag_set(&mapping->page_tree,
659 page_index(page), PAGECACHE_TAG_DIRTY);
660 }
661 spin_unlock_irq(&mapping->tree_lock);
662 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
663}
664
665/*
666 * Add a page to the dirty page list.
667 *
668 * It is a sad fact of life that this function is called from several places
669 * deeply under spinlocking. It may not sleep.
670 *
671 * If the page has buffers, the uptodate buffers are set dirty, to preserve
672 * dirty-state coherency between the page and the buffers. If the page does
673 * not have buffers then when they are later attached they will all be set
674 * dirty.
675 *
676 * The buffers are dirtied before the page is dirtied. There's a small race
677 * window in which a writepage caller may see the page cleanness but not the
678 * buffer dirtiness. That's fine. If this code were to set the page dirty
679 * before the buffers, a concurrent writepage caller could clear the page dirty
680 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
681 * page on the dirty page list.
682 *
683 * We use private_lock to lock against try_to_free_buffers while using the
684 * page's buffer list. Also use this to protect against clean buffers being
685 * added to the page after it was set dirty.
686 *
687 * FIXME: may need to call ->reservepage here as well. That's rather up to the
688 * address_space though.
689 */
690int __set_page_dirty_buffers(struct page *page)
691{
692 int newly_dirty;
693 struct address_space *mapping = page_mapping(page);
694
695 if (unlikely(!mapping))
696 return !TestSetPageDirty(page);
697
698 spin_lock(&mapping->private_lock);
699 if (page_has_buffers(page)) {
700 struct buffer_head *head = page_buffers(page);
701 struct buffer_head *bh = head;
702
703 do {
704 set_buffer_dirty(bh);
705 bh = bh->b_this_page;
706 } while (bh != head);
707 }
708 newly_dirty = !TestSetPageDirty(page);
709 spin_unlock(&mapping->private_lock);
710
711 if (newly_dirty)
712 __set_page_dirty(page, mapping, 1);
713 return newly_dirty;
714}
715EXPORT_SYMBOL(__set_page_dirty_buffers);
716
717/*
718 * Write out and wait upon a list of buffers.
719 *
720 * We have conflicting pressures: we want to make sure that all
721 * initially dirty buffers get waited on, but that any subsequently
722 * dirtied buffers don't. After all, we don't want fsync to last
723 * forever if somebody is actively writing to the file.
724 *
725 * Do this in two main stages: first we copy dirty buffers to a
726 * temporary inode list, queueing the writes as we go. Then we clean
727 * up, waiting for those writes to complete.
728 *
729 * During this second stage, any subsequent updates to the file may end
730 * up refiling the buffer on the original inode's dirty list again, so
731 * there is a chance we will end up with a buffer queued for write but
732 * not yet completed on that list. So, as a final cleanup we go through
733 * the osync code to catch these locked, dirty buffers without requeuing
734 * any newly dirty buffers for write.
735 */
736static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
737{
738 struct buffer_head *bh;
739 struct list_head tmp;
740 struct address_space *mapping;
741 int err = 0, err2;
742
743 INIT_LIST_HEAD(&tmp);
744
745 spin_lock(lock);
746 while (!list_empty(list)) {
747 bh = BH_ENTRY(list->next);
748 mapping = bh->b_assoc_map;
749 __remove_assoc_queue(bh);
750 /* Avoid race with mark_buffer_dirty_inode() which does
751 * a lockless check and we rely on seeing the dirty bit */
752 smp_mb();
753 if (buffer_dirty(bh) || buffer_locked(bh)) {
754 list_add(&bh->b_assoc_buffers, &tmp);
755 bh->b_assoc_map = mapping;
756 if (buffer_dirty(bh)) {
757 get_bh(bh);
758 spin_unlock(lock);
759 /*
760 * Ensure any pending I/O completes so that
761 * ll_rw_block() actually writes the current
762 * contents - it is a noop if I/O is still in
763 * flight on potentially older contents.
764 */
765 ll_rw_block(SWRITE_SYNC, 1, &bh);
766 brelse(bh);
767 spin_lock(lock);
768 }
769 }
770 }
771
772 while (!list_empty(&tmp)) {
773 bh = BH_ENTRY(tmp.prev);
774 get_bh(bh);
775 mapping = bh->b_assoc_map;
776 __remove_assoc_queue(bh);
777 /* Avoid race with mark_buffer_dirty_inode() which does
778 * a lockless check and we rely on seeing the dirty bit */
779 smp_mb();
780 if (buffer_dirty(bh)) {
781 list_add(&bh->b_assoc_buffers,
782 &mapping->private_list);
783 bh->b_assoc_map = mapping;
784 }
785 spin_unlock(lock);
786 wait_on_buffer(bh);
787 if (!buffer_uptodate(bh))
788 err = -EIO;
789 brelse(bh);
790 spin_lock(lock);
791 }
792
793 spin_unlock(lock);
794 err2 = osync_buffers_list(lock, list);
795 if (err)
796 return err;
797 else
798 return err2;
799}
800
801/*
802 * Invalidate any and all dirty buffers on a given inode. We are
803 * probably unmounting the fs, but that doesn't mean we have already
804 * done a sync(). Just drop the buffers from the inode list.
805 *
806 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
807 * assumes that all the buffers are against the blockdev. Not true
808 * for reiserfs.
809 */
810void invalidate_inode_buffers(struct inode *inode)
811{
812 if (inode_has_buffers(inode)) {
813 struct address_space *mapping = &inode->i_data;
814 struct list_head *list = &mapping->private_list;
815 struct address_space *buffer_mapping = mapping->assoc_mapping;
816
817 spin_lock(&buffer_mapping->private_lock);
818 while (!list_empty(list))
819 __remove_assoc_queue(BH_ENTRY(list->next));
820 spin_unlock(&buffer_mapping->private_lock);
821 }
822}
823EXPORT_SYMBOL(invalidate_inode_buffers);
824
825/*
826 * Remove any clean buffers from the inode's buffer list. This is called
827 * when we're trying to free the inode itself. Those buffers can pin it.
828 *
829 * Returns true if all buffers were removed.
830 */
831int remove_inode_buffers(struct inode *inode)
832{
833 int ret = 1;
834
835 if (inode_has_buffers(inode)) {
836 struct address_space *mapping = &inode->i_data;
837 struct list_head *list = &mapping->private_list;
838 struct address_space *buffer_mapping = mapping->assoc_mapping;
839
840 spin_lock(&buffer_mapping->private_lock);
841 while (!list_empty(list)) {
842 struct buffer_head *bh = BH_ENTRY(list->next);
843 if (buffer_dirty(bh)) {
844 ret = 0;
845 break;
846 }
847 __remove_assoc_queue(bh);
848 }
849 spin_unlock(&buffer_mapping->private_lock);
850 }
851 return ret;
852}
853
854/*
855 * Create the appropriate buffers when given a page for data area and
856 * the size of each buffer.. Use the bh->b_this_page linked list to
857 * follow the buffers created. Return NULL if unable to create more
858 * buffers.
859 *
860 * The retry flag is used to differentiate async IO (paging, swapping)
861 * which may not fail from ordinary buffer allocations.
862 */
863struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
864 int retry)
865{
866 struct buffer_head *bh, *head;
867 long offset;
868
869try_again:
870 head = NULL;
871 offset = PAGE_SIZE;
872 while ((offset -= size) >= 0) {
873 bh = alloc_buffer_head(GFP_NOFS);
874 if (!bh)
875 goto no_grow;
876
877 bh->b_bdev = NULL;
878 bh->b_this_page = head;
879 bh->b_blocknr = -1;
880 head = bh;
881
882 bh->b_state = 0;
883 atomic_set(&bh->b_count, 0);
884 bh->b_private = NULL;
885 bh->b_size = size;
886
887 /* Link the buffer to its page */
888 set_bh_page(bh, page, offset);
889
890 init_buffer(bh, NULL, NULL);
891 }
892 return head;
893/*
894 * In case anything failed, we just free everything we got.
895 */
896no_grow:
897 if (head) {
898 do {
899 bh = head;
900 head = head->b_this_page;
901 free_buffer_head(bh);
902 } while (head);
903 }
904
905 /*
906 * Return failure for non-async IO requests. Async IO requests
907 * are not allowed to fail, so we have to wait until buffer heads
908 * become available. But we don't want tasks sleeping with
909 * partially complete buffers, so all were released above.
910 */
911 if (!retry)
912 return NULL;
913
914 /* We're _really_ low on memory. Now we just
915 * wait for old buffer heads to become free due to
916 * finishing IO. Since this is an async request and
917 * the reserve list is empty, we're sure there are
918 * async buffer heads in use.
919 */
920 free_more_memory();
921 goto try_again;
922}
923EXPORT_SYMBOL_GPL(alloc_page_buffers);
924
925static inline void
926link_dev_buffers(struct page *page, struct buffer_head *head)
927{
928 struct buffer_head *bh, *tail;
929
930 bh = head;
931 do {
932 tail = bh;
933 bh = bh->b_this_page;
934 } while (bh);
935 tail->b_this_page = head;
936 attach_page_buffers(page, head);
937}
938
939/*
940 * Initialise the state of a blockdev page's buffers.
941 */
942static void
943init_page_buffers(struct page *page, struct block_device *bdev,
944 sector_t block, int size)
945{
946 struct buffer_head *head = page_buffers(page);
947 struct buffer_head *bh = head;
948 int uptodate = PageUptodate(page);
949
950 do {
951 if (!buffer_mapped(bh)) {
952 init_buffer(bh, NULL, NULL);
953 bh->b_bdev = bdev;
954 bh->b_blocknr = block;
955 if (uptodate)
956 set_buffer_uptodate(bh);
957 set_buffer_mapped(bh);
958 }
959 block++;
960 bh = bh->b_this_page;
961 } while (bh != head);
962}
963
964/*
965 * Create the page-cache page that contains the requested block.
966 *
967 * This is used purely for blockdev mappings.
968 */
969static struct page *
970grow_dev_page(struct block_device *bdev, sector_t block,
971 pgoff_t index, int size)
972{
973 struct inode *inode = bdev->bd_inode;
974 struct page *page;
975 struct buffer_head *bh;
976
977 page = find_or_create_page(inode->i_mapping, index,
978 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
979 if (!page)
980 return NULL;
981
982 BUG_ON(!PageLocked(page));
983
984 if (page_has_buffers(page)) {
985 bh = page_buffers(page);
986 if (bh->b_size == size) {
987 init_page_buffers(page, bdev, block, size);
988 return page;
989 }
990 if (!try_to_free_buffers(page))
991 goto failed;
992 }
993
994 /*
995 * Allocate some buffers for this page
996 */
997 bh = alloc_page_buffers(page, size, 0);
998 if (!bh)
999 goto failed;
1000
1001 /*
1002 * Link the page to the buffers and initialise them. Take the
1003 * lock to be atomic wrt __find_get_block(), which does not
1004 * run under the page lock.
1005 */
1006 spin_lock(&inode->i_mapping->private_lock);
1007 link_dev_buffers(page, bh);
1008 init_page_buffers(page, bdev, block, size);
1009 spin_unlock(&inode->i_mapping->private_lock);
1010 return page;
1011
1012failed:
1013 BUG();
1014 unlock_page(page);
1015 page_cache_release(page);
1016 return NULL;
1017}
1018
1019/*
1020 * Create buffers for the specified block device block's page. If
1021 * that page was dirty, the buffers are set dirty also.
1022 */
1023static int
1024grow_buffers(struct block_device *bdev, sector_t block, int size)
1025{
1026 struct page *page;
1027 pgoff_t index;
1028 int sizebits;
1029
1030 sizebits = -1;
1031 do {
1032 sizebits++;
1033 } while ((size << sizebits) < PAGE_SIZE);
1034
1035 index = block >> sizebits;
1036
1037 /*
1038 * Check for a block which wants to lie outside our maximum possible
1039 * pagecache index. (this comparison is done using sector_t types).
1040 */
1041 if (unlikely(index != block >> sizebits)) {
1042 char b[BDEVNAME_SIZE];
1043
1044 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1045 "device %s\n",
1046 __func__, (unsigned long long)block,
1047 bdevname(bdev, b));
1048 return -EIO;
1049 }
1050 block = index << sizebits;
1051 /* Create a page with the proper size buffers.. */
1052 page = grow_dev_page(bdev, block, index, size);
1053 if (!page)
1054 return 0;
1055 unlock_page(page);
1056 page_cache_release(page);
1057 return 1;
1058}
1059
1060static struct buffer_head *
1061__getblk_slow(struct block_device *bdev, sector_t block, int size)
1062{
1063 /* Size must be multiple of hard sectorsize */
1064 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1065 (size < 512 || size > PAGE_SIZE))) {
1066 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1067 size);
1068 printk(KERN_ERR "hardsect size: %d\n",
1069 bdev_hardsect_size(bdev));
1070
1071 dump_stack();
1072 return NULL;
1073 }
1074
1075 for (;;) {
1076 struct buffer_head * bh;
1077 int ret;
1078
1079 bh = __find_get_block(bdev, block, size);
1080 if (bh)
1081 return bh;
1082
1083 ret = grow_buffers(bdev, block, size);
1084 if (ret < 0)
1085 return NULL;
1086 if (ret == 0)
1087 free_more_memory();
1088 }
1089}
1090
1091/*
1092 * The relationship between dirty buffers and dirty pages:
1093 *
1094 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1095 * the page is tagged dirty in its radix tree.
1096 *
1097 * At all times, the dirtiness of the buffers represents the dirtiness of
1098 * subsections of the page. If the page has buffers, the page dirty bit is
1099 * merely a hint about the true dirty state.
1100 *
1101 * When a page is set dirty in its entirety, all its buffers are marked dirty
1102 * (if the page has buffers).
1103 *
1104 * When a buffer is marked dirty, its page is dirtied, but the page's other
1105 * buffers are not.
1106 *
1107 * Also. When blockdev buffers are explicitly read with bread(), they
1108 * individually become uptodate. But their backing page remains not
1109 * uptodate - even if all of its buffers are uptodate. A subsequent
1110 * block_read_full_page() against that page will discover all the uptodate
1111 * buffers, will set the page uptodate and will perform no I/O.
1112 */
1113
1114/**
1115 * mark_buffer_dirty - mark a buffer_head as needing writeout
1116 * @bh: the buffer_head to mark dirty
1117 *
1118 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1119 * backing page dirty, then tag the page as dirty in its address_space's radix
1120 * tree and then attach the address_space's inode to its superblock's dirty
1121 * inode list.
1122 *
1123 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1124 * mapping->tree_lock and the global inode_lock.
1125 */
1126void mark_buffer_dirty(struct buffer_head *bh)
1127{
1128 WARN_ON_ONCE(!buffer_uptodate(bh));
1129
1130 /*
1131 * Very *carefully* optimize the it-is-already-dirty case.
1132 *
1133 * Don't let the final "is it dirty" escape to before we
1134 * perhaps modified the buffer.
1135 */
1136 if (buffer_dirty(bh)) {
1137 smp_mb();
1138 if (buffer_dirty(bh))
1139 return;
1140 }
1141
1142 if (!test_set_buffer_dirty(bh)) {
1143 struct page *page = bh->b_page;
1144 if (!TestSetPageDirty(page))
1145 __set_page_dirty(page, page_mapping(page), 0);
1146 }
1147}
1148
1149/*
1150 * Decrement a buffer_head's reference count. If all buffers against a page
1151 * have zero reference count, are clean and unlocked, and if the page is clean
1152 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1153 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1154 * a page but it ends up not being freed, and buffers may later be reattached).
1155 */
1156void __brelse(struct buffer_head * buf)
1157{
1158 if (atomic_read(&buf->b_count)) {
1159 put_bh(buf);
1160 return;
1161 }
1162 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1163}
1164
1165/*
1166 * bforget() is like brelse(), except it discards any
1167 * potentially dirty data.
1168 */
1169void __bforget(struct buffer_head *bh)
1170{
1171 clear_buffer_dirty(bh);
1172 if (bh->b_assoc_map) {
1173 struct address_space *buffer_mapping = bh->b_page->mapping;
1174
1175 spin_lock(&buffer_mapping->private_lock);
1176 list_del_init(&bh->b_assoc_buffers);
1177 bh->b_assoc_map = NULL;
1178 spin_unlock(&buffer_mapping->private_lock);
1179 }
1180 __brelse(bh);
1181}
1182
1183static struct buffer_head *__bread_slow(struct buffer_head *bh)
1184{
1185 lock_buffer(bh);
1186 if (buffer_uptodate(bh)) {
1187 unlock_buffer(bh);
1188 return bh;
1189 } else {
1190 get_bh(bh);
1191 bh->b_end_io = end_buffer_read_sync;
1192 submit_bh(READ, bh);
1193 wait_on_buffer(bh);
1194 if (buffer_uptodate(bh))
1195 return bh;
1196 }
1197 brelse(bh);
1198 return NULL;
1199}
1200
1201/*
1202 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1203 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1204 * refcount elevated by one when they're in an LRU. A buffer can only appear
1205 * once in a particular CPU's LRU. A single buffer can be present in multiple
1206 * CPU's LRUs at the same time.
1207 *
1208 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1209 * sb_find_get_block().
1210 *
1211 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1212 * a local interrupt disable for that.
1213 */
1214
1215#define BH_LRU_SIZE 8
1216
1217struct bh_lru {
1218 struct buffer_head *bhs[BH_LRU_SIZE];
1219};
1220
1221static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1222
1223#ifdef CONFIG_SMP
1224#define bh_lru_lock() local_irq_disable()
1225#define bh_lru_unlock() local_irq_enable()
1226#else
1227#define bh_lru_lock() preempt_disable()
1228#define bh_lru_unlock() preempt_enable()
1229#endif
1230
1231static inline void check_irqs_on(void)
1232{
1233#ifdef irqs_disabled
1234 BUG_ON(irqs_disabled());
1235#endif
1236}
1237
1238/*
1239 * The LRU management algorithm is dopey-but-simple. Sorry.
1240 */
1241static void bh_lru_install(struct buffer_head *bh)
1242{
1243 struct buffer_head *evictee = NULL;
1244 struct bh_lru *lru;
1245
1246 check_irqs_on();
1247 bh_lru_lock();
1248 lru = &__get_cpu_var(bh_lrus);
1249 if (lru->bhs[0] != bh) {
1250 struct buffer_head *bhs[BH_LRU_SIZE];
1251 int in;
1252 int out = 0;
1253
1254 get_bh(bh);
1255 bhs[out++] = bh;
1256 for (in = 0; in < BH_LRU_SIZE; in++) {
1257 struct buffer_head *bh2 = lru->bhs[in];
1258
1259 if (bh2 == bh) {
1260 __brelse(bh2);
1261 } else {
1262 if (out >= BH_LRU_SIZE) {
1263 BUG_ON(evictee != NULL);
1264 evictee = bh2;
1265 } else {
1266 bhs[out++] = bh2;
1267 }
1268 }
1269 }
1270 while (out < BH_LRU_SIZE)
1271 bhs[out++] = NULL;
1272 memcpy(lru->bhs, bhs, sizeof(bhs));
1273 }
1274 bh_lru_unlock();
1275
1276 if (evictee)
1277 __brelse(evictee);
1278}
1279
1280/*
1281 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1282 */
1283static struct buffer_head *
1284lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1285{
1286 struct buffer_head *ret = NULL;
1287 struct bh_lru *lru;
1288 unsigned int i;
1289
1290 check_irqs_on();
1291 bh_lru_lock();
1292 lru = &__get_cpu_var(bh_lrus);
1293 for (i = 0; i < BH_LRU_SIZE; i++) {
1294 struct buffer_head *bh = lru->bhs[i];
1295
1296 if (bh && bh->b_bdev == bdev &&
1297 bh->b_blocknr == block && bh->b_size == size) {
1298 if (i) {
1299 while (i) {
1300 lru->bhs[i] = lru->bhs[i - 1];
1301 i--;
1302 }
1303 lru->bhs[0] = bh;
1304 }
1305 get_bh(bh);
1306 ret = bh;
1307 break;
1308 }
1309 }
1310 bh_lru_unlock();
1311 return ret;
1312}
1313
1314/*
1315 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1316 * it in the LRU and mark it as accessed. If it is not present then return
1317 * NULL
1318 */
1319struct buffer_head *
1320__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1321{
1322 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1323
1324 if (bh == NULL) {
1325 bh = __find_get_block_slow(bdev, block);
1326 if (bh)
1327 bh_lru_install(bh);
1328 }
1329 if (bh)
1330 touch_buffer(bh);
1331 return bh;
1332}
1333EXPORT_SYMBOL(__find_get_block);
1334
1335/*
1336 * __getblk will locate (and, if necessary, create) the buffer_head
1337 * which corresponds to the passed block_device, block and size. The
1338 * returned buffer has its reference count incremented.
1339 *
1340 * __getblk() cannot fail - it just keeps trying. If you pass it an
1341 * illegal block number, __getblk() will happily return a buffer_head
1342 * which represents the non-existent block. Very weird.
1343 *
1344 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1345 * attempt is failing. FIXME, perhaps?
1346 */
1347struct buffer_head *
1348__getblk(struct block_device *bdev, sector_t block, unsigned size)
1349{
1350 struct buffer_head *bh = __find_get_block(bdev, block, size);
1351
1352 might_sleep();
1353 if (bh == NULL)
1354 bh = __getblk_slow(bdev, block, size);
1355 return bh;
1356}
1357EXPORT_SYMBOL(__getblk);
1358
1359/*
1360 * Do async read-ahead on a buffer..
1361 */
1362void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1363{
1364 struct buffer_head *bh = __getblk(bdev, block, size);
1365 if (likely(bh)) {
1366 ll_rw_block(READA, 1, &bh);
1367 brelse(bh);
1368 }
1369}
1370EXPORT_SYMBOL(__breadahead);
1371
1372/**
1373 * __bread() - reads a specified block and returns the bh
1374 * @bdev: the block_device to read from
1375 * @block: number of block
1376 * @size: size (in bytes) to read
1377 *
1378 * Reads a specified block, and returns buffer head that contains it.
1379 * It returns NULL if the block was unreadable.
1380 */
1381struct buffer_head *
1382__bread(struct block_device *bdev, sector_t block, unsigned size)
1383{
1384 struct buffer_head *bh = __getblk(bdev, block, size);
1385
1386 if (likely(bh) && !buffer_uptodate(bh))
1387 bh = __bread_slow(bh);
1388 return bh;
1389}
1390EXPORT_SYMBOL(__bread);
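A short usage sketch (illustrative, not part of this file): reading one metadata block through the buffer cache and releasing it. The block number and size below are placeholders.

	struct buffer_head *bh;

	bh = __bread(bdev, 42, 512);	/* placeholder block and size */
	if (bh) {
		/* bh->b_data points at the uptodate block contents */
		/* ... examine or copy the data here ... */
		brelse(bh);	/* drop the reference taken by __bread() */
	}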
1391
1392/*
1393 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1394 * This doesn't race because it runs in each cpu either in irq
1395 * or with preempt disabled.
1396 */
1397static void invalidate_bh_lru(void *arg)
1398{
1399 struct bh_lru *b = &get_cpu_var(bh_lrus);
1400 int i;
1401
1402 for (i = 0; i < BH_LRU_SIZE; i++) {
1403 brelse(b->bhs[i]);
1404 b->bhs[i] = NULL;
1405 }
1406 put_cpu_var(bh_lrus);
1407}
1408
1409void invalidate_bh_lrus(void)
1410{
1411 on_each_cpu(invalidate_bh_lru, NULL, 1);
1412}
1413EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1414
1415void set_bh_page(struct buffer_head *bh,
1416 struct page *page, unsigned long offset)
1417{
1418 bh->b_page = page;
1419 BUG_ON(offset >= PAGE_SIZE);
1420 if (PageHighMem(page))
1421 /*
1422 * This catches illegal uses and preserves the offset:
1423 */
1424 bh->b_data = (char *)(0 + offset);
1425 else
1426 bh->b_data = page_address(page) + offset;
1427}
1428EXPORT_SYMBOL(set_bh_page);
1429
1430/*
1431 * Called when truncating a buffer on a page completely.
1432 */
1433static void discard_buffer(struct buffer_head * bh)
1434{
1435 lock_buffer(bh);
1436 clear_buffer_dirty(bh);
1437 bh->b_bdev = NULL;
1438 clear_buffer_mapped(bh);
1439 clear_buffer_req(bh);
1440 clear_buffer_new(bh);
1441 clear_buffer_delay(bh);
1442 clear_buffer_unwritten(bh);
1443 unlock_buffer(bh);
1444}
1445
1446/**
1447 * block_invalidatepage - invalidate part or all of a buffer-backed page
1448 *
1449 * @page: the page which is affected
1450 * @offset: the index of the truncation point
1451 *
1452 * block_invalidatepage() is called when all or part of the page has become
1453 * invalidated by a truncate operation.
1454 *
1455 * block_invalidatepage() does not have to release all buffers, but it must
1456 * ensure that no dirty buffer is left outside @offset and that no I/O
1457 * is underway against any of the blocks which are outside the truncation
1458 * point. Because the caller is about to free (and possibly reuse) those
1459 * blocks on-disk.
1460 */
1461void block_invalidatepage(struct page *page, unsigned long offset)
1462{
1463 struct buffer_head *head, *bh, *next;
1464 unsigned int curr_off = 0;
1465
1466 BUG_ON(!PageLocked(page));
1467 if (!page_has_buffers(page))
1468 goto out;
1469
1470 head = page_buffers(page);
1471 bh = head;
1472 do {
1473 unsigned int next_off = curr_off + bh->b_size;
1474 next = bh->b_this_page;
1475
1476 /*
1477 * is this block fully invalidated?
1478 */
1479 if (offset <= curr_off)
1480 discard_buffer(bh);
1481 curr_off = next_off;
1482 bh = next;
1483 } while (bh != head);
1484
1485 /*
1486 * We release buffers only if the entire page is being invalidated.
1487 * The get_block cached value has been unconditionally invalidated,
1488 * so real IO is not possible anymore.
1489 */
1490 if (offset == 0)
1491 try_to_release_page(page, 0);
1492out:
1493 return;
1494}
1495EXPORT_SYMBOL(block_invalidatepage);
1496
1497/*
1498 * We attach and possibly dirty the buffers atomically wrt
1499 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1500 * is already excluded via the page lock.
1501 */
1502void create_empty_buffers(struct page *page,
1503 unsigned long blocksize, unsigned long b_state)
1504{
1505 struct buffer_head *bh, *head, *tail;
1506
1507 head = alloc_page_buffers(page, blocksize, 1);
1508 bh = head;
1509 do {
1510 bh->b_state |= b_state;
1511 tail = bh;
1512 bh = bh->b_this_page;
1513 } while (bh);
1514 tail->b_this_page = head;
1515
1516 spin_lock(&page->mapping->private_lock);
1517 if (PageUptodate(page) || PageDirty(page)) {
1518 bh = head;
1519 do {
1520 if (PageDirty(page))
1521 set_buffer_dirty(bh);
1522 if (PageUptodate(page))
1523 set_buffer_uptodate(bh);
1524 bh = bh->b_this_page;
1525 } while (bh != head);
1526 }
1527 attach_page_buffers(page, head);
1528 spin_unlock(&page->mapping->private_lock);
1529}
1530EXPORT_SYMBOL(create_empty_buffers);
1531
1532/*
1533 * We are taking a block for data and we don't want any output from any
1534 * buffer-cache aliases starting from return from that function and
1535 * until the moment when something will explicitly mark the buffer
1536 * dirty (hopefully that will not happen until we will free that block ;-)
1537 * We don't even need to mark it not-uptodate - nobody can expect
1538 * anything from a newly allocated buffer anyway. We used to use
1539 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1540 * don't want to mark the alias unmapped, for example - it would confuse
1541 * anyone who might pick it with bread() afterwards...
1542 *
1543 * Also.. Note that bforget() doesn't lock the buffer. So there can
1544 * be writeout I/O going on against recently-freed buffers. We don't
1545 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1546 * only if we really need to. That happens here.
1547 */
1548void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1549{
1550 struct buffer_head *old_bh;
1551
1552 might_sleep();
1553
1554 old_bh = __find_get_block_slow(bdev, block);
1555 if (old_bh) {
1556 clear_buffer_dirty(old_bh);
1557 wait_on_buffer(old_bh);
1558 clear_buffer_req(old_bh);
1559 __brelse(old_bh);
1560 }
1561}
1562EXPORT_SYMBOL(unmap_underlying_metadata);
1563
1564/*
1565 * NOTE! All mapped/uptodate combinations are valid:
1566 *
1567 * Mapped Uptodate Meaning
1568 *
1569 * No No "unknown" - must do get_block()
1570 * No Yes "hole" - zero-filled
1571 * Yes No "allocated" - allocated on disk, not read in
1572 * Yes Yes "valid" - allocated and up-to-date in memory.
1573 *
1574 * "Dirty" is valid only with the last case (mapped+uptodate).
1575 */
1576
1577/*
1578 * While block_write_full_page is writing back the dirty buffers under
1579 * the page lock, whoever dirtied the buffers may decide to clean them
1580 * again at any time. We handle that by only looking at the buffer
1581 * state inside lock_buffer().
1582 *
1583 * If block_write_full_page() is called for regular writeback
1584 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1585 * locked buffer. This only can happen if someone has written the buffer
1586 * directly, with submit_bh(). At the address_space level PageWriteback
1587 * prevents this contention from occurring.
1588 */
1589static int __block_write_full_page(struct inode *inode, struct page *page,
1590 get_block_t *get_block, struct writeback_control *wbc)
1591{
1592 int err;
1593 sector_t block;
1594 sector_t last_block;
1595 struct buffer_head *bh, *head;
1596 const unsigned blocksize = 1 << inode->i_blkbits;
1597 int nr_underway = 0;
1598 int write_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
1599
1600 BUG_ON(!PageLocked(page));
1601
1602 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1603
1604 if (!page_has_buffers(page)) {
1605 create_empty_buffers(page, blocksize,
1606 (1 << BH_Dirty)|(1 << BH_Uptodate));
1607 }
1608
1609 /*
1610 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1611 * here, and the (potentially unmapped) buffers may become dirty at
1612 * any time. If a buffer becomes dirty here after we've inspected it
1613 * then we just miss that fact, and the page stays dirty.
1614 *
1615 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1616 * handle that here by just cleaning them.
1617 */
1618
1619 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1620 head = page_buffers(page);
1621 bh = head;
1622
1623 /*
1624 * Get all the dirty buffers mapped to disk addresses and
1625 * handle any aliases from the underlying blockdev's mapping.
1626 */
1627 do {
1628 if (block > last_block) {
1629 /*
1630 * mapped buffers outside i_size will occur, because
1631 * this page can be outside i_size when there is a
1632 * truncate in progress.
1633 */
1634 /*
1635 * The buffer was zeroed by block_write_full_page()
1636 */
1637 clear_buffer_dirty(bh);
1638 set_buffer_uptodate(bh);
1639 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1640 buffer_dirty(bh)) {
1641 WARN_ON(bh->b_size != blocksize);
1642 err = get_block(inode, block, bh, 1);
1643 if (err)
1644 goto recover;
1645 clear_buffer_delay(bh);
1646 if (buffer_new(bh)) {
1647 /* blockdev mappings never come here */
1648 clear_buffer_new(bh);
1649 unmap_underlying_metadata(bh->b_bdev,
1650 bh->b_blocknr);
1651 }
1652 }
1653 bh = bh->b_this_page;
1654 block++;
1655 } while (bh != head);
1656
1657 do {
1658 if (!buffer_mapped(bh))
1659 continue;
1660 /*
1661 * If it's a fully non-blocking write attempt and we cannot
1662 * lock the buffer then redirty the page. Note that this can
1663 * potentially cause a busy-wait loop from pdflush and kswapd
1664 * activity, but those code paths have their own higher-level
1665 * throttling.
1666 */
1667 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1668 lock_buffer(bh);
1669 } else if (!trylock_buffer(bh)) {
1670 redirty_page_for_writepage(wbc, page);
1671 continue;
1672 }
1673 if (test_clear_buffer_dirty(bh)) {
1674 mark_buffer_async_write(bh);
1675 } else {
1676 unlock_buffer(bh);
1677 }
1678 } while ((bh = bh->b_this_page) != head);
1679
1680 /*
1681 * The page and its buffers are protected by PageWriteback(), so we can
1682 * drop the bh refcounts early.
1683 */
1684 BUG_ON(PageWriteback(page));
1685 set_page_writeback(page);
1686
1687 do {
1688 struct buffer_head *next = bh->b_this_page;
1689 if (buffer_async_write(bh)) {
1690 submit_bh(write_op, bh);
1691 nr_underway++;
1692 }
1693 bh = next;
1694 } while (bh != head);
1695 unlock_page(page);
1696
1697 err = 0;
1698done:
1699 if (nr_underway == 0) {
1700 /*
1701 * The page was marked dirty, but the buffers were
1702 * clean. Someone wrote them back by hand with
1703 * ll_rw_block/submit_bh. A rare case.
1704 */
1705 end_page_writeback(page);
1706
1707 /*
1708 * The page and buffer_heads can be released at any time from
1709 * here on.
1710 */
1711 }
1712 return err;
1713
1714recover:
1715 /*
1716 * ENOSPC, or some other error. We may already have added some
1717 * blocks to the file, so we need to write these out to avoid
1718 * exposing stale data.
1719 * The page is currently locked and not marked for writeback
1720 */
1721 bh = head;
1722 /* Recovery: lock and submit the mapped buffers */
1723 do {
1724 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1725 !buffer_delay(bh)) {
1726 lock_buffer(bh);
1727 mark_buffer_async_write(bh);
1728 } else {
1729 /*
1730 * The buffer may have been set dirty during
1731 * attachment to a dirty page.
1732 */
1733 clear_buffer_dirty(bh);
1734 }
1735 } while ((bh = bh->b_this_page) != head);
1736 SetPageError(page);
1737 BUG_ON(PageWriteback(page));
1738 mapping_set_error(page->mapping, err);
1739 set_page_writeback(page);
1740 do {
1741 struct buffer_head *next = bh->b_this_page;
1742 if (buffer_async_write(bh)) {
1743 clear_buffer_dirty(bh);
1744 submit_bh(write_op, bh);
1745 nr_underway++;
1746 }
1747 bh = next;
1748 } while (bh != head);
1749 unlock_page(page);
1750 goto done;
1751}
1752
1753/*
1754 * If a page has any new buffers, zero them out here, and mark them uptodate
1755 * and dirty so they'll be written out (in order to prevent uninitialised
1756 * block data from leaking). And clear the new bit.
1757 */
1758void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1759{
1760 unsigned int block_start, block_end;
1761 struct buffer_head *head, *bh;
1762
1763 BUG_ON(!PageLocked(page));
1764 if (!page_has_buffers(page))
1765 return;
1766
1767 bh = head = page_buffers(page);
1768 block_start = 0;
1769 do {
1770 block_end = block_start + bh->b_size;
1771
1772 if (buffer_new(bh)) {
1773 if (block_end > from && block_start < to) {
1774 if (!PageUptodate(page)) {
1775 unsigned start, size;
1776
1777 start = max(from, block_start);
1778 size = min(to, block_end) - start;
1779
eebd2aa3 1780 zero_user(page, start, size);
afddba49
NP
1781 set_buffer_uptodate(bh);
1782 }
1783
1784 clear_buffer_new(bh);
1785 mark_buffer_dirty(bh);
1786 }
1787 }
1788
1789 block_start = block_end;
1790 bh = bh->b_this_page;
1791 } while (bh != head);
1792}
1793EXPORT_SYMBOL(page_zero_new_buffers);
1794
1da177e4
LT
1795static int __block_prepare_write(struct inode *inode, struct page *page,
1796 unsigned from, unsigned to, get_block_t *get_block)
1797{
1798 unsigned block_start, block_end;
1799 sector_t block;
1800 int err = 0;
1801 unsigned blocksize, bbits;
1802 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1803
1804 BUG_ON(!PageLocked(page));
1805 BUG_ON(from > PAGE_CACHE_SIZE);
1806 BUG_ON(to > PAGE_CACHE_SIZE);
1807 BUG_ON(from > to);
1808
1809 blocksize = 1 << inode->i_blkbits;
1810 if (!page_has_buffers(page))
1811 create_empty_buffers(page, blocksize, 0);
1812 head = page_buffers(page);
1813
1814 bbits = inode->i_blkbits;
1815 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1816
1817 for(bh = head, block_start = 0; bh != head || !block_start;
1818 block++, block_start=block_end, bh = bh->b_this_page) {
1819 block_end = block_start + blocksize;
1820 if (block_end <= from || block_start >= to) {
1821 if (PageUptodate(page)) {
1822 if (!buffer_uptodate(bh))
1823 set_buffer_uptodate(bh);
1824 }
1825 continue;
1826 }
1827 if (buffer_new(bh))
1828 clear_buffer_new(bh);
1829 if (!buffer_mapped(bh)) {
b0cf2321 1830 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
1831 err = get_block(inode, block, bh, 1);
1832 if (err)
f3ddbdc6 1833 break;
1da177e4 1834 if (buffer_new(bh)) {
1da177e4
LT
1835 unmap_underlying_metadata(bh->b_bdev,
1836 bh->b_blocknr);
1837 if (PageUptodate(page)) {
637aff46 1838 clear_buffer_new(bh);
1da177e4 1839 set_buffer_uptodate(bh);
637aff46 1840 mark_buffer_dirty(bh);
1da177e4
LT
1841 continue;
1842 }
eebd2aa3
CL
1843 if (block_end > to || block_start < from)
1844 zero_user_segments(page,
1845 to, block_end,
1846 block_start, from);
1da177e4
LT
1847 continue;
1848 }
1849 }
1850 if (PageUptodate(page)) {
1851 if (!buffer_uptodate(bh))
1852 set_buffer_uptodate(bh);
1853 continue;
1854 }
1855 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
33a266dd 1856 !buffer_unwritten(bh) &&
1da177e4
LT
1857 (block_start < from || block_end > to)) {
1858 ll_rw_block(READ, 1, &bh);
1859 *wait_bh++=bh;
1860 }
1861 }
1862 /*
1863 * If we issued read requests - let them complete.
1864 */
1865 while(wait_bh > wait) {
1866 wait_on_buffer(*--wait_bh);
1867 if (!buffer_uptodate(*wait_bh))
f3ddbdc6 1868 err = -EIO;
1da177e4 1869 }
afddba49
NP
1870 if (unlikely(err))
1871 page_zero_new_buffers(page, from, to);
1da177e4
LT
1872 return err;
1873}
1874
1875static int __block_commit_write(struct inode *inode, struct page *page,
1876 unsigned from, unsigned to)
1877{
1878 unsigned block_start, block_end;
1879 int partial = 0;
1880 unsigned blocksize;
1881 struct buffer_head *bh, *head;
1882
1883 blocksize = 1 << inode->i_blkbits;
1884
1885 for(bh = head = page_buffers(page), block_start = 0;
1886 bh != head || !block_start;
1887 block_start=block_end, bh = bh->b_this_page) {
1888 block_end = block_start + blocksize;
1889 if (block_end <= from || block_start >= to) {
1890 if (!buffer_uptodate(bh))
1891 partial = 1;
1892 } else {
1893 set_buffer_uptodate(bh);
1894 mark_buffer_dirty(bh);
1895 }
afddba49 1896 clear_buffer_new(bh);
1da177e4
LT
1897 }
1898
1899 /*
1900 * If this is a partial write which happened to make all buffers
1901 * uptodate then we can optimize away a bogus readpage() for
1902 * the next read(). Here we 'discover' whether the page went
1903 * uptodate as a result of this (potentially partial) write.
1904 */
1905 if (!partial)
1906 SetPageUptodate(page);
1907 return 0;
1908}
1909
afddba49
NP
1910/*
1911 * block_write_begin takes care of the basic task of block allocation and
1912 * bringing partial write blocks uptodate first.
1913 *
1914 * If *pagep is not NULL, then block_write_begin uses the locked page
1915 * at *pagep rather than allocating its own. In this case, the page will
1916 * not be unlocked or deallocated on failure.
1917 */
1918int block_write_begin(struct file *file, struct address_space *mapping,
1919 loff_t pos, unsigned len, unsigned flags,
1920 struct page **pagep, void **fsdata,
1921 get_block_t *get_block)
1922{
1923 struct inode *inode = mapping->host;
1924 int status = 0;
1925 struct page *page;
1926 pgoff_t index;
1927 unsigned start, end;
1928 int ownpage = 0;
1929
1930 index = pos >> PAGE_CACHE_SHIFT;
1931 start = pos & (PAGE_CACHE_SIZE - 1);
1932 end = start + len;
1933
1934 page = *pagep;
1935 if (page == NULL) {
1936 ownpage = 1;
54566b2c 1937 page = grab_cache_page_write_begin(mapping, index, flags);
afddba49
NP
1938 if (!page) {
1939 status = -ENOMEM;
1940 goto out;
1941 }
1942 *pagep = page;
1943 } else
1944 BUG_ON(!PageLocked(page));
1945
1946 status = __block_prepare_write(inode, page, start, end, get_block);
1947 if (unlikely(status)) {
1948 ClearPageUptodate(page);
1949
1950 if (ownpage) {
1951 unlock_page(page);
1952 page_cache_release(page);
1953 *pagep = NULL;
1954
1955 /*
1956 * prepare_write() may have instantiated a few blocks
1957 * outside i_size. Trim these off again. Don't need
1958 * i_size_read because we hold i_mutex.
1959 */
1960 if (pos + len > inode->i_size)
1961 vmtruncate(inode, inode->i_size);
1962 }
afddba49
NP
1963 }
1964
1965out:
1966 return status;
1967}
1968EXPORT_SYMBOL(block_write_begin);
1969
1970int block_write_end(struct file *file, struct address_space *mapping,
1971 loff_t pos, unsigned len, unsigned copied,
1972 struct page *page, void *fsdata)
1973{
1974 struct inode *inode = mapping->host;
1975 unsigned start;
1976
1977 start = pos & (PAGE_CACHE_SIZE - 1);
1978
1979 if (unlikely(copied < len)) {
1980 /*
1981 * The buffers that were written will now be uptodate, so we
1982 * don't have to worry about a readpage reading them and
1983 * overwriting a partial write. However if we have encountered
1984 * a short write and only partially written into a buffer, it
1985 * will not be marked uptodate, so a readpage might come in and
1986 * destroy our partial write.
1987 *
1988 * Do the simplest thing, and just treat any short write to a
1989 * non uptodate page as a zero-length write, and force the
1990 * caller to redo the whole thing.
1991 */
1992 if (!PageUptodate(page))
1993 copied = 0;
1994
1995 page_zero_new_buffers(page, start+copied, start+len);
1996 }
1997 flush_dcache_page(page);
1998
1999 /* This could be a short (even 0-length) commit */
2000 __block_commit_write(inode, page, start, start+copied);
2001
2002 return copied;
2003}
2004EXPORT_SYMBOL(block_write_end);
2005
2006int generic_write_end(struct file *file, struct address_space *mapping,
2007 loff_t pos, unsigned len, unsigned copied,
2008 struct page *page, void *fsdata)
2009{
2010 struct inode *inode = mapping->host;
c7d206b3 2011 int i_size_changed = 0;
afddba49
NP
2012
2013 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2014
2015 /*
2016 * No need to use i_size_read() here, the i_size
2017 * cannot change under us because we hold i_mutex.
2018 *
2019 * But it's important to update i_size while still holding page lock:
2020 * page writeout could otherwise come in and zero beyond i_size.
2021 */
2022 if (pos+copied > inode->i_size) {
2023 i_size_write(inode, pos+copied);
c7d206b3 2024 i_size_changed = 1;
afddba49
NP
2025 }
2026
2027 unlock_page(page);
2028 page_cache_release(page);
2029
c7d206b3
JK
2030 /*
2031 * Don't mark the inode dirty under page lock. First, it unnecessarily
2032 * makes the holding time of page lock longer. Second, it forces lock
2033 * ordering of page lock and transaction start for journaling
2034 * filesystems.
2035 */
2036 if (i_size_changed)
2037 mark_inode_dirty(inode);
2038
afddba49
NP
2039 return copied;
2040}
2041EXPORT_SYMBOL(generic_write_end);
2042
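/*
 * Illustrative sketch, not part of this file: how a simple filesystem
 * might wire the helpers above into its address_space_operations.  The
 * myfs_* names are hypothetical; only block_write_begin() and
 * generic_write_end() come from the code above.
 */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create);

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;	/* let block_write_begin() grab and lock the page */
	return block_write_begin(file, mapping, pos, len, flags,
				pagep, fsdata, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	.write_begin	= myfs_write_begin,
	.write_end	= generic_write_end,
	/* ...readpage, writepage and friends would go here... */
};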
8ab22b9a
HH
2043/*
2044 * block_is_partially_uptodate checks whether buffers within a page are
2045 * uptodate or not.
2046 *
2047 * Returns true if all buffers which correspond to a file portion
2048 * we want to read are uptodate.
2049 */
2050int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2051 unsigned long from)
2052{
2053 struct inode *inode = page->mapping->host;
2054 unsigned block_start, block_end, blocksize;
2055 unsigned to;
2056 struct buffer_head *bh, *head;
2057 int ret = 1;
2058
2059 if (!page_has_buffers(page))
2060 return 0;
2061
2062 blocksize = 1 << inode->i_blkbits;
2063 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2064 to = from + to;
2065 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2066 return 0;
2067
2068 head = page_buffers(page);
2069 bh = head;
2070 block_start = 0;
2071 do {
2072 block_end = block_start + blocksize;
2073 if (block_end > from && block_start < to) {
2074 if (!buffer_uptodate(bh)) {
2075 ret = 0;
2076 break;
2077 }
2078 if (block_end >= to)
2079 break;
2080 }
2081 block_start = block_end;
2082 bh = bh->b_this_page;
2083 } while (bh != head);
2084
2085 return ret;
2086}
2087EXPORT_SYMBOL(block_is_partially_uptodate);
2088
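/*
 * Illustrative sketch, not part of this file: block_is_partially_uptodate()
 * is meant to be plugged directly into ->is_partially_uptodate, so that a
 * read wholly covered by uptodate buffers can skip ->readpage even though
 * the page as a whole is not uptodate.  The myfs_* names are hypothetical.
 */
static const struct address_space_operations myfs_file_aops = {
	.is_partially_uptodate	= block_is_partially_uptodate,
	/* ...the remaining methods... */
};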
1da177e4
LT
2089/*
2090 * Generic "read page" function for block devices that have the normal
2091 * get_block functionality. This is most of the block device filesystems.
2092 * Reads the page asynchronously --- the unlock_buffer() and
2093 * set/clear_buffer_uptodate() functions propagate buffer state into the
2094 * page struct once IO has completed.
2095 */
2096int block_read_full_page(struct page *page, get_block_t *get_block)
2097{
2098 struct inode *inode = page->mapping->host;
2099 sector_t iblock, lblock;
2100 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2101 unsigned int blocksize;
2102 int nr, i;
2103 int fully_mapped = 1;
2104
cd7619d6 2105 BUG_ON(!PageLocked(page));
1da177e4
LT
2106 blocksize = 1 << inode->i_blkbits;
2107 if (!page_has_buffers(page))
2108 create_empty_buffers(page, blocksize, 0);
2109 head = page_buffers(page);
2110
2111 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2112 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2113 bh = head;
2114 nr = 0;
2115 i = 0;
2116
2117 do {
2118 if (buffer_uptodate(bh))
2119 continue;
2120
2121 if (!buffer_mapped(bh)) {
c64610ba
AM
2122 int err = 0;
2123
1da177e4
LT
2124 fully_mapped = 0;
2125 if (iblock < lblock) {
b0cf2321 2126 WARN_ON(bh->b_size != blocksize);
c64610ba
AM
2127 err = get_block(inode, iblock, bh, 0);
2128 if (err)
1da177e4
LT
2129 SetPageError(page);
2130 }
2131 if (!buffer_mapped(bh)) {
eebd2aa3 2132 zero_user(page, i * blocksize, blocksize);
c64610ba
AM
2133 if (!err)
2134 set_buffer_uptodate(bh);
1da177e4
LT
2135 continue;
2136 }
2137 /*
2138 * get_block() might have updated the buffer
2139 * synchronously
2140 */
2141 if (buffer_uptodate(bh))
2142 continue;
2143 }
2144 arr[nr++] = bh;
2145 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2146
2147 if (fully_mapped)
2148 SetPageMappedToDisk(page);
2149
2150 if (!nr) {
2151 /*
2152 * All buffers are uptodate - we can set the page uptodate
2153 * as well. But not if get_block() returned an error.
2154 */
2155 if (!PageError(page))
2156 SetPageUptodate(page);
2157 unlock_page(page);
2158 return 0;
2159 }
2160
2161 /* Stage two: lock the buffers */
2162 for (i = 0; i < nr; i++) {
2163 bh = arr[i];
2164 lock_buffer(bh);
2165 mark_buffer_async_read(bh);
2166 }
2167
2168 /*
2169 * Stage 3: start the IO. Check for uptodateness
2170 * inside the buffer lock in case another process reading
2171 * the underlying blockdev brought it uptodate (the sct fix).
2172 */
2173 for (i = 0; i < nr; i++) {
2174 bh = arr[i];
2175 if (buffer_uptodate(bh))
2176 end_buffer_async_read(bh, 1);
2177 else
2178 submit_bh(READ, bh);
2179 }
2180 return 0;
2181}
2182
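/*
 * Illustrative sketch, not part of this file: a filesystem's ->readpage is
 * typically a one-line wrapper around block_read_full_page() with its own
 * get_block routine.  The myfs_* names are hypothetical.
 */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create);

static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}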
2183/* utility function for filesystems that need to do work on expanding
89e10787 2184 * truncates. Uses filesystem pagecache writes to allow the filesystem to
1da177e4
LT
2185 * deal with the hole.
2186 */
89e10787 2187int generic_cont_expand_simple(struct inode *inode, loff_t size)
1da177e4
LT
2188{
2189 struct address_space *mapping = inode->i_mapping;
2190 struct page *page;
89e10787 2191 void *fsdata;
05eb0b51 2192 unsigned long limit;
1da177e4
LT
2193 int err;
2194
2195 err = -EFBIG;
2196 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2197 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2198 send_sig(SIGXFSZ, current, 0);
2199 goto out;
2200 }
2201 if (size > inode->i_sb->s_maxbytes)
2202 goto out;
2203
89e10787
NP
2204 err = pagecache_write_begin(NULL, mapping, size, 0,
2205 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2206 &page, &fsdata);
2207 if (err)
05eb0b51 2208 goto out;
05eb0b51 2209
89e10787
NP
2210 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2211 BUG_ON(err > 0);
05eb0b51 2212
1da177e4
LT
2213out:
2214 return err;
2215}
2216
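/*
 * Illustrative sketch, not part of this file: a filesystem that cannot
 * represent holes may call generic_cont_expand_simple() from its setattr
 * path when the size grows, so the gap between the old and new EOF is
 * zeroed through the pagecache before anything is written beyond it.
 * myfs_setattr_expand() is a hypothetical helper.
 */
static int myfs_setattr_expand(struct inode *inode, loff_t newsize)
{
	if (newsize <= i_size_read(inode))
		return 0;		/* shrinking handled elsewhere */
	return generic_cont_expand_simple(inode, newsize);
}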
f1e3af72
AB
2217static int cont_expand_zero(struct file *file, struct address_space *mapping,
2218 loff_t pos, loff_t *bytes)
1da177e4 2219{
1da177e4 2220 struct inode *inode = mapping->host;
1da177e4 2221 unsigned blocksize = 1 << inode->i_blkbits;
89e10787
NP
2222 struct page *page;
2223 void *fsdata;
2224 pgoff_t index, curidx;
2225 loff_t curpos;
2226 unsigned zerofrom, offset, len;
2227 int err = 0;
1da177e4 2228
89e10787
NP
2229 index = pos >> PAGE_CACHE_SHIFT;
2230 offset = pos & ~PAGE_CACHE_MASK;
2231
2232 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2233 zerofrom = curpos & ~PAGE_CACHE_MASK;
1da177e4
LT
2234 if (zerofrom & (blocksize-1)) {
2235 *bytes |= (blocksize-1);
2236 (*bytes)++;
2237 }
89e10787 2238 len = PAGE_CACHE_SIZE - zerofrom;
1da177e4 2239
89e10787
NP
2240 err = pagecache_write_begin(file, mapping, curpos, len,
2241 AOP_FLAG_UNINTERRUPTIBLE,
2242 &page, &fsdata);
2243 if (err)
2244 goto out;
eebd2aa3 2245 zero_user(page, zerofrom, len);
89e10787
NP
2246 err = pagecache_write_end(file, mapping, curpos, len, len,
2247 page, fsdata);
2248 if (err < 0)
2249 goto out;
2250 BUG_ON(err != len);
2251 err = 0;
061e9746
OH
2252
2253 balance_dirty_pages_ratelimited(mapping);
89e10787 2254 }
1da177e4 2255
89e10787
NP
2256 /* page covers the boundary, find the boundary offset */
2257 if (index == curidx) {
2258 zerofrom = curpos & ~PAGE_CACHE_MASK;
1da177e4 2259 /* if we will expand the thing last block will be filled */
89e10787
NP
2260 if (offset <= zerofrom) {
2261 goto out;
2262 }
2263 if (zerofrom & (blocksize-1)) {
1da177e4
LT
2264 *bytes |= (blocksize-1);
2265 (*bytes)++;
2266 }
89e10787 2267 len = offset - zerofrom;
1da177e4 2268
89e10787
NP
2269 err = pagecache_write_begin(file, mapping, curpos, len,
2270 AOP_FLAG_UNINTERRUPTIBLE,
2271 &page, &fsdata);
2272 if (err)
2273 goto out;
eebd2aa3 2274 zero_user(page, zerofrom, len);
89e10787
NP
2275 err = pagecache_write_end(file, mapping, curpos, len, len,
2276 page, fsdata);
2277 if (err < 0)
2278 goto out;
2279 BUG_ON(err != len);
2280 err = 0;
1da177e4 2281 }
89e10787
NP
2282out:
2283 return err;
2284}
2285
2286/*
 2287 * For moronic filesystems that do not allow holes in files.
2288 * We may have to extend the file.
2289 */
2290int cont_write_begin(struct file *file, struct address_space *mapping,
2291 loff_t pos, unsigned len, unsigned flags,
2292 struct page **pagep, void **fsdata,
2293 get_block_t *get_block, loff_t *bytes)
2294{
2295 struct inode *inode = mapping->host;
2296 unsigned blocksize = 1 << inode->i_blkbits;
2297 unsigned zerofrom;
2298 int err;
2299
2300 err = cont_expand_zero(file, mapping, pos, bytes);
2301 if (err)
2302 goto out;
2303
2304 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2305 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2306 *bytes |= (blocksize-1);
2307 (*bytes)++;
1da177e4 2308 }
1da177e4 2309
89e10787
NP
2310 *pagep = NULL;
2311 err = block_write_begin(file, mapping, pos, len,
2312 flags, pagep, fsdata, get_block);
1da177e4 2313out:
89e10787 2314 return err;
1da177e4
LT
2315}
2316
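/*
 * Illustrative sketch, not part of this file: cont_write_begin() needs a
 * per-inode "bytes initialised so far" cursor that it can advance as it
 * zero-fills.  A FAT-like filesystem might keep that cursor in its in-core
 * inode; everything named myfs_* or mmu_private below is hypothetical.
 */
struct myfs_inode_info {
	loff_t		mmu_private;	/* file bytes initialised so far */
	struct inode	vfs_inode;
};
#define MYFS_I(inode)	container_of(inode, struct myfs_inode_info, vfs_inode)

static int myfs_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create);

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block,
				&MYFS_I(mapping->host)->mmu_private);
}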
2317int block_prepare_write(struct page *page, unsigned from, unsigned to,
2318 get_block_t *get_block)
2319{
2320 struct inode *inode = page->mapping->host;
2321 int err = __block_prepare_write(inode, page, from, to, get_block);
2322 if (err)
2323 ClearPageUptodate(page);
2324 return err;
2325}
2326
2327int block_commit_write(struct page *page, unsigned from, unsigned to)
2328{
2329 struct inode *inode = page->mapping->host;
2330 __block_commit_write(inode,page,from,to);
2331 return 0;
2332}
2333
54171690
DC
2334/*
2335 * block_page_mkwrite() is not allowed to change the file size as it gets
2336 * called from a page fault handler when a page is first dirtied. Hence we must
2337 * be careful to check for EOF conditions here. We set the page up correctly
2338 * for a written page which means we get ENOSPC checking when writing into
2339 * holes and correct delalloc and unwritten extent mapping on filesystems that
2340 * support these features.
2341 *
2342 * We are not allowed to take the i_mutex here so we have to play games to
2343 * protect against truncate races as the page could now be beyond EOF. Because
2344 * vmtruncate() writes the inode size before removing pages, once we have the
2345 * page lock we can determine safely if the page is beyond EOF. If it is not
2346 * beyond EOF, then the page is guaranteed safe against truncation until we
2347 * unlock the page.
2348 */
2349int
c2ec175c 2350block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
54171690
DC
2351 get_block_t get_block)
2352{
c2ec175c 2353 struct page *page = vmf->page;
54171690
DC
2354 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2355 unsigned long end;
2356 loff_t size;
56a76f82 2357 int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
54171690
DC
2358
2359 lock_page(page);
2360 size = i_size_read(inode);
2361 if ((page->mapping != inode->i_mapping) ||
18336338 2362 (page_offset(page) > size)) {
54171690
DC
2363 /* page got truncated out from underneath us */
2364 goto out_unlock;
2365 }
2366
2367 /* page is wholly or partially inside EOF */
2368 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2369 end = size & ~PAGE_CACHE_MASK;
2370 else
2371 end = PAGE_CACHE_SIZE;
2372
2373 ret = block_prepare_write(page, 0, end, get_block);
2374 if (!ret)
2375 ret = block_commit_write(page, 0, end);
2376
56a76f82
NP
2377 if (unlikely(ret)) {
2378 if (ret == -ENOMEM)
2379 ret = VM_FAULT_OOM;
2380 else /* -ENOSPC, -EIO, etc */
2381 ret = VM_FAULT_SIGBUS;
2382 }
c2ec175c 2383
56a76f82 2384out_unlock:
54171690
DC
2385 unlock_page(page);
2386 return ret;
2387}
1da177e4
LT
2388
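/*
 * Illustrative sketch, not part of this file: block_page_mkwrite() is meant
 * to sit behind a filesystem's ->page_mkwrite handler so that writable mmap
 * faults get block allocation and ENOSPC checking before the page is
 * dirtied.  The myfs_* names are hypothetical; filemap_fault() is the
 * stock fault handler.
 */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create);

static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, myfs_get_block);
}

static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};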
2389/*
03158cd7 2390 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
1da177e4
LT
2391 * immediately, while under the page lock. So it needs a special end_io
2392 * handler which does not touch the bh after unlocking it.
1da177e4
LT
2393 */
2394static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2395{
68671f35 2396 __end_buffer_read_notouch(bh, uptodate);
1da177e4
LT
2397}
2398
03158cd7
NP
2399/*
2400 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2401 * the page (converting it to circular linked list and taking care of page
2402 * dirty races).
2403 */
2404static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2405{
2406 struct buffer_head *bh;
2407
2408 BUG_ON(!PageLocked(page));
2409
2410 spin_lock(&page->mapping->private_lock);
2411 bh = head;
2412 do {
2413 if (PageDirty(page))
2414 set_buffer_dirty(bh);
2415 if (!bh->b_this_page)
2416 bh->b_this_page = head;
2417 bh = bh->b_this_page;
2418 } while (bh != head);
2419 attach_page_buffers(page, head);
2420 spin_unlock(&page->mapping->private_lock);
2421}
2422
1da177e4
LT
2423/*
2424 * On entry, the page is fully not uptodate.
2425 * On exit the page is fully uptodate in the areas outside (from,to)
2426 */
03158cd7
NP
2427int nobh_write_begin(struct file *file, struct address_space *mapping,
2428 loff_t pos, unsigned len, unsigned flags,
2429 struct page **pagep, void **fsdata,
1da177e4
LT
2430 get_block_t *get_block)
2431{
03158cd7 2432 struct inode *inode = mapping->host;
1da177e4
LT
2433 const unsigned blkbits = inode->i_blkbits;
2434 const unsigned blocksize = 1 << blkbits;
a4b0672d 2435 struct buffer_head *head, *bh;
03158cd7
NP
2436 struct page *page;
2437 pgoff_t index;
2438 unsigned from, to;
1da177e4 2439 unsigned block_in_page;
a4b0672d 2440 unsigned block_start, block_end;
1da177e4 2441 sector_t block_in_file;
1da177e4 2442 int nr_reads = 0;
1da177e4
LT
2443 int ret = 0;
2444 int is_mapped_to_disk = 1;
1da177e4 2445
03158cd7
NP
2446 index = pos >> PAGE_CACHE_SHIFT;
2447 from = pos & (PAGE_CACHE_SIZE - 1);
2448 to = from + len;
2449
54566b2c 2450 page = grab_cache_page_write_begin(mapping, index, flags);
03158cd7
NP
2451 if (!page)
2452 return -ENOMEM;
2453 *pagep = page;
2454 *fsdata = NULL;
2455
2456 if (page_has_buffers(page)) {
2457 unlock_page(page);
2458 page_cache_release(page);
2459 *pagep = NULL;
2460 return block_write_begin(file, mapping, pos, len, flags, pagep,
2461 fsdata, get_block);
2462 }
a4b0672d 2463
1da177e4
LT
2464 if (PageMappedToDisk(page))
2465 return 0;
2466
a4b0672d
NP
2467 /*
2468 * Allocate buffers so that we can keep track of state, and potentially
2469 * attach them to the page if an error occurs. In the common case of
2470 * no error, they will just be freed again without ever being attached
2471 * to the page (which is all OK, because we're under the page lock).
2472 *
2473 * Be careful: the buffer linked list is a NULL terminated one, rather
2474 * than the circular one we're used to.
2475 */
2476 head = alloc_page_buffers(page, blocksize, 0);
03158cd7
NP
2477 if (!head) {
2478 ret = -ENOMEM;
2479 goto out_release;
2480 }
a4b0672d 2481
1da177e4 2482 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
1da177e4
LT
2483
2484 /*
2485 * We loop across all blocks in the page, whether or not they are
2486 * part of the affected region. This is so we can discover if the
2487 * page is fully mapped-to-disk.
2488 */
a4b0672d 2489 for (block_start = 0, block_in_page = 0, bh = head;
1da177e4 2490 block_start < PAGE_CACHE_SIZE;
a4b0672d 2491 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
1da177e4
LT
2492 int create;
2493
a4b0672d
NP
2494 block_end = block_start + blocksize;
2495 bh->b_state = 0;
1da177e4
LT
2496 create = 1;
2497 if (block_start >= to)
2498 create = 0;
2499 ret = get_block(inode, block_in_file + block_in_page,
a4b0672d 2500 bh, create);
1da177e4
LT
2501 if (ret)
2502 goto failed;
a4b0672d 2503 if (!buffer_mapped(bh))
1da177e4 2504 is_mapped_to_disk = 0;
a4b0672d
NP
2505 if (buffer_new(bh))
2506 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2507 if (PageUptodate(page)) {
2508 set_buffer_uptodate(bh);
1da177e4 2509 continue;
a4b0672d
NP
2510 }
2511 if (buffer_new(bh) || !buffer_mapped(bh)) {
eebd2aa3
CL
2512 zero_user_segments(page, block_start, from,
2513 to, block_end);
1da177e4
LT
2514 continue;
2515 }
a4b0672d 2516 if (buffer_uptodate(bh))
1da177e4
LT
2517 continue; /* reiserfs does this */
2518 if (block_start < from || block_end > to) {
a4b0672d
NP
2519 lock_buffer(bh);
2520 bh->b_end_io = end_buffer_read_nobh;
2521 submit_bh(READ, bh);
2522 nr_reads++;
1da177e4
LT
2523 }
2524 }
2525
2526 if (nr_reads) {
1da177e4
LT
2527 /*
2528 * The page is locked, so these buffers are protected from
2529 * any VM or truncate activity. Hence we don't need to care
2530 * for the buffer_head refcounts.
2531 */
a4b0672d 2532 for (bh = head; bh; bh = bh->b_this_page) {
1da177e4
LT
2533 wait_on_buffer(bh);
2534 if (!buffer_uptodate(bh))
2535 ret = -EIO;
1da177e4
LT
2536 }
2537 if (ret)
2538 goto failed;
2539 }
2540
2541 if (is_mapped_to_disk)
2542 SetPageMappedToDisk(page);
1da177e4 2543
03158cd7 2544 *fsdata = head; /* to be released by nobh_write_end */
a4b0672d 2545
1da177e4
LT
2546 return 0;
2547
2548failed:
03158cd7 2549 BUG_ON(!ret);
1da177e4 2550 /*
a4b0672d
NP
2551 * Error recovery is a bit difficult. We need to zero out blocks that
2552 * were newly allocated, and dirty them to ensure they get written out.
2553 * Buffers need to be attached to the page at this point, otherwise
2554 * the handling of potential IO errors during writeout would be hard
2555 * (could try doing synchronous writeout, but what if that fails too?)
1da177e4 2556 */
03158cd7
NP
2557 attach_nobh_buffers(page, head);
2558 page_zero_new_buffers(page, from, to);
a4b0672d 2559
03158cd7
NP
2560out_release:
2561 unlock_page(page);
2562 page_cache_release(page);
2563 *pagep = NULL;
a4b0672d 2564
03158cd7
NP
2565 if (pos + len > inode->i_size)
2566 vmtruncate(inode, inode->i_size);
a4b0672d 2567
1da177e4
LT
2568 return ret;
2569}
03158cd7 2570EXPORT_SYMBOL(nobh_write_begin);
1da177e4 2571
03158cd7
NP
2572int nobh_write_end(struct file *file, struct address_space *mapping,
2573 loff_t pos, unsigned len, unsigned copied,
2574 struct page *page, void *fsdata)
1da177e4
LT
2575{
2576 struct inode *inode = page->mapping->host;
efdc3131 2577 struct buffer_head *head = fsdata;
03158cd7 2578 struct buffer_head *bh;
5b41e74a 2579 BUG_ON(fsdata != NULL && page_has_buffers(page));
1da177e4 2580
d4cf109f 2581 if (unlikely(copied < len) && head)
5b41e74a
DM
2582 attach_nobh_buffers(page, head);
2583 if (page_has_buffers(page))
2584 return generic_write_end(file, mapping, pos, len,
2585 copied, page, fsdata);
a4b0672d 2586
22c8ca78 2587 SetPageUptodate(page);
1da177e4 2588 set_page_dirty(page);
03158cd7
NP
2589 if (pos+copied > inode->i_size) {
2590 i_size_write(inode, pos+copied);
1da177e4
LT
2591 mark_inode_dirty(inode);
2592 }
03158cd7
NP
2593
2594 unlock_page(page);
2595 page_cache_release(page);
2596
03158cd7
NP
2597 while (head) {
2598 bh = head;
2599 head = head->b_this_page;
2600 free_buffer_head(bh);
2601 }
2602
2603 return copied;
1da177e4 2604}
03158cd7 2605EXPORT_SYMBOL(nobh_write_end);
1da177e4
LT
2606
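/*
 * Illustrative sketch, not part of this file: the nobh_* helpers are meant
 * to be used as a set, so that in the common case no buffer_heads remain
 * attached to the page after a write.  The myfs_* names are hypothetical;
 * nobh_write_end above and nobh_writepage below are the real counterparts.
 */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create);

static int myfs_nobh_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return nobh_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block);
}

static const struct address_space_operations myfs_nobh_aops = {
	.write_begin	= myfs_nobh_write_begin,
	.write_end	= nobh_write_end,
	/* ...readpage, and writepage via nobh_writepage, would go here... */
};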
2607/*
 2608 * nobh_writepage() - based on block_write_full_page() except
2609 * that it tries to operate without attaching bufferheads to
2610 * the page.
2611 */
2612int nobh_writepage(struct page *page, get_block_t *get_block,
2613 struct writeback_control *wbc)
2614{
2615 struct inode * const inode = page->mapping->host;
2616 loff_t i_size = i_size_read(inode);
2617 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2618 unsigned offset;
1da177e4
LT
2619 int ret;
2620
2621 /* Is the page fully inside i_size? */
2622 if (page->index < end_index)
2623 goto out;
2624
2625 /* Is the page fully outside i_size? (truncate in progress) */
2626 offset = i_size & (PAGE_CACHE_SIZE-1);
2627 if (page->index >= end_index+1 || !offset) {
2628 /*
2629 * The page may have dirty, unmapped buffers. For example,
2630 * they may have been added in ext3_writepage(). Make them
2631 * freeable here, so the page does not leak.
2632 */
2633#if 0
2634 /* Not really sure about this - do we need this ? */
2635 if (page->mapping->a_ops->invalidatepage)
2636 page->mapping->a_ops->invalidatepage(page, offset);
2637#endif
2638 unlock_page(page);
2639 return 0; /* don't care */
2640 }
2641
2642 /*
2643 * The page straddles i_size. It must be zeroed out on each and every
2644 * writepage invocation because it may be mmapped. "A file is mapped
2645 * in multiples of the page size. For a file that is not a multiple of
2646 * the page size, the remaining memory is zeroed when mapped, and
2647 * writes to that region are not written out to the file."
2648 */
eebd2aa3 2649 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
1da177e4
LT
2650out:
2651 ret = mpage_writepage(page, get_block, wbc);
2652 if (ret == -EAGAIN)
2653 ret = __block_write_full_page(inode, page, get_block, wbc);
2654 return ret;
2655}
2656EXPORT_SYMBOL(nobh_writepage);
2657
03158cd7
NP
2658int nobh_truncate_page(struct address_space *mapping,
2659 loff_t from, get_block_t *get_block)
1da177e4 2660{
1da177e4
LT
2661 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2662 unsigned offset = from & (PAGE_CACHE_SIZE-1);
03158cd7
NP
2663 unsigned blocksize;
2664 sector_t iblock;
2665 unsigned length, pos;
2666 struct inode *inode = mapping->host;
1da177e4 2667 struct page *page;
03158cd7
NP
2668 struct buffer_head map_bh;
2669 int err;
1da177e4 2670
03158cd7
NP
2671 blocksize = 1 << inode->i_blkbits;
2672 length = offset & (blocksize - 1);
2673
2674 /* Block boundary? Nothing to do */
2675 if (!length)
2676 return 0;
2677
2678 length = blocksize - length;
2679 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4 2680
1da177e4 2681 page = grab_cache_page(mapping, index);
03158cd7 2682 err = -ENOMEM;
1da177e4
LT
2683 if (!page)
2684 goto out;
2685
03158cd7
NP
2686 if (page_has_buffers(page)) {
2687has_buffers:
2688 unlock_page(page);
2689 page_cache_release(page);
2690 return block_truncate_page(mapping, from, get_block);
2691 }
2692
2693 /* Find the buffer that contains "offset" */
2694 pos = blocksize;
2695 while (offset >= pos) {
2696 iblock++;
2697 pos += blocksize;
2698 }
2699
2700 err = get_block(inode, iblock, &map_bh, 0);
2701 if (err)
2702 goto unlock;
2703 /* unmapped? It's a hole - nothing to do */
2704 if (!buffer_mapped(&map_bh))
2705 goto unlock;
2706
2707 /* Ok, it's mapped. Make sure it's up-to-date */
2708 if (!PageUptodate(page)) {
2709 err = mapping->a_ops->readpage(NULL, page);
2710 if (err) {
2711 page_cache_release(page);
2712 goto out;
2713 }
2714 lock_page(page);
2715 if (!PageUptodate(page)) {
2716 err = -EIO;
2717 goto unlock;
2718 }
2719 if (page_has_buffers(page))
2720 goto has_buffers;
1da177e4 2721 }
eebd2aa3 2722 zero_user(page, offset, length);
03158cd7
NP
2723 set_page_dirty(page);
2724 err = 0;
2725
2726unlock:
1da177e4
LT
2727 unlock_page(page);
2728 page_cache_release(page);
2729out:
03158cd7 2730 return err;
1da177e4
LT
2731}
2732EXPORT_SYMBOL(nobh_truncate_page);
2733
2734int block_truncate_page(struct address_space *mapping,
2735 loff_t from, get_block_t *get_block)
2736{
2737 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2738 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2739 unsigned blocksize;
54b21a79 2740 sector_t iblock;
1da177e4
LT
2741 unsigned length, pos;
2742 struct inode *inode = mapping->host;
2743 struct page *page;
2744 struct buffer_head *bh;
1da177e4
LT
2745 int err;
2746
2747 blocksize = 1 << inode->i_blkbits;
2748 length = offset & (blocksize - 1);
2749
2750 /* Block boundary? Nothing to do */
2751 if (!length)
2752 return 0;
2753
2754 length = blocksize - length;
54b21a79 2755 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4
LT
2756
2757 page = grab_cache_page(mapping, index);
2758 err = -ENOMEM;
2759 if (!page)
2760 goto out;
2761
2762 if (!page_has_buffers(page))
2763 create_empty_buffers(page, blocksize, 0);
2764
2765 /* Find the buffer that contains "offset" */
2766 bh = page_buffers(page);
2767 pos = blocksize;
2768 while (offset >= pos) {
2769 bh = bh->b_this_page;
2770 iblock++;
2771 pos += blocksize;
2772 }
2773
2774 err = 0;
2775 if (!buffer_mapped(bh)) {
b0cf2321 2776 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
2777 err = get_block(inode, iblock, bh, 0);
2778 if (err)
2779 goto unlock;
2780 /* unmapped? It's a hole - nothing to do */
2781 if (!buffer_mapped(bh))
2782 goto unlock;
2783 }
2784
2785 /* Ok, it's mapped. Make sure it's up-to-date */
2786 if (PageUptodate(page))
2787 set_buffer_uptodate(bh);
2788
33a266dd 2789 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
1da177e4
LT
2790 err = -EIO;
2791 ll_rw_block(READ, 1, &bh);
2792 wait_on_buffer(bh);
2793 /* Uhhuh. Read error. Complain and punt. */
2794 if (!buffer_uptodate(bh))
2795 goto unlock;
2796 }
2797
eebd2aa3 2798 zero_user(page, offset, length);
1da177e4
LT
2799 mark_buffer_dirty(bh);
2800 err = 0;
2801
2802unlock:
2803 unlock_page(page);
2804 page_cache_release(page);
2805out:
2806 return err;
2807}
2808
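/*
 * Illustrative sketch, not part of this file: on a shrinking truncate the
 * filesystem's ->truncate typically zeroes the tail of the new last block
 * with block_truncate_page() before freeing the blocks beyond i_size, so
 * that stale data cannot reappear if the file is later extended.  The
 * myfs_* helpers are hypothetical; i_size has already been updated by the
 * VFS (vmtruncate) by the time ->truncate runs.
 */
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	myfs_free_blocks_beyond(inode, inode->i_size);	/* hypothetical */
	mark_inode_dirty(inode);
}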
2809/*
2810 * The generic ->writepage function for buffer-backed address_spaces
2811 */
2812int block_write_full_page(struct page *page, get_block_t *get_block,
2813 struct writeback_control *wbc)
2814{
2815 struct inode * const inode = page->mapping->host;
2816 loff_t i_size = i_size_read(inode);
2817 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2818 unsigned offset;
1da177e4
LT
2819
2820 /* Is the page fully inside i_size? */
2821 if (page->index < end_index)
2822 return __block_write_full_page(inode, page, get_block, wbc);
2823
2824 /* Is the page fully outside i_size? (truncate in progress) */
2825 offset = i_size & (PAGE_CACHE_SIZE-1);
2826 if (page->index >= end_index+1 || !offset) {
2827 /*
2828 * The page may have dirty, unmapped buffers. For example,
2829 * they may have been added in ext3_writepage(). Make them
2830 * freeable here, so the page does not leak.
2831 */
aaa4059b 2832 do_invalidatepage(page, 0);
1da177e4
LT
2833 unlock_page(page);
2834 return 0; /* don't care */
2835 }
2836
2837 /*
2838 * The page straddles i_size. It must be zeroed out on each and every
 2839 * writepage invocation because it may be mmapped. "A file is mapped
2840 * in multiples of the page size. For a file that is not a multiple of
2841 * the page size, the remaining memory is zeroed when mapped, and
2842 * writes to that region are not written out to the file."
2843 */
eebd2aa3 2844 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
1da177e4
LT
2845 return __block_write_full_page(inode, page, get_block, wbc);
2846}
2847
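/*
 * Illustrative sketch, not part of this file: most buffer-backed
 * filesystems implement ->writepage as a thin wrapper around
 * block_write_full_page().  The myfs_* names are hypothetical.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}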
2848sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2849 get_block_t *get_block)
2850{
2851 struct buffer_head tmp;
2852 struct inode *inode = mapping->host;
2853 tmp.b_state = 0;
2854 tmp.b_blocknr = 0;
b0cf2321 2855 tmp.b_size = 1 << inode->i_blkbits;
1da177e4
LT
2856 get_block(inode, block, &tmp, 0);
2857 return tmp.b_blocknr;
2858}
2859
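/*
 * Illustrative sketch, not part of this file: ->bmap (used by the FIBMAP
 * ioctl and by swapfile activation) is usually just generic_block_bmap()
 * plus the filesystem's get_block routine.  myfs_bmap is hypothetical.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}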
6712ecf8 2860static void end_bio_bh_io_sync(struct bio *bio, int err)
1da177e4
LT
2861{
2862 struct buffer_head *bh = bio->bi_private;
2863
1da177e4
LT
2864 if (err == -EOPNOTSUPP) {
2865 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2866 set_bit(BH_Eopnotsupp, &bh->b_state);
2867 }
2868
08bafc03
KM
2869 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2870 set_bit(BH_Quiet, &bh->b_state);
2871
1da177e4
LT
2872 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2873 bio_put(bio);
1da177e4
LT
2874}
2875
2876int submit_bh(int rw, struct buffer_head * bh)
2877{
2878 struct bio *bio;
2879 int ret = 0;
2880
2881 BUG_ON(!buffer_locked(bh));
2882 BUG_ON(!buffer_mapped(bh));
2883 BUG_ON(!bh->b_end_io);
2884
48fd4f93
JA
2885 /*
2886 * Mask in barrier bit for a write (could be either a WRITE or a
 2887 * WRITE_SYNC)
2888 */
2889 if (buffer_ordered(bh) && (rw & WRITE))
2890 rw |= WRITE_BARRIER;
1da177e4
LT
2891
2892 /*
48fd4f93 2893 * Only clear out a write error when rewriting
1da177e4 2894 */
48fd4f93 2895 if (test_set_buffer_req(bh) && (rw & WRITE))
1da177e4
LT
2896 clear_buffer_write_io_error(bh);
2897
2898 /*
2899 * from here on down, it's all bio -- do the initial mapping,
2900 * submit_bio -> generic_make_request may further map this bio around
2901 */
2902 bio = bio_alloc(GFP_NOIO, 1);
2903
2904 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2905 bio->bi_bdev = bh->b_bdev;
2906 bio->bi_io_vec[0].bv_page = bh->b_page;
2907 bio->bi_io_vec[0].bv_len = bh->b_size;
2908 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2909
2910 bio->bi_vcnt = 1;
2911 bio->bi_idx = 0;
2912 bio->bi_size = bh->b_size;
2913
2914 bio->bi_end_io = end_bio_bh_io_sync;
2915 bio->bi_private = bh;
2916
2917 bio_get(bio);
2918 submit_bio(rw, bio);
2919
2920 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2921 ret = -EOPNOTSUPP;
2922
2923 bio_put(bio);
2924 return ret;
2925}
2926
2927/**
2928 * ll_rw_block: low-level access to block devices (DEPRECATED)
a7662236 2929 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
1da177e4
LT
2930 * @nr: number of &struct buffer_heads in the array
2931 * @bhs: array of pointers to &struct buffer_head
2932 *
a7662236
JK
2933 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
 2934 * requests an I/O operation on them, either a %READ or a %WRITE. The third
 2935 * kind, %SWRITE, is like %WRITE except that we make sure the *current* data
 2936 * in the buffers is sent to disk. The fourth %READA option is described in
 2937 * the documentation for generic_make_request(), which ll_rw_block() calls.
1da177e4
LT
2938 *
2939 * This function drops any buffer that it cannot get a lock on (with the
a7662236
JK
2940 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2941 * clean when doing a write request, and any buffer that appears to be
 2942 * up-to-date when doing a read request. Further it marks as clean buffers that
2943 * are processed for writing (the buffer cache won't assume that they are
2944 * actually clean until the buffer gets unlocked).
1da177e4
LT
2945 *
 2946 * ll_rw_block sets b_end_io to a simple completion handler that marks
 2947 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2948 * any waiters.
2949 *
2950 * All of the buffers must be for the same device, and must also be a
2951 * multiple of the current approved size for the device.
2952 */
2953void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2954{
2955 int i;
2956
2957 for (i = 0; i < nr; i++) {
2958 struct buffer_head *bh = bhs[i];
2959
18ce3751 2960 if (rw == SWRITE || rw == SWRITE_SYNC)
a7662236 2961 lock_buffer(bh);
ca5de404 2962 else if (!trylock_buffer(bh))
1da177e4
LT
2963 continue;
2964
18ce3751 2965 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
1da177e4 2966 if (test_clear_buffer_dirty(bh)) {
76c3073a 2967 bh->b_end_io = end_buffer_write_sync;
e60e5c50 2968 get_bh(bh);
18ce3751
JA
2969 if (rw == SWRITE_SYNC)
2970 submit_bh(WRITE_SYNC, bh);
2971 else
2972 submit_bh(WRITE, bh);
1da177e4
LT
2973 continue;
2974 }
2975 } else {
1da177e4 2976 if (!buffer_uptodate(bh)) {
76c3073a 2977 bh->b_end_io = end_buffer_read_sync;
e60e5c50 2978 get_bh(bh);
1da177e4
LT
2979 submit_bh(rw, bh);
2980 continue;
2981 }
2982 }
2983 unlock_buffer(bh);
1da177e4
LT
2984 }
2985}
2986
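/*
 * Illustrative sketch, not part of this file: the classic (and now
 * discouraged) ll_rw_block() read pattern.  Because the READ case silently
 * skips buffers it cannot lock or that look uptodate, the caller must wait
 * and re-check the buffer itself.  myfs_read_buffer is hypothetical and
 * assumes the caller holds a reference on a mapped buffer_head.
 */
static int myfs_read_buffer(struct buffer_head *bh)
{
	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			return -EIO;	/* read failed, or was never issued */
	}
	return 0;
}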
2987/*
2988 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2989 * and then start new I/O and then wait upon it. The caller must have a ref on
2990 * the buffer_head.
2991 */
2992int sync_dirty_buffer(struct buffer_head *bh)
2993{
2994 int ret = 0;
2995
2996 WARN_ON(atomic_read(&bh->b_count) < 1);
2997 lock_buffer(bh);
2998 if (test_clear_buffer_dirty(bh)) {
2999 get_bh(bh);
3000 bh->b_end_io = end_buffer_write_sync;
78f707bf 3001 ret = submit_bh(WRITE, bh);
1da177e4
LT
3002 wait_on_buffer(bh);
3003 if (buffer_eopnotsupp(bh)) {
3004 clear_buffer_eopnotsupp(bh);
3005 ret = -EOPNOTSUPP;
3006 }
3007 if (!ret && !buffer_uptodate(bh))
3008 ret = -EIO;
3009 } else {
3010 unlock_buffer(bh);
3011 }
3012 return ret;
3013}
3014
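/*
 * Illustrative sketch, not part of this file: the usual data-integrity
 * pattern for a piece of metadata such as a superblock buffer - dirty it,
 * then write and wait with sync_dirty_buffer(), propagating any I/O error.
 * myfs_commit_super is hypothetical; sbh is a referenced buffer_head.
 */
static int myfs_commit_super(struct buffer_head *sbh)
{
	mark_buffer_dirty(sbh);
	return sync_dirty_buffer(sbh);	/* 0 on success, -EIO etc. on failure */
}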
3015/*
3016 * try_to_free_buffers() checks if all the buffers on this particular page
3017 * are unused, and releases them if so.
3018 *
3019 * Exclusion against try_to_free_buffers may be obtained by either
3020 * locking the page or by holding its mapping's private_lock.
3021 *
3022 * If the page is dirty but all the buffers are clean then we need to
3023 * be sure to mark the page clean as well. This is because the page
3024 * may be against a block device, and a later reattachment of buffers
3025 * to a dirty page will set *all* buffers dirty. Which would corrupt
3026 * filesystem data on the same device.
3027 *
3028 * The same applies to regular filesystem pages: if all the buffers are
3029 * clean then we set the page clean and proceed. To do that, we require
3030 * total exclusion from __set_page_dirty_buffers(). That is obtained with
3031 * private_lock.
3032 *
3033 * try_to_free_buffers() is non-blocking.
3034 */
3035static inline int buffer_busy(struct buffer_head *bh)
3036{
3037 return atomic_read(&bh->b_count) |
3038 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3039}
3040
3041static int
3042drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3043{
3044 struct buffer_head *head = page_buffers(page);
3045 struct buffer_head *bh;
3046
3047 bh = head;
3048 do {
de7d5a3b 3049 if (buffer_write_io_error(bh) && page->mapping)
1da177e4
LT
3050 set_bit(AS_EIO, &page->mapping->flags);
3051 if (buffer_busy(bh))
3052 goto failed;
3053 bh = bh->b_this_page;
3054 } while (bh != head);
3055
3056 do {
3057 struct buffer_head *next = bh->b_this_page;
3058
535ee2fb 3059 if (bh->b_assoc_map)
1da177e4
LT
3060 __remove_assoc_queue(bh);
3061 bh = next;
3062 } while (bh != head);
3063 *buffers_to_free = head;
3064 __clear_page_buffers(page);
3065 return 1;
3066failed:
3067 return 0;
3068}
3069
3070int try_to_free_buffers(struct page *page)
3071{
3072 struct address_space * const mapping = page->mapping;
3073 struct buffer_head *buffers_to_free = NULL;
3074 int ret = 0;
3075
3076 BUG_ON(!PageLocked(page));
ecdfc978 3077 if (PageWriteback(page))
1da177e4
LT
3078 return 0;
3079
3080 if (mapping == NULL) { /* can this still happen? */
3081 ret = drop_buffers(page, &buffers_to_free);
3082 goto out;
3083 }
3084
3085 spin_lock(&mapping->private_lock);
3086 ret = drop_buffers(page, &buffers_to_free);
ecdfc978
LT
3087
3088 /*
3089 * If the filesystem writes its buffers by hand (eg ext3)
3090 * then we can have clean buffers against a dirty page. We
3091 * clean the page here; otherwise the VM will never notice
3092 * that the filesystem did any IO at all.
3093 *
3094 * Also, during truncate, discard_buffer will have marked all
3095 * the page's buffers clean. We discover that here and clean
3096 * the page also.
87df7241
NP
3097 *
3098 * private_lock must be held over this entire operation in order
3099 * to synchronise against __set_page_dirty_buffers and prevent the
3100 * dirty bit from being lost.
ecdfc978
LT
3101 */
3102 if (ret)
3103 cancel_dirty_page(page, PAGE_CACHE_SIZE);
87df7241 3104 spin_unlock(&mapping->private_lock);
1da177e4
LT
3105out:
3106 if (buffers_to_free) {
3107 struct buffer_head *bh = buffers_to_free;
3108
3109 do {
3110 struct buffer_head *next = bh->b_this_page;
3111 free_buffer_head(bh);
3112 bh = next;
3113 } while (bh != buffers_to_free);
3114 }
3115 return ret;
3116}
3117EXPORT_SYMBOL(try_to_free_buffers);
3118
3978d717 3119void block_sync_page(struct page *page)
1da177e4
LT
3120{
3121 struct address_space *mapping;
3122
3123 smp_mb();
3124 mapping = page_mapping(page);
3125 if (mapping)
3126 blk_run_backing_dev(mapping->backing_dev_info, page);
1da177e4
LT
3127}
3128
3129/*
3130 * There are no bdflush tunables left. But distributions are
3131 * still running obsolete flush daemons, so we terminate them here.
3132 *
3133 * Use of bdflush() is deprecated and will be removed in a future kernel.
3134 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3135 */
bdc480e3 3136SYSCALL_DEFINE2(bdflush, int, func, long, data)
1da177e4
LT
3137{
3138 static int msg_count;
3139
3140 if (!capable(CAP_SYS_ADMIN))
3141 return -EPERM;
3142
3143 if (msg_count < 5) {
3144 msg_count++;
3145 printk(KERN_INFO
3146 "warning: process `%s' used the obsolete bdflush"
3147 " system call\n", current->comm);
3148 printk(KERN_INFO "Fix your initscripts?\n");
3149 }
3150
3151 if (func == 1)
3152 do_exit(0);
3153 return 0;
3154}
3155
3156/*
3157 * Buffer-head allocation
3158 */
e18b890b 3159static struct kmem_cache *bh_cachep;
1da177e4
LT
3160
3161/*
3162 * Once the number of bh's in the machine exceeds this level, we start
3163 * stripping them in writeback.
3164 */
3165static int max_buffer_heads;
3166
3167int buffer_heads_over_limit;
3168
3169struct bh_accounting {
3170 int nr; /* Number of live bh's */
3171 int ratelimit; /* Limit cacheline bouncing */
3172};
3173
3174static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3175
3176static void recalc_bh_state(void)
3177{
3178 int i;
3179 int tot = 0;
3180
3181 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3182 return;
3183 __get_cpu_var(bh_accounting).ratelimit = 0;
8a143426 3184 for_each_online_cpu(i)
1da177e4
LT
3185 tot += per_cpu(bh_accounting, i).nr;
3186 buffer_heads_over_limit = (tot > max_buffer_heads);
3187}
3188
dd0fc66f 3189struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
1da177e4 3190{
488514d1 3191 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
1da177e4 3192 if (ret) {
a35afb83 3193 INIT_LIST_HEAD(&ret->b_assoc_buffers);
736c7b80 3194 get_cpu_var(bh_accounting).nr++;
1da177e4 3195 recalc_bh_state();
736c7b80 3196 put_cpu_var(bh_accounting);
1da177e4
LT
3197 }
3198 return ret;
3199}
3200EXPORT_SYMBOL(alloc_buffer_head);
3201
3202void free_buffer_head(struct buffer_head *bh)
3203{
3204 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3205 kmem_cache_free(bh_cachep, bh);
736c7b80 3206 get_cpu_var(bh_accounting).nr--;
1da177e4 3207 recalc_bh_state();
736c7b80 3208 put_cpu_var(bh_accounting);
1da177e4
LT
3209}
3210EXPORT_SYMBOL(free_buffer_head);
3211
1da177e4
LT
3212static void buffer_exit_cpu(int cpu)
3213{
3214 int i;
3215 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3216
3217 for (i = 0; i < BH_LRU_SIZE; i++) {
3218 brelse(b->bhs[i]);
3219 b->bhs[i] = NULL;
3220 }
8a143426
ED
3221 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3222 per_cpu(bh_accounting, cpu).nr = 0;
3223 put_cpu_var(bh_accounting);
1da177e4
LT
3224}
3225
3226static int buffer_cpu_notify(struct notifier_block *self,
3227 unsigned long action, void *hcpu)
3228{
8bb78442 3229 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1da177e4
LT
3230 buffer_exit_cpu((unsigned long)hcpu);
3231 return NOTIFY_OK;
3232}
1da177e4 3233
389d1b08 3234/**
a6b91919 3235 * bh_uptodate_or_lock - Test whether the buffer is uptodate
389d1b08
AK
3236 * @bh: struct buffer_head
3237 *
3238 * Return true if the buffer is up-to-date and false,
3239 * with the buffer locked, if not.
3240 */
3241int bh_uptodate_or_lock(struct buffer_head *bh)
3242{
3243 if (!buffer_uptodate(bh)) {
3244 lock_buffer(bh);
3245 if (!buffer_uptodate(bh))
3246 return 0;
3247 unlock_buffer(bh);
3248 }
3249 return 1;
3250}
3251EXPORT_SYMBOL(bh_uptodate_or_lock);
3252
3253/**
a6b91919 3254 * bh_submit_read - Submit a locked buffer for reading
389d1b08
AK
3255 * @bh: struct buffer_head
3256 *
3257 * Returns zero on success and -EIO on error.
3258 */
3259int bh_submit_read(struct buffer_head *bh)
3260{
3261 BUG_ON(!buffer_locked(bh));
3262
3263 if (buffer_uptodate(bh)) {
3264 unlock_buffer(bh);
3265 return 0;
3266 }
3267
3268 get_bh(bh);
3269 bh->b_end_io = end_buffer_read_sync;
3270 submit_bh(READ, bh);
3271 wait_on_buffer(bh);
3272 if (buffer_uptodate(bh))
3273 return 0;
3274 return -EIO;
3275}
3276EXPORT_SYMBOL(bh_submit_read);
3277
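/*
 * Illustrative sketch, not part of this file: bh_uptodate_or_lock() and
 * bh_submit_read() are designed to be used as a pair - the former returns
 * with the buffer locked only when a read is really needed, and the latter
 * consumes that lock.  myfs_read_block_buffer is hypothetical.
 */
static int myfs_read_block_buffer(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already uptodate, no I/O needed */
	return bh_submit_read(bh);	/* submits READ, waits, unlocks */
}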
b98938c3 3278static void
51cc5068 3279init_buffer_head(void *data)
b98938c3
CL
3280{
3281 struct buffer_head *bh = data;
3282
3283 memset(bh, 0, sizeof(*bh));
3284 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3285}
3286
1da177e4
LT
3287void __init buffer_init(void)
3288{
3289 int nrpages;
3290
b98938c3
CL
3291 bh_cachep = kmem_cache_create("buffer_head",
3292 sizeof(struct buffer_head), 0,
3293 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3294 SLAB_MEM_SPREAD),
3295 init_buffer_head);
1da177e4
LT
3296
3297 /*
3298 * Limit the bh occupancy to 10% of ZONE_NORMAL
3299 */
3300 nrpages = (nr_free_buffer_pages() * 10) / 100;
3301 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3302 hotcpu_notifier(buffer_cpu_notify, 0);
3303}
3304
3305EXPORT_SYMBOL(__bforget);
3306EXPORT_SYMBOL(__brelse);
3307EXPORT_SYMBOL(__wait_on_buffer);
3308EXPORT_SYMBOL(block_commit_write);
3309EXPORT_SYMBOL(block_prepare_write);
54171690 3310EXPORT_SYMBOL(block_page_mkwrite);
1da177e4
LT
3311EXPORT_SYMBOL(block_read_full_page);
3312EXPORT_SYMBOL(block_sync_page);
3313EXPORT_SYMBOL(block_truncate_page);
3314EXPORT_SYMBOL(block_write_full_page);
89e10787 3315EXPORT_SYMBOL(cont_write_begin);
1da177e4
LT
3316EXPORT_SYMBOL(end_buffer_read_sync);
3317EXPORT_SYMBOL(end_buffer_write_sync);
3318EXPORT_SYMBOL(file_fsync);
1da177e4 3319EXPORT_SYMBOL(generic_block_bmap);
05eb0b51 3320EXPORT_SYMBOL(generic_cont_expand_simple);
1da177e4
LT
3321EXPORT_SYMBOL(init_buffer);
3322EXPORT_SYMBOL(invalidate_bdev);
3323EXPORT_SYMBOL(ll_rw_block);
3324EXPORT_SYMBOL(mark_buffer_dirty);
3325EXPORT_SYMBOL(submit_bh);
3326EXPORT_SYMBOL(sync_dirty_buffer);
3327EXPORT_SYMBOL(unlock_buffer);