1da177e4
LT
1/*
2 * linux/fs/buffer.c
3 *
4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
5 */
6
7/*
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9 *
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12 *
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
15 *
 16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17 *
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19 */
20
1da177e4
LT
21#include <linux/kernel.h>
22#include <linux/syscalls.h>
23#include <linux/fs.h>
24#include <linux/mm.h>
25#include <linux/percpu.h>
26#include <linux/slab.h>
16f7e0fe 27#include <linux/capability.h>
1da177e4
LT
28#include <linux/blkdev.h>
29#include <linux/file.h>
30#include <linux/quotaops.h>
31#include <linux/highmem.h>
32#include <linux/module.h>
33#include <linux/writeback.h>
34#include <linux/hash.h>
35#include <linux/suspend.h>
36#include <linux/buffer_head.h>
55e829af 37#include <linux/task_io_accounting_ops.h>
1da177e4
LT
38#include <linux/bio.h>
39#include <linux/notifier.h>
40#include <linux/cpu.h>
41#include <linux/bitops.h>
42#include <linux/mpage.h>
fb1c8f93 43#include <linux/bit_spinlock.h>
1da177e4
LT
44
45static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
1da177e4
LT
46
47#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48
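/*
 * Attach an end_io completion handler and private data to a buffer_head.
 */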
49inline void
50init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51{
52 bh->b_end_io = handler;
53 bh->b_private = private;
54}
1fe72eaa 55EXPORT_SYMBOL(init_buffer);
1da177e4 56
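/*
 * Action for wait_on_bit()/wait_on_bit_lock(): the caller is waiting for
 * buffer I/O, so sleep via io_schedule().
 */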
7eaceacc 57static int sleep_on_buffer(void *word)
1da177e4 58{
1da177e4
LT
59 io_schedule();
60 return 0;
61}
62
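/*
 * Sleep uninterruptibly until the buffer's lock bit can be taken.
 */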
fc9b52cd 63void __lock_buffer(struct buffer_head *bh)
1da177e4 64{
7eaceacc 65 wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
1da177e4
LT
66 TASK_UNINTERRUPTIBLE);
67}
68EXPORT_SYMBOL(__lock_buffer);
69
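/*
 * Clear the lock bit and wake up anyone sleeping in __wait_on_buffer() or
 * __lock_buffer().
 */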
fc9b52cd 70void unlock_buffer(struct buffer_head *bh)
1da177e4 71{
51b07fc3 72 clear_bit_unlock(BH_Lock, &bh->b_state);
1da177e4
LT
73 smp_mb__after_clear_bit();
74 wake_up_bit(&bh->b_state, BH_Lock);
75}
1fe72eaa 76EXPORT_SYMBOL(unlock_buffer);
1da177e4
LT
77
78/*
79 * Block until a buffer comes unlocked. This doesn't stop it
80 * from becoming locked again - you have to lock it yourself
81 * if you want to preserve its state.
82 */
83void __wait_on_buffer(struct buffer_head * bh)
84{
7eaceacc 85 wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
1da177e4 86}
1fe72eaa 87EXPORT_SYMBOL(__wait_on_buffer);
1da177e4
LT
88
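/*
 * Detach the buffer list from a page: clear PG_private, zero page->private
 * and drop the page reference that the private data held.
 */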
89static void
90__clear_page_buffers(struct page *page)
91{
92 ClearPagePrivate(page);
4c21e2f2 93 set_page_private(page, 0);
1da177e4
LT
94 page_cache_release(page);
95}
96
08bafc03
KM
97
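/*
 * Returns non-zero if an I/O error message should be suppressed, either
 * because BH_Quiet is set or because the printk ratelimit has been hit.
 */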
98static int quiet_error(struct buffer_head *bh)
99{
100 if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
101 return 0;
102 return 1;
103}
104
105
1da177e4
LT
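/*
 * Report a buffer I/O error, naming the device and the logical block number.
 */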
106static void buffer_io_error(struct buffer_head *bh)
107{
108 char b[BDEVNAME_SIZE];
1da177e4
LT
109 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
110 bdevname(bh->b_bdev, b),
111 (unsigned long long)bh->b_blocknr);
112}
113
114/*
68671f35
DM
115 * End-of-IO handler helper function which does not touch the bh after
116 * unlocking it.
117 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 118 * a race there is benign: unlock_buffer() only uses the bh's address for
119 * hashing after unlocking the buffer, so it doesn't actually touch the bh
120 * itself.
1da177e4 121 */
68671f35 122static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
1da177e4
LT
123{
124 if (uptodate) {
125 set_buffer_uptodate(bh);
126 } else {
127 /* This happens, due to failed READA attempts. */
128 clear_buffer_uptodate(bh);
129 }
130 unlock_buffer(bh);
68671f35
DM
131}
132
133/*
 134 * Default synchronous end-of-IO handler. Just mark it up-to-date and
135 * unlock the buffer. This is what ll_rw_block uses too.
136 */
137void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
138{
139 __end_buffer_read_notouch(bh, uptodate);
1da177e4
LT
140 put_bh(bh);
141}
1fe72eaa 142EXPORT_SYMBOL(end_buffer_read_sync);
1da177e4
LT
143
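/*
 * Synchronous write completion: report any I/O error, update the buffer
 * state, then unlock the buffer and drop the reference taken for the I/O.
 */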
144void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
145{
146 char b[BDEVNAME_SIZE];
147
148 if (uptodate) {
149 set_buffer_uptodate(bh);
150 } else {
0edd55fa 151 if (!quiet_error(bh)) {
1da177e4
LT
152 buffer_io_error(bh);
153 printk(KERN_WARNING "lost page write due to "
154 "I/O error on %s\n",
155 bdevname(bh->b_bdev, b));
156 }
157 set_buffer_write_io_error(bh);
158 clear_buffer_uptodate(bh);
159 }
160 unlock_buffer(bh);
161 put_bh(bh);
162}
1fe72eaa 163EXPORT_SYMBOL(end_buffer_write_sync);
1da177e4 164
1da177e4
LT
165/*
166 * Various filesystems appear to want __find_get_block to be non-blocking.
167 * But it's the page lock which protects the buffers. To get around this,
168 * we get exclusion from try_to_free_buffers with the blockdev mapping's
169 * private_lock.
170 *
171 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
172 * may be quite high. This code could TryLock the page, and if that
173 * succeeds, there is no need to take private_lock. (But if
174 * private_lock is contended then so is mapping->tree_lock).
175 */
176static struct buffer_head *
385fd4c5 177__find_get_block_slow(struct block_device *bdev, sector_t block)
1da177e4
LT
178{
179 struct inode *bd_inode = bdev->bd_inode;
180 struct address_space *bd_mapping = bd_inode->i_mapping;
181 struct buffer_head *ret = NULL;
182 pgoff_t index;
183 struct buffer_head *bh;
184 struct buffer_head *head;
185 struct page *page;
186 int all_mapped = 1;
187
188 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
189 page = find_get_page(bd_mapping, index);
190 if (!page)
191 goto out;
192
193 spin_lock(&bd_mapping->private_lock);
194 if (!page_has_buffers(page))
195 goto out_unlock;
196 head = page_buffers(page);
197 bh = head;
198 do {
97f76d3d
NK
199 if (!buffer_mapped(bh))
200 all_mapped = 0;
201 else if (bh->b_blocknr == block) {
1da177e4
LT
202 ret = bh;
203 get_bh(bh);
204 goto out_unlock;
205 }
1da177e4
LT
206 bh = bh->b_this_page;
207 } while (bh != head);
208
209 /* we might be here because some of the buffers on this page are
210 * not mapped. This is due to various races between
211 * file io on the block device and getblk. It gets dealt with
212 * elsewhere, don't buffer_error if we had some unmapped buffers
213 */
214 if (all_mapped) {
215 printk("__find_get_block_slow() failed. "
216 "block=%llu, b_blocknr=%llu\n",
205f87f6
BP
217 (unsigned long long)block,
218 (unsigned long long)bh->b_blocknr);
219 printk("b_state=0x%08lx, b_size=%zu\n",
220 bh->b_state, bh->b_size);
1da177e4
LT
221 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
222 }
223out_unlock:
224 spin_unlock(&bd_mapping->private_lock);
225 page_cache_release(page);
226out:
227 return ret;
228}
229
230/* If invalidate_buffers() will trash dirty buffers, it means some kind
231 of fs corruption is going on. Trashing dirty data always imply losing
232 information that was supposed to be just stored on the physical layer
233 by the user.
234
 235 Thus invalidate_buffers in general usage is not allowed to trash
 236 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
237 be preserved. These buffers are simply skipped.
238
239 We also skip buffers which are still in use. For example this can
240 happen if a userspace program is reading the block device.
241
 242 NOTE: In the case where the user removed a removable-media disk even if
 243 there's still dirty data not synced on disk (due to a bug in the device driver
 244 or due to an error of the user), by not destroying the dirty buffers we could
245 generate corruption also on the next media inserted, thus a parameter is
246 necessary to handle this case in the most safe way possible (trying
247 to not corrupt also the new disk inserted with the data belonging to
248 the old now corrupted disk). Also for the ramdisk the natural thing
249 to do in order to release the ramdisk memory is to destroy dirty buffers.
250
 251 These are two special cases. Normal usage implies that the device driver
 252 issues a sync on the device (without waiting for I/O completion) and
253 then an invalidate_buffers call that doesn't trash dirty buffers.
254
255 For handling cache coherency with the blkdev pagecache the 'update' case
 256 has been introduced. It is needed to re-read from disk any pinned
257 buffer. NOTE: re-reading from disk is destructive so we can do it only
258 when we assume nobody is changing the buffercache under our I/O and when
259 we think the disk contains more recent information than the buffercache.
260 The update == 1 pass marks the buffers we need to update, the update == 2
261 pass does the actual I/O. */
f98393a6 262void invalidate_bdev(struct block_device *bdev)
1da177e4 263{
0e1dfc66
AM
264 struct address_space *mapping = bdev->bd_inode->i_mapping;
265
266 if (mapping->nrpages == 0)
267 return;
268
1da177e4 269 invalidate_bh_lrus();
fa4b9074 270 lru_add_drain_all(); /* make sure all lru add caches are flushed */
fc0ecff6 271 invalidate_mapping_pages(mapping, 0, -1);
1da177e4 272}
1fe72eaa 273EXPORT_SYMBOL(invalidate_bdev);
1da177e4
LT
274
275/*
5b0830cb 276 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
1da177e4
LT
277 */
278static void free_more_memory(void)
279{
19770b32 280 struct zone *zone;
0e88460d 281 int nid;
1da177e4 282
03ba3782 283 wakeup_flusher_threads(1024);
1da177e4
LT
284 yield();
285
0e88460d 286 for_each_online_node(nid) {
19770b32
MG
287 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
288 gfp_zone(GFP_NOFS), NULL,
289 &zone);
290 if (zone)
54a6eb5c 291 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
327c0e96 292 GFP_NOFS, NULL);
1da177e4
LT
293 }
294}
295
296/*
297 * I/O completion handler for block_read_full_page() - pages
298 * which come unlocked at the end of I/O.
299 */
300static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
301{
1da177e4 302 unsigned long flags;
a3972203 303 struct buffer_head *first;
1da177e4
LT
304 struct buffer_head *tmp;
305 struct page *page;
306 int page_uptodate = 1;
307
308 BUG_ON(!buffer_async_read(bh));
309
310 page = bh->b_page;
311 if (uptodate) {
312 set_buffer_uptodate(bh);
313 } else {
314 clear_buffer_uptodate(bh);
08bafc03 315 if (!quiet_error(bh))
1da177e4
LT
316 buffer_io_error(bh);
317 SetPageError(page);
318 }
319
320 /*
321 * Be _very_ careful from here on. Bad things can happen if
322 * two buffer heads end IO at almost the same time and both
323 * decide that the page is now completely done.
324 */
a3972203
NP
325 first = page_buffers(page);
326 local_irq_save(flags);
327 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
1da177e4
LT
328 clear_buffer_async_read(bh);
329 unlock_buffer(bh);
330 tmp = bh;
331 do {
332 if (!buffer_uptodate(tmp))
333 page_uptodate = 0;
334 if (buffer_async_read(tmp)) {
335 BUG_ON(!buffer_locked(tmp));
336 goto still_busy;
337 }
338 tmp = tmp->b_this_page;
339 } while (tmp != bh);
a3972203
NP
340 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
341 local_irq_restore(flags);
1da177e4
LT
342
343 /*
344 * If none of the buffers had errors and they are all
345 * uptodate then we can set the page uptodate.
346 */
347 if (page_uptodate && !PageError(page))
348 SetPageUptodate(page);
349 unlock_page(page);
350 return;
351
352still_busy:
a3972203
NP
353 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
354 local_irq_restore(flags);
1da177e4
LT
355 return;
356}
357
358/*
359 * Completion handler for block_write_full_page() - pages which are unlocked
360 * during I/O, and which have PageWriteback cleared upon I/O completion.
361 */
35c80d5f 362void end_buffer_async_write(struct buffer_head *bh, int uptodate)
1da177e4
LT
363{
364 char b[BDEVNAME_SIZE];
1da177e4 365 unsigned long flags;
a3972203 366 struct buffer_head *first;
1da177e4
LT
367 struct buffer_head *tmp;
368 struct page *page;
369
370 BUG_ON(!buffer_async_write(bh));
371
372 page = bh->b_page;
373 if (uptodate) {
374 set_buffer_uptodate(bh);
375 } else {
08bafc03 376 if (!quiet_error(bh)) {
1da177e4
LT
377 buffer_io_error(bh);
378 printk(KERN_WARNING "lost page write due to "
379 "I/O error on %s\n",
380 bdevname(bh->b_bdev, b));
381 }
382 set_bit(AS_EIO, &page->mapping->flags);
58ff407b 383 set_buffer_write_io_error(bh);
1da177e4
LT
384 clear_buffer_uptodate(bh);
385 SetPageError(page);
386 }
387
a3972203
NP
388 first = page_buffers(page);
389 local_irq_save(flags);
390 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
391
1da177e4
LT
392 clear_buffer_async_write(bh);
393 unlock_buffer(bh);
394 tmp = bh->b_this_page;
395 while (tmp != bh) {
396 if (buffer_async_write(tmp)) {
397 BUG_ON(!buffer_locked(tmp));
398 goto still_busy;
399 }
400 tmp = tmp->b_this_page;
401 }
a3972203
NP
402 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
403 local_irq_restore(flags);
1da177e4
LT
404 end_page_writeback(page);
405 return;
406
407still_busy:
a3972203
NP
408 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
409 local_irq_restore(flags);
1da177e4
LT
410 return;
411}
1fe72eaa 412EXPORT_SYMBOL(end_buffer_async_write);
1da177e4
LT
413
414/*
 415 * If a page's buffers are under async read I/O (end_buffer_async_read
416 * completion) then there is a possibility that another thread of
417 * control could lock one of the buffers after it has completed
418 * but while some of the other buffers have not completed. This
419 * locked buffer would confuse end_buffer_async_read() into not unlocking
420 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
421 * that this buffer is not under async I/O.
422 *
423 * The page comes unlocked when it has no locked buffer_async buffers
424 * left.
425 *
 426 * PageLocked prevents anyone from starting new async read I/O against any of
 427 * the buffers.
428 *
429 * PageWriteback is used to prevent simultaneous writeout of the same
430 * page.
431 *
432 * PageLocked prevents anyone from starting writeback of a page which is
433 * under read I/O (PageWriteback is only ever set against a locked page).
434 */
435static void mark_buffer_async_read(struct buffer_head *bh)
436{
437 bh->b_end_io = end_buffer_async_read;
438 set_buffer_async_read(bh);
439}
440
1fe72eaa
HS
441static void mark_buffer_async_write_endio(struct buffer_head *bh,
442 bh_end_io_t *handler)
1da177e4 443{
35c80d5f 444 bh->b_end_io = handler;
1da177e4
LT
445 set_buffer_async_write(bh);
446}
35c80d5f
CM
447
448void mark_buffer_async_write(struct buffer_head *bh)
449{
450 mark_buffer_async_write_endio(bh, end_buffer_async_write);
451}
1da177e4
LT
452EXPORT_SYMBOL(mark_buffer_async_write);
453
454
455/*
456 * fs/buffer.c contains helper functions for buffer-backed address space's
457 * fsync functions. A common requirement for buffer-based filesystems is
458 * that certain data from the backing blockdev needs to be written out for
459 * a successful fsync(). For example, ext2 indirect blocks need to be
460 * written back and waited upon before fsync() returns.
461 *
462 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
463 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
464 * management of a list of dependent buffers at ->i_mapping->private_list.
465 *
466 * Locking is a little subtle: try_to_free_buffers() will remove buffers
467 * from their controlling inode's queue when they are being freed. But
468 * try_to_free_buffers() will be operating against the *blockdev* mapping
469 * at the time, not against the S_ISREG file which depends on those buffers.
470 * So the locking for private_list is via the private_lock in the address_space
471 * which backs the buffers. Which is different from the address_space
472 * against which the buffers are listed. So for a particular address_space,
473 * mapping->private_lock does *not* protect mapping->private_list! In fact,
474 * mapping->private_list will always be protected by the backing blockdev's
475 * ->private_lock.
476 *
477 * Which introduces a requirement: all buffers on an address_space's
478 * ->private_list must be from the same address_space: the blockdev's.
479 *
480 * address_spaces which do not place buffers at ->private_list via these
481 * utility functions are free to use private_lock and private_list for
482 * whatever they want. The only requirement is that list_empty(private_list)
483 * be true at clear_inode() time.
484 *
485 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
486 * filesystems should do that. invalidate_inode_buffers() should just go
487 * BUG_ON(!list_empty).
488 *
489 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
490 * take an address_space, not an inode. And it should be called
491 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
492 * queued up.
493 *
494 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
495 * list if it is already on a list. Because if the buffer is on a list,
496 * it *must* already be on the right one. If not, the filesystem is being
497 * silly. This will save a ton of locking. But first we have to ensure
498 * that buffers are taken *off* the old inode's list when they are freed
499 * (presumably in truncate). That requires careful auditing of all
500 * filesystems (do it inside bforget()). It could also be done by bringing
501 * b_inode back.
502 */
503
504/*
505 * The buffer's backing address_space's private_lock must be held
506 */
dbacefc9 507static void __remove_assoc_queue(struct buffer_head *bh)
1da177e4
LT
508{
509 list_del_init(&bh->b_assoc_buffers);
58ff407b
JK
510 WARN_ON(!bh->b_assoc_map);
511 if (buffer_write_io_error(bh))
512 set_bit(AS_EIO, &bh->b_assoc_map->flags);
513 bh->b_assoc_map = NULL;
1da177e4
LT
514}
515
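/* Does the inode have any buffers queued on its ->private_list? */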
516int inode_has_buffers(struct inode *inode)
517{
518 return !list_empty(&inode->i_data.private_list);
519}
520
521/*
522 * osync is designed to support O_SYNC io. It waits synchronously for
523 * all already-submitted IO to complete, but does not queue any new
524 * writes to the disk.
525 *
526 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
527 * you dirty the buffers, and then use osync_inode_buffers to wait for
528 * completion. Any other dirty buffers which are not yet queued for
529 * write will not be flushed to disk by the osync.
530 */
531static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
532{
533 struct buffer_head *bh;
534 struct list_head *p;
535 int err = 0;
536
537 spin_lock(lock);
538repeat:
539 list_for_each_prev(p, list) {
540 bh = BH_ENTRY(p);
541 if (buffer_locked(bh)) {
542 get_bh(bh);
543 spin_unlock(lock);
544 wait_on_buffer(bh);
545 if (!buffer_uptodate(bh))
546 err = -EIO;
547 brelse(bh);
548 spin_lock(lock);
549 goto repeat;
550 }
551 }
552 spin_unlock(lock);
553 return err;
554}
555
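/*
 * Helper for the emergency (SysRq) thaw: keep thawing @sb's block device
 * until thaw_bdev() reports it is no longer frozen, logging each pass.
 */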
01a05b33 556static void do_thaw_one(struct super_block *sb, void *unused)
c2d75438 557{
c2d75438 558 char b[BDEVNAME_SIZE];
01a05b33
AV
559 while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
560 printk(KERN_WARNING "Emergency Thaw on %s\n",
561 bdevname(sb->s_bdev, b));
562}
c2d75438 563
01a05b33
AV
564static void do_thaw_all(struct work_struct *work)
565{
566 iterate_supers(do_thaw_one, NULL);
053c525f 567 kfree(work);
c2d75438
ES
568 printk(KERN_WARNING "Emergency Thaw complete\n");
569}
570
571/**
572 * emergency_thaw_all -- forcibly thaw every frozen filesystem
573 *
574 * Used for emergency unfreeze of all filesystems via SysRq
575 */
576void emergency_thaw_all(void)
577{
053c525f
JA
578 struct work_struct *work;
579
580 work = kmalloc(sizeof(*work), GFP_ATOMIC);
581 if (work) {
582 INIT_WORK(work, do_thaw_all);
583 schedule_work(work);
584 }
c2d75438
ES
585}
586
1da177e4 587/**
78a4a50a 588 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
67be2dd1 589 * @mapping: the mapping which wants those buffers written
1da177e4
LT
590 *
591 * Starts I/O against the buffers at mapping->private_list, and waits upon
592 * that I/O.
593 *
67be2dd1
MW
594 * Basically, this is a convenience function for fsync().
595 * @mapping is a file or directory which needs those buffers to be written for
596 * a successful fsync().
1da177e4
LT
597 */
598int sync_mapping_buffers(struct address_space *mapping)
599{
600 struct address_space *buffer_mapping = mapping->assoc_mapping;
601
602 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
603 return 0;
604
605 return fsync_buffers_list(&buffer_mapping->private_lock,
606 &mapping->private_list);
607}
608EXPORT_SYMBOL(sync_mapping_buffers);
609
610/*
611 * Called when we've recently written block `bblock', and it is known that
612 * `bblock' was for a buffer_boundary() buffer. This means that the block at
613 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
614 * dirty, schedule it for IO. So that indirects merge nicely with their data.
615 */
616void write_boundary_block(struct block_device *bdev,
617 sector_t bblock, unsigned blocksize)
618{
619 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
620 if (bh) {
621 if (buffer_dirty(bh))
622 ll_rw_block(WRITE, 1, &bh);
623 put_bh(bh);
624 }
625}
626
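/*
 * Mark a buffer dirty and queue it on @inode's ->private_list so that
 * sync_mapping_buffers() will write it out at fsync time.
 */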
627void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
628{
629 struct address_space *mapping = inode->i_mapping;
630 struct address_space *buffer_mapping = bh->b_page->mapping;
631
632 mark_buffer_dirty(bh);
633 if (!mapping->assoc_mapping) {
634 mapping->assoc_mapping = buffer_mapping;
635 } else {
e827f923 636 BUG_ON(mapping->assoc_mapping != buffer_mapping);
1da177e4 637 }
535ee2fb 638 if (!bh->b_assoc_map) {
1da177e4
LT
639 spin_lock(&buffer_mapping->private_lock);
640 list_move_tail(&bh->b_assoc_buffers,
641 &mapping->private_list);
58ff407b 642 bh->b_assoc_map = mapping;
1da177e4
LT
643 spin_unlock(&buffer_mapping->private_lock);
644 }
645}
646EXPORT_SYMBOL(mark_buffer_dirty_inode);
647
787d2214
NP
648/*
649 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
650 * dirty.
651 *
652 * If warn is true, then emit a warning if the page is not uptodate and has
653 * not been truncated.
654 */
a8e7d49a 655static void __set_page_dirty(struct page *page,
787d2214
NP
656 struct address_space *mapping, int warn)
657{
19fd6231 658 spin_lock_irq(&mapping->tree_lock);
787d2214
NP
659 if (page->mapping) { /* Race with truncate? */
660 WARN_ON_ONCE(warn && !PageUptodate(page));
e3a7cca1 661 account_page_dirtied(page, mapping);
787d2214
NP
662 radix_tree_tag_set(&mapping->page_tree,
663 page_index(page), PAGECACHE_TAG_DIRTY);
664 }
19fd6231 665 spin_unlock_irq(&mapping->tree_lock);
787d2214 666 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
787d2214
NP
667}
668
1da177e4
LT
669/*
670 * Add a page to the dirty page list.
671 *
672 * It is a sad fact of life that this function is called from several places
673 * deeply under spinlocking. It may not sleep.
674 *
675 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 676 * dirty-state coherency between the page and the buffers. If the page does
677 * not have buffers then when they are later attached they will all be set
678 * dirty.
679 *
680 * The buffers are dirtied before the page is dirtied. There's a small race
681 * window in which a writepage caller may see the page cleanness but not the
682 * buffer dirtiness. That's fine. If this code were to set the page dirty
683 * before the buffers, a concurrent writepage caller could clear the page dirty
684 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
685 * page on the dirty page list.
686 *
687 * We use private_lock to lock against try_to_free_buffers while using the
688 * page's buffer list. Also use this to protect against clean buffers being
689 * added to the page after it was set dirty.
690 *
691 * FIXME: may need to call ->reservepage here as well. That's rather up to the
692 * address_space though.
693 */
694int __set_page_dirty_buffers(struct page *page)
695{
a8e7d49a 696 int newly_dirty;
787d2214 697 struct address_space *mapping = page_mapping(page);
ebf7a227
NP
698
699 if (unlikely(!mapping))
700 return !TestSetPageDirty(page);
1da177e4
LT
701
702 spin_lock(&mapping->private_lock);
703 if (page_has_buffers(page)) {
704 struct buffer_head *head = page_buffers(page);
705 struct buffer_head *bh = head;
706
707 do {
708 set_buffer_dirty(bh);
709 bh = bh->b_this_page;
710 } while (bh != head);
711 }
a8e7d49a 712 newly_dirty = !TestSetPageDirty(page);
1da177e4
LT
713 spin_unlock(&mapping->private_lock);
714
a8e7d49a
LT
715 if (newly_dirty)
716 __set_page_dirty(page, mapping, 1);
717 return newly_dirty;
1da177e4
LT
718}
719EXPORT_SYMBOL(__set_page_dirty_buffers);
720
721/*
722 * Write out and wait upon a list of buffers.
723 *
724 * We have conflicting pressures: we want to make sure that all
725 * initially dirty buffers get waited on, but that any subsequently
726 * dirtied buffers don't. After all, we don't want fsync to last
727 * forever if somebody is actively writing to the file.
728 *
729 * Do this in two main stages: first we copy dirty buffers to a
730 * temporary inode list, queueing the writes as we go. Then we clean
731 * up, waiting for those writes to complete.
732 *
733 * During this second stage, any subsequent updates to the file may end
734 * up refiling the buffer on the original inode's dirty list again, so
735 * there is a chance we will end up with a buffer queued for write but
736 * not yet completed on that list. So, as a final cleanup we go through
737 * the osync code to catch these locked, dirty buffers without requeuing
738 * any newly dirty buffers for write.
739 */
740static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
741{
742 struct buffer_head *bh;
743 struct list_head tmp;
7eaceacc 744 struct address_space *mapping;
1da177e4
LT
745 int err = 0, err2;
746
747 INIT_LIST_HEAD(&tmp);
748
749 spin_lock(lock);
750 while (!list_empty(list)) {
751 bh = BH_ENTRY(list->next);
535ee2fb 752 mapping = bh->b_assoc_map;
58ff407b 753 __remove_assoc_queue(bh);
535ee2fb
JK
754 /* Avoid race with mark_buffer_dirty_inode() which does
755 * a lockless check and we rely on seeing the dirty bit */
756 smp_mb();
1da177e4
LT
757 if (buffer_dirty(bh) || buffer_locked(bh)) {
758 list_add(&bh->b_assoc_buffers, &tmp);
535ee2fb 759 bh->b_assoc_map = mapping;
1da177e4
LT
760 if (buffer_dirty(bh)) {
761 get_bh(bh);
762 spin_unlock(lock);
763 /*
764 * Ensure any pending I/O completes so that
9cb569d6
CH
765 * write_dirty_buffer() actually writes the
766 * current contents - it is a noop if I/O is
767 * still in flight on potentially older
768 * contents.
1da177e4 769 */
721a9602 770 write_dirty_buffer(bh, WRITE_SYNC);
9cf6b720
JA
771
772 /*
773 * Kick off IO for the previous mapping. Note
774 * that we will not run the very last mapping,
775 * wait_on_buffer() will do that for us
776 * through sync_buffer().
777 */
1da177e4
LT
778 brelse(bh);
779 spin_lock(lock);
780 }
781 }
782 }
783
784 while (!list_empty(&tmp)) {
785 bh = BH_ENTRY(tmp.prev);
1da177e4 786 get_bh(bh);
535ee2fb
JK
787 mapping = bh->b_assoc_map;
788 __remove_assoc_queue(bh);
789 /* Avoid race with mark_buffer_dirty_inode() which does
790 * a lockless check and we rely on seeing the dirty bit */
791 smp_mb();
792 if (buffer_dirty(bh)) {
793 list_add(&bh->b_assoc_buffers,
e3892296 794 &mapping->private_list);
535ee2fb
JK
795 bh->b_assoc_map = mapping;
796 }
1da177e4
LT
797 spin_unlock(lock);
798 wait_on_buffer(bh);
799 if (!buffer_uptodate(bh))
800 err = -EIO;
801 brelse(bh);
802 spin_lock(lock);
803 }
804
805 spin_unlock(lock);
806 err2 = osync_buffers_list(lock, list);
807 if (err)
808 return err;
809 else
810 return err2;
811}
812
813/*
814 * Invalidate any and all dirty buffers on a given inode. We are
815 * probably unmounting the fs, but that doesn't mean we have already
816 * done a sync(). Just drop the buffers from the inode list.
817 *
818 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
819 * assumes that all the buffers are against the blockdev. Not true
820 * for reiserfs.
821 */
822void invalidate_inode_buffers(struct inode *inode)
823{
824 if (inode_has_buffers(inode)) {
825 struct address_space *mapping = &inode->i_data;
826 struct list_head *list = &mapping->private_list;
827 struct address_space *buffer_mapping = mapping->assoc_mapping;
828
829 spin_lock(&buffer_mapping->private_lock);
830 while (!list_empty(list))
831 __remove_assoc_queue(BH_ENTRY(list->next));
832 spin_unlock(&buffer_mapping->private_lock);
833 }
834}
52b19ac9 835EXPORT_SYMBOL(invalidate_inode_buffers);
1da177e4
LT
836
837/*
838 * Remove any clean buffers from the inode's buffer list. This is called
839 * when we're trying to free the inode itself. Those buffers can pin it.
840 *
841 * Returns true if all buffers were removed.
842 */
843int remove_inode_buffers(struct inode *inode)
844{
845 int ret = 1;
846
847 if (inode_has_buffers(inode)) {
848 struct address_space *mapping = &inode->i_data;
849 struct list_head *list = &mapping->private_list;
850 struct address_space *buffer_mapping = mapping->assoc_mapping;
851
852 spin_lock(&buffer_mapping->private_lock);
853 while (!list_empty(list)) {
854 struct buffer_head *bh = BH_ENTRY(list->next);
855 if (buffer_dirty(bh)) {
856 ret = 0;
857 break;
858 }
859 __remove_assoc_queue(bh);
860 }
861 spin_unlock(&buffer_mapping->private_lock);
862 }
863 return ret;
864}
865
866/*
867 * Create the appropriate buffers when given a page for data area and
 868 * the size of each buffer. Use the bh->b_this_page linked list to
869 * follow the buffers created. Return NULL if unable to create more
870 * buffers.
871 *
872 * The retry flag is used to differentiate async IO (paging, swapping)
873 * which may not fail from ordinary buffer allocations.
874 */
875struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
876 int retry)
877{
878 struct buffer_head *bh, *head;
879 long offset;
880
881try_again:
882 head = NULL;
883 offset = PAGE_SIZE;
884 while ((offset -= size) >= 0) {
885 bh = alloc_buffer_head(GFP_NOFS);
886 if (!bh)
887 goto no_grow;
888
889 bh->b_bdev = NULL;
890 bh->b_this_page = head;
891 bh->b_blocknr = -1;
892 head = bh;
893
894 bh->b_state = 0;
895 atomic_set(&bh->b_count, 0);
896 bh->b_size = size;
897
898 /* Link the buffer to its page */
899 set_bh_page(bh, page, offset);
900
01ffe339 901 init_buffer(bh, NULL, NULL);
1da177e4
LT
902 }
903 return head;
904/*
905 * In case anything failed, we just free everything we got.
906 */
907no_grow:
908 if (head) {
909 do {
910 bh = head;
911 head = head->b_this_page;
912 free_buffer_head(bh);
913 } while (head);
914 }
915
916 /*
917 * Return failure for non-async IO requests. Async IO requests
918 * are not allowed to fail, so we have to wait until buffer heads
919 * become available. But we don't want tasks sleeping with
920 * partially complete buffers, so all were released above.
921 */
922 if (!retry)
923 return NULL;
924
925 /* We're _really_ low on memory. Now we just
926 * wait for old buffer heads to become free due to
927 * finishing IO. Since this is an async request and
928 * the reserve list is empty, we're sure there are
929 * async buffer heads in use.
930 */
931 free_more_memory();
932 goto try_again;
933}
934EXPORT_SYMBOL_GPL(alloc_page_buffers);
935
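/*
 * Link the chain of buffers into a ring (tail->b_this_page = head) and
 * attach it to the page.
 */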
936static inline void
937link_dev_buffers(struct page *page, struct buffer_head *head)
938{
939 struct buffer_head *bh, *tail;
940
941 bh = head;
942 do {
943 tail = bh;
944 bh = bh->b_this_page;
945 } while (bh);
946 tail->b_this_page = head;
947 attach_page_buffers(page, head);
948}
949
950/*
951 * Initialise the state of a blockdev page's buffers.
952 */
953static void
954init_page_buffers(struct page *page, struct block_device *bdev,
955 sector_t block, int size)
956{
957 struct buffer_head *head = page_buffers(page);
958 struct buffer_head *bh = head;
959 int uptodate = PageUptodate(page);
960
961 do {
962 if (!buffer_mapped(bh)) {
963 init_buffer(bh, NULL, NULL);
964 bh->b_bdev = bdev;
965 bh->b_blocknr = block;
966 if (uptodate)
967 set_buffer_uptodate(bh);
968 set_buffer_mapped(bh);
969 }
970 block++;
971 bh = bh->b_this_page;
972 } while (bh != head);
973}
974
975/*
976 * Create the page-cache page that contains the requested block.
977 *
 978 * This is used purely for blockdev mappings.
979 */
980static struct page *
981grow_dev_page(struct block_device *bdev, sector_t block,
982 pgoff_t index, int size)
983{
984 struct inode *inode = bdev->bd_inode;
985 struct page *page;
986 struct buffer_head *bh;
987
ea125892 988 page = find_or_create_page(inode->i_mapping, index,
769848c0 989 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1da177e4
LT
990 if (!page)
991 return NULL;
992
e827f923 993 BUG_ON(!PageLocked(page));
1da177e4
LT
994
995 if (page_has_buffers(page)) {
996 bh = page_buffers(page);
997 if (bh->b_size == size) {
998 init_page_buffers(page, bdev, block, size);
999 return page;
1000 }
1001 if (!try_to_free_buffers(page))
1002 goto failed;
1003 }
1004
1005 /*
1006 * Allocate some buffers for this page
1007 */
1008 bh = alloc_page_buffers(page, size, 0);
1009 if (!bh)
1010 goto failed;
1011
1012 /*
1013 * Link the page to the buffers and initialise them. Take the
1014 * lock to be atomic wrt __find_get_block(), which does not
1015 * run under the page lock.
1016 */
1017 spin_lock(&inode->i_mapping->private_lock);
1018 link_dev_buffers(page, bh);
1019 init_page_buffers(page, bdev, block, size);
1020 spin_unlock(&inode->i_mapping->private_lock);
1021 return page;
1022
1023failed:
1024 BUG();
1025 unlock_page(page);
1026 page_cache_release(page);
1027 return NULL;
1028}
1029
1030/*
1031 * Create buffers for the specified block device block's page. If
1032 * that page was dirty, the buffers are set dirty also.
1da177e4 1033 */
858119e1 1034static int
1da177e4
LT
1035grow_buffers(struct block_device *bdev, sector_t block, int size)
1036{
1037 struct page *page;
1038 pgoff_t index;
1039 int sizebits;
1040
1041 sizebits = -1;
1042 do {
1043 sizebits++;
1044 } while ((size << sizebits) < PAGE_SIZE);
1045
1046 index = block >> sizebits;
1da177e4 1047
e5657933
AM
1048 /*
1049 * Check for a block which wants to lie outside our maximum possible
1050 * pagecache index. (this comparison is done using sector_t types).
1051 */
1052 if (unlikely(index != block >> sizebits)) {
1053 char b[BDEVNAME_SIZE];
1054
1055 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1056 "device %s\n",
8e24eea7 1057 __func__, (unsigned long long)block,
e5657933
AM
1058 bdevname(bdev, b));
1059 return -EIO;
1060 }
1061 block = index << sizebits;
1da177e4
LT
1062 /* Create a page with the proper size buffers.. */
1063 page = grow_dev_page(bdev, block, index, size);
1064 if (!page)
1065 return 0;
1066 unlock_page(page);
1067 page_cache_release(page);
1068 return 1;
1069}
1070
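/*
 * Slow path of __getblk(): validate the requested block size, then loop
 * between looking the buffer up and growing the blockdev pagecache until
 * the buffer appears (calling free_more_memory() if allocation stalls).
 */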
75c96f85 1071static struct buffer_head *
1da177e4
LT
1072__getblk_slow(struct block_device *bdev, sector_t block, int size)
1073{
1074 /* Size must be multiple of hard sectorsize */
e1defc4f 1075 if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1da177e4
LT
1076 (size < 512 || size > PAGE_SIZE))) {
1077 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1078 size);
e1defc4f
MP
1079 printk(KERN_ERR "logical block size: %d\n",
1080 bdev_logical_block_size(bdev));
1da177e4
LT
1081
1082 dump_stack();
1083 return NULL;
1084 }
1085
1086 for (;;) {
1087 struct buffer_head * bh;
e5657933 1088 int ret;
1da177e4
LT
1089
1090 bh = __find_get_block(bdev, block, size);
1091 if (bh)
1092 return bh;
1093
e5657933
AM
1094 ret = grow_buffers(bdev, block, size);
1095 if (ret < 0)
1096 return NULL;
1097 if (ret == 0)
1da177e4
LT
1098 free_more_memory();
1099 }
1100}
1101
1102/*
1103 * The relationship between dirty buffers and dirty pages:
1104 *
1105 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1106 * the page is tagged dirty in its radix tree.
1107 *
1108 * At all times, the dirtiness of the buffers represents the dirtiness of
1109 * subsections of the page. If the page has buffers, the page dirty bit is
1110 * merely a hint about the true dirty state.
1111 *
1112 * When a page is set dirty in its entirety, all its buffers are marked dirty
1113 * (if the page has buffers).
1114 *
1115 * When a buffer is marked dirty, its page is dirtied, but the page's other
1116 * buffers are not.
1117 *
1118 * Also. When blockdev buffers are explicitly read with bread(), they
1119 * individually become uptodate. But their backing page remains not
1120 * uptodate - even if all of its buffers are uptodate. A subsequent
1121 * block_read_full_page() against that page will discover all the uptodate
1122 * buffers, will set the page uptodate and will perform no I/O.
1123 */
1124
1125/**
1126 * mark_buffer_dirty - mark a buffer_head as needing writeout
67be2dd1 1127 * @bh: the buffer_head to mark dirty
1da177e4
LT
1128 *
1129 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1130 * backing page dirty, then tag the page as dirty in its address_space's radix
1131 * tree and then attach the address_space's inode to its superblock's dirty
1132 * inode list.
1133 *
1134 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1135 * mapping->tree_lock and the global inode_lock.
1136 */
fc9b52cd 1137void mark_buffer_dirty(struct buffer_head *bh)
1da177e4 1138{
787d2214 1139 WARN_ON_ONCE(!buffer_uptodate(bh));
1be62dc1
LT
1140
1141 /*
1142 * Very *carefully* optimize the it-is-already-dirty case.
1143 *
1144 * Don't let the final "is it dirty" escape to before we
1145 * perhaps modified the buffer.
1146 */
1147 if (buffer_dirty(bh)) {
1148 smp_mb();
1149 if (buffer_dirty(bh))
1150 return;
1151 }
1152
a8e7d49a
LT
1153 if (!test_set_buffer_dirty(bh)) {
1154 struct page *page = bh->b_page;
8e9d78ed
LT
1155 if (!TestSetPageDirty(page)) {
1156 struct address_space *mapping = page_mapping(page);
1157 if (mapping)
1158 __set_page_dirty(page, mapping, 0);
1159 }
a8e7d49a 1160 }
1da177e4 1161}
1fe72eaa 1162EXPORT_SYMBOL(mark_buffer_dirty);
1da177e4
LT
1163
1164/*
1165 * Decrement a buffer_head's reference count. If all buffers against a page
1166 * have zero reference count, are clean and unlocked, and if the page is clean
1167 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1168 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1169 * a page but it ends up not being freed, and buffers may later be reattached).
1170 */
1171void __brelse(struct buffer_head * buf)
1172{
1173 if (atomic_read(&buf->b_count)) {
1174 put_bh(buf);
1175 return;
1176 }
5c752ad9 1177 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1da177e4 1178}
1fe72eaa 1179EXPORT_SYMBOL(__brelse);
1da177e4
LT
1180
1181/*
1182 * bforget() is like brelse(), except it discards any
1183 * potentially dirty data.
1184 */
1185void __bforget(struct buffer_head *bh)
1186{
1187 clear_buffer_dirty(bh);
535ee2fb 1188 if (bh->b_assoc_map) {
1da177e4
LT
1189 struct address_space *buffer_mapping = bh->b_page->mapping;
1190
1191 spin_lock(&buffer_mapping->private_lock);
1192 list_del_init(&bh->b_assoc_buffers);
58ff407b 1193 bh->b_assoc_map = NULL;
1da177e4
LT
1194 spin_unlock(&buffer_mapping->private_lock);
1195 }
1196 __brelse(bh);
1197}
1fe72eaa 1198EXPORT_SYMBOL(__bforget);
1da177e4
LT
1199
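/*
 * Slow path of __bread(): if the buffer is not already uptodate, submit a
 * READ and wait for it; returns NULL (and drops the ref) on I/O error.
 */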
1200static struct buffer_head *__bread_slow(struct buffer_head *bh)
1201{
1202 lock_buffer(bh);
1203 if (buffer_uptodate(bh)) {
1204 unlock_buffer(bh);
1205 return bh;
1206 } else {
1207 get_bh(bh);
1208 bh->b_end_io = end_buffer_read_sync;
1209 submit_bh(READ, bh);
1210 wait_on_buffer(bh);
1211 if (buffer_uptodate(bh))
1212 return bh;
1213 }
1214 brelse(bh);
1215 return NULL;
1216}
1217
1218/*
1219 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1220 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1221 * refcount elevated by one when they're in an LRU. A buffer can only appear
1222 * once in a particular CPU's LRU. A single buffer can be present in multiple
1223 * CPU's LRUs at the same time.
1224 *
1225 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1226 * sb_find_get_block().
1227 *
1228 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1229 * a local interrupt disable for that.
1230 */
1231
1232#define BH_LRU_SIZE 8
1233
1234struct bh_lru {
1235 struct buffer_head *bhs[BH_LRU_SIZE];
1236};
1237
1238static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1239
1240#ifdef CONFIG_SMP
1241#define bh_lru_lock() local_irq_disable()
1242#define bh_lru_unlock() local_irq_enable()
1243#else
1244#define bh_lru_lock() preempt_disable()
1245#define bh_lru_unlock() preempt_enable()
1246#endif
1247
1248static inline void check_irqs_on(void)
1249{
1250#ifdef irqs_disabled
1251 BUG_ON(irqs_disabled());
1252#endif
1253}
1254
1255/*
1256 * The LRU management algorithm is dopey-but-simple. Sorry.
1257 */
1258static void bh_lru_install(struct buffer_head *bh)
1259{
1260 struct buffer_head *evictee = NULL;
1da177e4
LT
1261
1262 check_irqs_on();
1263 bh_lru_lock();
c7b92516 1264 if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
1da177e4
LT
1265 struct buffer_head *bhs[BH_LRU_SIZE];
1266 int in;
1267 int out = 0;
1268
1269 get_bh(bh);
1270 bhs[out++] = bh;
1271 for (in = 0; in < BH_LRU_SIZE; in++) {
c7b92516
CL
1272 struct buffer_head *bh2 =
1273 __this_cpu_read(bh_lrus.bhs[in]);
1da177e4
LT
1274
1275 if (bh2 == bh) {
1276 __brelse(bh2);
1277 } else {
1278 if (out >= BH_LRU_SIZE) {
1279 BUG_ON(evictee != NULL);
1280 evictee = bh2;
1281 } else {
1282 bhs[out++] = bh2;
1283 }
1284 }
1285 }
1286 while (out < BH_LRU_SIZE)
1287 bhs[out++] = NULL;
c7b92516 1288 memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
1da177e4
LT
1289 }
1290 bh_lru_unlock();
1291
1292 if (evictee)
1293 __brelse(evictee);
1294}
1295
1296/*
1297 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1298 */
858119e1 1299static struct buffer_head *
3991d3bd 1300lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1da177e4
LT
1301{
1302 struct buffer_head *ret = NULL;
3991d3bd 1303 unsigned int i;
1da177e4
LT
1304
1305 check_irqs_on();
1306 bh_lru_lock();
1da177e4 1307 for (i = 0; i < BH_LRU_SIZE; i++) {
c7b92516 1308 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1da177e4
LT
1309
1310 if (bh && bh->b_bdev == bdev &&
1311 bh->b_blocknr == block && bh->b_size == size) {
1312 if (i) {
1313 while (i) {
c7b92516
CL
1314 __this_cpu_write(bh_lrus.bhs[i],
1315 __this_cpu_read(bh_lrus.bhs[i - 1]));
1da177e4
LT
1316 i--;
1317 }
c7b92516 1318 __this_cpu_write(bh_lrus.bhs[0], bh);
1da177e4
LT
1319 }
1320 get_bh(bh);
1321 ret = bh;
1322 break;
1323 }
1324 }
1325 bh_lru_unlock();
1326 return ret;
1327}
1328
1329/*
1330 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1331 * it in the LRU and mark it as accessed. If it is not present then return
1332 * NULL
1333 */
1334struct buffer_head *
3991d3bd 1335__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1da177e4
LT
1336{
1337 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1338
1339 if (bh == NULL) {
385fd4c5 1340 bh = __find_get_block_slow(bdev, block);
1da177e4
LT
1341 if (bh)
1342 bh_lru_install(bh);
1343 }
1344 if (bh)
1345 touch_buffer(bh);
1346 return bh;
1347}
1348EXPORT_SYMBOL(__find_get_block);
1349
1350/*
1351 * __getblk will locate (and, if necessary, create) the buffer_head
1352 * which corresponds to the passed block_device, block and size. The
1353 * returned buffer has its reference count incremented.
1354 *
1355 * __getblk() cannot fail - it just keeps trying. If you pass it an
1356 * illegal block number, __getblk() will happily return a buffer_head
1357 * which represents the non-existent block. Very weird.
1358 *
1359 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1360 * attempt is failing. FIXME, perhaps?
1361 */
1362struct buffer_head *
3991d3bd 1363__getblk(struct block_device *bdev, sector_t block, unsigned size)
1da177e4
LT
1364{
1365 struct buffer_head *bh = __find_get_block(bdev, block, size);
1366
1367 might_sleep();
1368 if (bh == NULL)
1369 bh = __getblk_slow(bdev, block, size);
1370 return bh;
1371}
1372EXPORT_SYMBOL(__getblk);
1373
1374/*
1375 * Do async read-ahead on a buffer..
1376 */
3991d3bd 1377void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1da177e4
LT
1378{
1379 struct buffer_head *bh = __getblk(bdev, block, size);
a3e713b5
AM
1380 if (likely(bh)) {
1381 ll_rw_block(READA, 1, &bh);
1382 brelse(bh);
1383 }
1da177e4
LT
1384}
1385EXPORT_SYMBOL(__breadahead);
1386
1387/**
1388 * __bread() - reads a specified block and returns the bh
67be2dd1 1389 * @bdev: the block_device to read from
1da177e4
LT
1390 * @block: number of block
1391 * @size: size (in bytes) to read
1392 *
1393 * Reads a specified block, and returns buffer head that contains it.
1394 * It returns NULL if the block was unreadable.
1395 */
1396struct buffer_head *
3991d3bd 1397__bread(struct block_device *bdev, sector_t block, unsigned size)
1da177e4
LT
1398{
1399 struct buffer_head *bh = __getblk(bdev, block, size);
1400
a3e713b5 1401 if (likely(bh) && !buffer_uptodate(bh))
1da177e4
LT
1402 bh = __bread_slow(bh);
1403 return bh;
1404}
1405EXPORT_SYMBOL(__bread);
1406
1407/*
1408 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1409 * This doesn't race because it runs in each cpu either in irq
1410 * or with preempt disabled.
1411 */
1412static void invalidate_bh_lru(void *arg)
1413{
1414 struct bh_lru *b = &get_cpu_var(bh_lrus);
1415 int i;
1416
1417 for (i = 0; i < BH_LRU_SIZE; i++) {
1418 brelse(b->bhs[i]);
1419 b->bhs[i] = NULL;
1420 }
1421 put_cpu_var(bh_lrus);
1422}
1423
f9a14399 1424void invalidate_bh_lrus(void)
1da177e4 1425{
15c8b6c1 1426 on_each_cpu(invalidate_bh_lru, NULL, 1);
1da177e4 1427}
9db5579b 1428EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1da177e4
LT
1429
1430void set_bh_page(struct buffer_head *bh,
1431 struct page *page, unsigned long offset)
1432{
1433 bh->b_page = page;
e827f923 1434 BUG_ON(offset >= PAGE_SIZE);
1da177e4
LT
1435 if (PageHighMem(page))
1436 /*
1437 * This catches illegal uses and preserves the offset:
1438 */
1439 bh->b_data = (char *)(0 + offset);
1440 else
1441 bh->b_data = page_address(page) + offset;
1442}
1443EXPORT_SYMBOL(set_bh_page);
1444
1445/*
1446 * Called when truncating a buffer on a page completely.
1447 */
858119e1 1448static void discard_buffer(struct buffer_head * bh)
1da177e4
LT
1449{
1450 lock_buffer(bh);
1451 clear_buffer_dirty(bh);
1452 bh->b_bdev = NULL;
1453 clear_buffer_mapped(bh);
1454 clear_buffer_req(bh);
1455 clear_buffer_new(bh);
1456 clear_buffer_delay(bh);
33a266dd 1457 clear_buffer_unwritten(bh);
1da177e4
LT
1458 unlock_buffer(bh);
1459}
1460
1da177e4
LT
1461/**
 1462 * block_invalidatepage - invalidate part or all of a buffer-backed page
1463 *
1464 * @page: the page which is affected
1465 * @offset: the index of the truncation point
1466 *
1467 * block_invalidatepage() is called when all or part of the page has become
 1468 * invalidated by a truncate operation.
1469 *
1470 * block_invalidatepage() does not have to release all buffers, but it must
1471 * ensure that no dirty buffer is left outside @offset and that no I/O
1472 * is underway against any of the blocks which are outside the truncation
1473 * point. Because the caller is about to free (and possibly reuse) those
1474 * blocks on-disk.
1475 */
2ff28e22 1476void block_invalidatepage(struct page *page, unsigned long offset)
1da177e4
LT
1477{
1478 struct buffer_head *head, *bh, *next;
1479 unsigned int curr_off = 0;
1da177e4
LT
1480
1481 BUG_ON(!PageLocked(page));
1482 if (!page_has_buffers(page))
1483 goto out;
1484
1485 head = page_buffers(page);
1486 bh = head;
1487 do {
1488 unsigned int next_off = curr_off + bh->b_size;
1489 next = bh->b_this_page;
1490
1491 /*
1492 * is this block fully invalidated?
1493 */
1494 if (offset <= curr_off)
1495 discard_buffer(bh);
1496 curr_off = next_off;
1497 bh = next;
1498 } while (bh != head);
1499
1500 /*
1501 * We release buffers only if the entire page is being invalidated.
1502 * The get_block cached value has been unconditionally invalidated,
1503 * so real IO is not possible anymore.
1504 */
1505 if (offset == 0)
2ff28e22 1506 try_to_release_page(page, 0);
1da177e4 1507out:
2ff28e22 1508 return;
1da177e4
LT
1509}
1510EXPORT_SYMBOL(block_invalidatepage);
1511
1512/*
1513 * We attach and possibly dirty the buffers atomically wrt
1514 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1515 * is already excluded via the page lock.
1516 */
1517void create_empty_buffers(struct page *page,
1518 unsigned long blocksize, unsigned long b_state)
1519{
1520 struct buffer_head *bh, *head, *tail;
1521
1522 head = alloc_page_buffers(page, blocksize, 1);
1523 bh = head;
1524 do {
1525 bh->b_state |= b_state;
1526 tail = bh;
1527 bh = bh->b_this_page;
1528 } while (bh);
1529 tail->b_this_page = head;
1530
1531 spin_lock(&page->mapping->private_lock);
1532 if (PageUptodate(page) || PageDirty(page)) {
1533 bh = head;
1534 do {
1535 if (PageDirty(page))
1536 set_buffer_dirty(bh);
1537 if (PageUptodate(page))
1538 set_buffer_uptodate(bh);
1539 bh = bh->b_this_page;
1540 } while (bh != head);
1541 }
1542 attach_page_buffers(page, head);
1543 spin_unlock(&page->mapping->private_lock);
1544}
1545EXPORT_SYMBOL(create_empty_buffers);
1546
1547/*
1548 * We are taking a block for data and we don't want any output from any
 1549 * buffer-cache aliases starting from the return of this function and
 1550 * until the moment when something will explicitly mark the buffer
 1551 * dirty (hopefully that will not happen until we free that block ;-)
1552 * We don't even need to mark it not-uptodate - nobody can expect
 1553 * anything from a newly allocated buffer anyway. We used to use
1554 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1555 * don't want to mark the alias unmapped, for example - it would confuse
1556 * anyone who might pick it with bread() afterwards...
1557 *
1558 * Also.. Note that bforget() doesn't lock the buffer. So there can
1559 * be writeout I/O going on against recently-freed buffers. We don't
1560 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1561 * only if we really need to. That happens here.
1562 */
1563void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1564{
1565 struct buffer_head *old_bh;
1566
1567 might_sleep();
1568
385fd4c5 1569 old_bh = __find_get_block_slow(bdev, block);
1da177e4
LT
1570 if (old_bh) {
1571 clear_buffer_dirty(old_bh);
1572 wait_on_buffer(old_bh);
1573 clear_buffer_req(old_bh);
1574 __brelse(old_bh);
1575 }
1576}
1577EXPORT_SYMBOL(unmap_underlying_metadata);
1578
1579/*
1580 * NOTE! All mapped/uptodate combinations are valid:
1581 *
1582 * Mapped Uptodate Meaning
1583 *
1584 * No No "unknown" - must do get_block()
1585 * No Yes "hole" - zero-filled
1586 * Yes No "allocated" - allocated on disk, not read in
1587 * Yes Yes "valid" - allocated and up-to-date in memory.
1588 *
1589 * "Dirty" is valid only with the last case (mapped+uptodate).
1590 */
1591
1592/*
1593 * While block_write_full_page is writing back the dirty buffers under
1594 * the page lock, whoever dirtied the buffers may decide to clean them
1595 * again at any time. We handle that by only looking at the buffer
1596 * state inside lock_buffer().
1597 *
1598 * If block_write_full_page() is called for regular writeback
1599 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1600 * locked buffer. This only can happen if someone has written the buffer
1601 * directly, with submit_bh(). At the address_space level PageWriteback
1602 * prevents this contention from occurring.
6e34eedd
TT
1603 *
1604 * If block_write_full_page() is called with wbc->sync_mode ==
721a9602
JA
1605 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
1606 * causes the writes to be flagged as synchronous writes.
1da177e4
LT
1607 */
1608static int __block_write_full_page(struct inode *inode, struct page *page,
35c80d5f
CM
1609 get_block_t *get_block, struct writeback_control *wbc,
1610 bh_end_io_t *handler)
1da177e4
LT
1611{
1612 int err;
1613 sector_t block;
1614 sector_t last_block;
f0fbd5fc 1615 struct buffer_head *bh, *head;
b0cf2321 1616 const unsigned blocksize = 1 << inode->i_blkbits;
1da177e4 1617 int nr_underway = 0;
6e34eedd 1618 int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
721a9602 1619 WRITE_SYNC : WRITE);
1da177e4
LT
1620
1621 BUG_ON(!PageLocked(page));
1622
1623 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1624
1625 if (!page_has_buffers(page)) {
b0cf2321 1626 create_empty_buffers(page, blocksize,
1da177e4
LT
1627 (1 << BH_Dirty)|(1 << BH_Uptodate));
1628 }
1629
1630 /*
1631 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1632 * here, and the (potentially unmapped) buffers may become dirty at
1633 * any time. If a buffer becomes dirty here after we've inspected it
1634 * then we just miss that fact, and the page stays dirty.
1635 *
1636 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1637 * handle that here by just cleaning them.
1638 */
1639
54b21a79 1640 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4
LT
1641 head = page_buffers(page);
1642 bh = head;
1643
1644 /*
1645 * Get all the dirty buffers mapped to disk addresses and
1646 * handle any aliases from the underlying blockdev's mapping.
1647 */
1648 do {
1649 if (block > last_block) {
1650 /*
1651 * mapped buffers outside i_size will occur, because
1652 * this page can be outside i_size when there is a
1653 * truncate in progress.
1654 */
1655 /*
1656 * The buffer was zeroed by block_write_full_page()
1657 */
1658 clear_buffer_dirty(bh);
1659 set_buffer_uptodate(bh);
29a814d2
AT
1660 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1661 buffer_dirty(bh)) {
b0cf2321 1662 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
1663 err = get_block(inode, block, bh, 1);
1664 if (err)
1665 goto recover;
29a814d2 1666 clear_buffer_delay(bh);
1da177e4
LT
1667 if (buffer_new(bh)) {
1668 /* blockdev mappings never come here */
1669 clear_buffer_new(bh);
1670 unmap_underlying_metadata(bh->b_bdev,
1671 bh->b_blocknr);
1672 }
1673 }
1674 bh = bh->b_this_page;
1675 block++;
1676 } while (bh != head);
1677
1678 do {
1da177e4
LT
1679 if (!buffer_mapped(bh))
1680 continue;
1681 /*
1682 * If it's a fully non-blocking write attempt and we cannot
1683 * lock the buffer then redirty the page. Note that this can
5b0830cb
JA
1684 * potentially cause a busy-wait loop from writeback threads
1685 * and kswapd activity, but those code paths have their own
1686 * higher-level throttling.
1da177e4 1687 */
1b430bee 1688 if (wbc->sync_mode != WB_SYNC_NONE) {
1da177e4 1689 lock_buffer(bh);
ca5de404 1690 } else if (!trylock_buffer(bh)) {
1da177e4
LT
1691 redirty_page_for_writepage(wbc, page);
1692 continue;
1693 }
1694 if (test_clear_buffer_dirty(bh)) {
35c80d5f 1695 mark_buffer_async_write_endio(bh, handler);
1da177e4
LT
1696 } else {
1697 unlock_buffer(bh);
1698 }
1699 } while ((bh = bh->b_this_page) != head);
1700
1701 /*
1702 * The page and its buffers are protected by PageWriteback(), so we can
1703 * drop the bh refcounts early.
1704 */
1705 BUG_ON(PageWriteback(page));
1706 set_page_writeback(page);
1da177e4
LT
1707
1708 do {
1709 struct buffer_head *next = bh->b_this_page;
1710 if (buffer_async_write(bh)) {
a64c8610 1711 submit_bh(write_op, bh);
1da177e4
LT
1712 nr_underway++;
1713 }
1da177e4
LT
1714 bh = next;
1715 } while (bh != head);
05937baa 1716 unlock_page(page);
1da177e4
LT
1717
1718 err = 0;
1719done:
1720 if (nr_underway == 0) {
1721 /*
1722 * The page was marked dirty, but the buffers were
1723 * clean. Someone wrote them back by hand with
1724 * ll_rw_block/submit_bh. A rare case.
1725 */
1da177e4 1726 end_page_writeback(page);
3d67f2d7 1727
1da177e4
LT
1728 /*
1729 * The page and buffer_heads can be released at any time from
1730 * here on.
1731 */
1da177e4
LT
1732 }
1733 return err;
1734
1735recover:
1736 /*
1737 * ENOSPC, or some other error. We may already have added some
1738 * blocks to the file, so we need to write these out to avoid
1739 * exposing stale data.
1740 * The page is currently locked and not marked for writeback
1741 */
1742 bh = head;
1743 /* Recovery: lock and submit the mapped buffers */
1744 do {
29a814d2
AT
1745 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1746 !buffer_delay(bh)) {
1da177e4 1747 lock_buffer(bh);
35c80d5f 1748 mark_buffer_async_write_endio(bh, handler);
1da177e4
LT
1749 } else {
1750 /*
1751 * The buffer may have been set dirty during
1752 * attachment to a dirty page.
1753 */
1754 clear_buffer_dirty(bh);
1755 }
1756 } while ((bh = bh->b_this_page) != head);
1757 SetPageError(page);
1758 BUG_ON(PageWriteback(page));
7e4c3690 1759 mapping_set_error(page->mapping, err);
1da177e4 1760 set_page_writeback(page);
1da177e4
LT
1761 do {
1762 struct buffer_head *next = bh->b_this_page;
1763 if (buffer_async_write(bh)) {
1764 clear_buffer_dirty(bh);
a64c8610 1765 submit_bh(write_op, bh);
1da177e4
LT
1766 nr_underway++;
1767 }
1da177e4
LT
1768 bh = next;
1769 } while (bh != head);
ffda9d30 1770 unlock_page(page);
1da177e4
LT
1771 goto done;
1772}
1773
afddba49
NP
1774/*
1775 * If a page has any new buffers, zero them out here, and mark them uptodate
1776 * and dirty so they'll be written out (in order to prevent uninitialised
1777 * block data from leaking). And clear the new bit.
1778 */
1779void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1780{
1781 unsigned int block_start, block_end;
1782 struct buffer_head *head, *bh;
1783
1784 BUG_ON(!PageLocked(page));
1785 if (!page_has_buffers(page))
1786 return;
1787
1788 bh = head = page_buffers(page);
1789 block_start = 0;
1790 do {
1791 block_end = block_start + bh->b_size;
1792
1793 if (buffer_new(bh)) {
1794 if (block_end > from && block_start < to) {
1795 if (!PageUptodate(page)) {
1796 unsigned start, size;
1797
1798 start = max(from, block_start);
1799 size = min(to, block_end) - start;
1800
eebd2aa3 1801 zero_user(page, start, size);
afddba49
NP
1802 set_buffer_uptodate(bh);
1803 }
1804
1805 clear_buffer_new(bh);
1806 mark_buffer_dirty(bh);
1807 }
1808 }
1809
1810 block_start = block_end;
1811 bh = bh->b_this_page;
1812 } while (bh != head);
1813}
1814EXPORT_SYMBOL(page_zero_new_buffers);
1815
ebdec241 1816int __block_write_begin(struct page *page, loff_t pos, unsigned len,
6e1db88d 1817 get_block_t *get_block)
1da177e4 1818{
ebdec241
CH
1819 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1820 unsigned to = from + len;
6e1db88d 1821 struct inode *inode = page->mapping->host;
1da177e4
LT
1822 unsigned block_start, block_end;
1823 sector_t block;
1824 int err = 0;
1825 unsigned blocksize, bbits;
1826 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1827
1828 BUG_ON(!PageLocked(page));
1829 BUG_ON(from > PAGE_CACHE_SIZE);
1830 BUG_ON(to > PAGE_CACHE_SIZE);
1831 BUG_ON(from > to);
1832
1833 blocksize = 1 << inode->i_blkbits;
1834 if (!page_has_buffers(page))
1835 create_empty_buffers(page, blocksize, 0);
1836 head = page_buffers(page);
1837
1838 bbits = inode->i_blkbits;
1839 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1840
1841 for(bh = head, block_start = 0; bh != head || !block_start;
1842 block++, block_start=block_end, bh = bh->b_this_page) {
1843 block_end = block_start + blocksize;
1844 if (block_end <= from || block_start >= to) {
1845 if (PageUptodate(page)) {
1846 if (!buffer_uptodate(bh))
1847 set_buffer_uptodate(bh);
1848 }
1849 continue;
1850 }
1851 if (buffer_new(bh))
1852 clear_buffer_new(bh);
1853 if (!buffer_mapped(bh)) {
b0cf2321 1854 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
1855 err = get_block(inode, block, bh, 1);
1856 if (err)
f3ddbdc6 1857 break;
1da177e4 1858 if (buffer_new(bh)) {
1da177e4
LT
1859 unmap_underlying_metadata(bh->b_bdev,
1860 bh->b_blocknr);
1861 if (PageUptodate(page)) {
637aff46 1862 clear_buffer_new(bh);
1da177e4 1863 set_buffer_uptodate(bh);
637aff46 1864 mark_buffer_dirty(bh);
1da177e4
LT
1865 continue;
1866 }
eebd2aa3
CL
1867 if (block_end > to || block_start < from)
1868 zero_user_segments(page,
1869 to, block_end,
1870 block_start, from);
1da177e4
LT
1871 continue;
1872 }
1873 }
1874 if (PageUptodate(page)) {
1875 if (!buffer_uptodate(bh))
1876 set_buffer_uptodate(bh);
1877 continue;
1878 }
1879 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
33a266dd 1880 !buffer_unwritten(bh) &&
1da177e4
LT
1881 (block_start < from || block_end > to)) {
1882 ll_rw_block(READ, 1, &bh);
1883 *wait_bh++=bh;
1884 }
1885 }
1886 /*
1887 * If we issued read requests - let them complete.
1888 */
1889 while(wait_bh > wait) {
1890 wait_on_buffer(*--wait_bh);
1891 if (!buffer_uptodate(*wait_bh))
f3ddbdc6 1892 err = -EIO;
1da177e4 1893 }
6e1db88d 1894 if (unlikely(err)) {
afddba49 1895 page_zero_new_buffers(page, from, to);
6e1db88d
CH
1896 ClearPageUptodate(page);
1897 }
1da177e4
LT
1898 return err;
1899}
ebdec241 1900EXPORT_SYMBOL(__block_write_begin);
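Both __block_write_begin() and the other helpers in this file drive the filesystem through its get_block_t callback: given a logical block number, the callback fills in the buffer_head with the on-disk mapping and flags a freshly allocated block as new. A minimal, hypothetical callback might look like the sketch below; examplefs_map_block() stands in for the filesystem's own mapping/allocation code and is not a real kernel function.

static int examplefs_get_block(struct inode *inode, sector_t iblock,
			       struct buffer_head *bh_result, int create)
{
	sector_t phys;
	int new = 0;
	int err;

	/* hypothetical helper: look up (and, if create, allocate) the block */
	err = examplefs_map_block(inode, iblock, create, &phys, &new);
	if (err)
		return err;
	if (phys) {
		/* sets buffer_mapped(), b_bdev and b_blocknr */
		map_bh(bh_result, inode->i_sb, phys);
		if (new)
			/* freshly allocated: callers will zero the block and
			 * unmap any stale blockdev aliases */
			set_buffer_new(bh_result);
	}
	return 0;
}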
1da177e4
LT
1901
1902static int __block_commit_write(struct inode *inode, struct page *page,
1903 unsigned from, unsigned to)
1904{
1905 unsigned block_start, block_end;
1906 int partial = 0;
1907 unsigned blocksize;
1908 struct buffer_head *bh, *head;
1909
1910 blocksize = 1 << inode->i_blkbits;
1911
1912 for(bh = head = page_buffers(page), block_start = 0;
1913 bh != head || !block_start;
1914 block_start=block_end, bh = bh->b_this_page) {
1915 block_end = block_start + blocksize;
1916 if (block_end <= from || block_start >= to) {
1917 if (!buffer_uptodate(bh))
1918 partial = 1;
1919 } else {
1920 set_buffer_uptodate(bh);
1921 mark_buffer_dirty(bh);
1922 }
afddba49 1923 clear_buffer_new(bh);
1da177e4
LT
1924 }
1925
1926 /*
1927 * If this is a partial write which happened to make all buffers
1928 * uptodate then we can optimize away a bogus readpage() for
1929 * the next read(). Here we 'discover' whether the page went
1930 * uptodate as a result of this (potentially partial) write.
1931 */
1932 if (!partial)
1933 SetPageUptodate(page);
1934 return 0;
1935}
1936
afddba49 1937/*
155130a4
CH
1938 * block_write_begin takes care of the basic task of block allocation and
1939 * bringing partial write blocks uptodate first.
1940 *
7bb46a67 1941 * The filesystem needs to handle block truncation upon failure.
afddba49 1942 */
155130a4
CH
1943int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
1944 unsigned flags, struct page **pagep, get_block_t *get_block)
afddba49 1945{
6e1db88d 1946 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
afddba49 1947 struct page *page;
6e1db88d 1948 int status;
afddba49 1949
6e1db88d
CH
1950 page = grab_cache_page_write_begin(mapping, index, flags);
1951 if (!page)
1952 return -ENOMEM;
afddba49 1953
6e1db88d 1954 status = __block_write_begin(page, pos, len, get_block);
afddba49 1955 if (unlikely(status)) {
6e1db88d
CH
1956 unlock_page(page);
1957 page_cache_release(page);
1958 page = NULL;
afddba49
NP
1959 }
1960
6e1db88d 1961 *pagep = page;
afddba49
NP
1962 return status;
1963}
1964EXPORT_SYMBOL(block_write_begin);
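In practice a filesystem that relies on these helpers wraps block_write_begin() directly in its ->write_begin method. A minimal sketch, assuming the hypothetical examplefs_get_block() callback sketched earlier:

static int examplefs_write_begin(struct file *file,
				 struct address_space *mapping,
				 loff_t pos, unsigned len, unsigned flags,
				 struct page **pagep, void **fsdata)
{
	/*
	 * A real filesystem would also trim any blocks instantiated beyond
	 * the old i_size if this fails, as noted above.
	 */
	return block_write_begin(mapping, pos, len, flags, pagep,
				 examplefs_get_block);
}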
1965
1966int block_write_end(struct file *file, struct address_space *mapping,
1967 loff_t pos, unsigned len, unsigned copied,
1968 struct page *page, void *fsdata)
1969{
1970 struct inode *inode = mapping->host;
1971 unsigned start;
1972
1973 start = pos & (PAGE_CACHE_SIZE - 1);
1974
1975 if (unlikely(copied < len)) {
1976 /*
1977 * The buffers that were written will now be uptodate, so we
1978 * don't have to worry about a readpage reading them and
1979 * overwriting a partial write. However if we have encountered
1980 * a short write and only partially written into a buffer, it
1981 * will not be marked uptodate, so a readpage might come in and
1982 * destroy our partial write.
1983 *
1984 * Do the simplest thing, and just treat any short write to a
1985 * non uptodate page as a zero-length write, and force the
1986 * caller to redo the whole thing.
1987 */
1988 if (!PageUptodate(page))
1989 copied = 0;
1990
1991 page_zero_new_buffers(page, start+copied, start+len);
1992 }
1993 flush_dcache_page(page);
1994
1995 /* This could be a short (even 0-length) commit */
1996 __block_commit_write(inode, page, start, start+copied);
1997
1998 return copied;
1999}
2000EXPORT_SYMBOL(block_write_end);
2001
2002int generic_write_end(struct file *file, struct address_space *mapping,
2003 loff_t pos, unsigned len, unsigned copied,
2004 struct page *page, void *fsdata)
2005{
2006 struct inode *inode = mapping->host;
c7d206b3 2007 int i_size_changed = 0;
afddba49
NP
2008
2009 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2010
2011 /*
2012 * No need to use i_size_read() here, the i_size
2013 * cannot change under us because we hold i_mutex.
2014 *
2015 * But it's important to update i_size while still holding page lock:
2016 * page writeout could otherwise come in and zero beyond i_size.
2017 */
2018 if (pos+copied > inode->i_size) {
2019 i_size_write(inode, pos+copied);
c7d206b3 2020 i_size_changed = 1;
afddba49
NP
2021 }
2022
2023 unlock_page(page);
2024 page_cache_release(page);
2025
c7d206b3
JK
2026 /*
2027 * Don't mark the inode dirty under page lock. First, it unnecessarily
2028 * makes the holding time of page lock longer. Second, it forces lock
2029 * ordering of page lock and transaction start for journaling
2030 * filesystems.
2031 */
2032 if (i_size_changed)
2033 mark_inode_dirty(inode);
2034
afddba49
NP
2035 return copied;
2036}
2037EXPORT_SYMBOL(generic_write_end);
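The two halves are usually wired into the filesystem's address_space_operations; generic_write_end() can be used directly as ->write_end when no per-filesystem work is needed after the copy. A hedged sketch (the examplefs_* names are hypothetical and the exact set of methods depends on the kernel version):

static const struct address_space_operations examplefs_aops = {
	.readpage	= examplefs_readpage,	/* see block_read_full_page below */
	.writepage	= examplefs_writepage,	/* see block_write_full_page below */
	.write_begin	= examplefs_write_begin,
	.write_end	= generic_write_end,
};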
2038
8ab22b9a
HH
2039/*
2040 * block_is_partially_uptodate checks whether buffers within a page are
2041 * uptodate or not.
2042 *
2043 * Returns true if all buffers which correspond to a file portion
2044 * we want to read are uptodate.
2045 */
2046int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2047 unsigned long from)
2048{
2049 struct inode *inode = page->mapping->host;
2050 unsigned block_start, block_end, blocksize;
2051 unsigned to;
2052 struct buffer_head *bh, *head;
2053 int ret = 1;
2054
2055 if (!page_has_buffers(page))
2056 return 0;
2057
2058 blocksize = 1 << inode->i_blkbits;
2059 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2060 to = from + to;
2061 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2062 return 0;
2063
2064 head = page_buffers(page);
2065 bh = head;
2066 block_start = 0;
2067 do {
2068 block_end = block_start + blocksize;
2069 if (block_end > from && block_start < to) {
2070 if (!buffer_uptodate(bh)) {
2071 ret = 0;
2072 break;
2073 }
2074 if (block_end >= to)
2075 break;
2076 }
2077 block_start = block_end;
2078 bh = bh->b_this_page;
2079 } while (bh != head);
2080
2081 return ret;
2082}
2083EXPORT_SYMBOL(block_is_partially_uptodate);
2084
1da177e4
LT
2085/*
2086 * Generic "read page" function for block devices that have the normal
2087 * get_block functionality. This is most of the block device filesystems.
2088 * Reads the page asynchronously --- the unlock_buffer() and
2089 * set/clear_buffer_uptodate() functions propagate buffer state into the
2090 * page struct once IO has completed.
2091 */
2092int block_read_full_page(struct page *page, get_block_t *get_block)
2093{
2094 struct inode *inode = page->mapping->host;
2095 sector_t iblock, lblock;
2096 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2097 unsigned int blocksize;
2098 int nr, i;
2099 int fully_mapped = 1;
2100
cd7619d6 2101 BUG_ON(!PageLocked(page));
1da177e4
LT
2102 blocksize = 1 << inode->i_blkbits;
2103 if (!page_has_buffers(page))
2104 create_empty_buffers(page, blocksize, 0);
2105 head = page_buffers(page);
2106
2107 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2108 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2109 bh = head;
2110 nr = 0;
2111 i = 0;
2112
2113 do {
2114 if (buffer_uptodate(bh))
2115 continue;
2116
2117 if (!buffer_mapped(bh)) {
c64610ba
AM
2118 int err = 0;
2119
1da177e4
LT
2120 fully_mapped = 0;
2121 if (iblock < lblock) {
b0cf2321 2122 WARN_ON(bh->b_size != blocksize);
c64610ba
AM
2123 err = get_block(inode, iblock, bh, 0);
2124 if (err)
1da177e4
LT
2125 SetPageError(page);
2126 }
2127 if (!buffer_mapped(bh)) {
eebd2aa3 2128 zero_user(page, i * blocksize, blocksize);
c64610ba
AM
2129 if (!err)
2130 set_buffer_uptodate(bh);
1da177e4
LT
2131 continue;
2132 }
2133 /*
2134 * get_block() might have updated the buffer
2135 * synchronously
2136 */
2137 if (buffer_uptodate(bh))
2138 continue;
2139 }
2140 arr[nr++] = bh;
2141 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2142
2143 if (fully_mapped)
2144 SetPageMappedToDisk(page);
2145
2146 if (!nr) {
2147 /*
2148 * All buffers are uptodate - we can set the page uptodate
2149 * as well. But not if get_block() returned an error.
2150 */
2151 if (!PageError(page))
2152 SetPageUptodate(page);
2153 unlock_page(page);
2154 return 0;
2155 }
2156
2157 /* Stage two: lock the buffers */
2158 for (i = 0; i < nr; i++) {
2159 bh = arr[i];
2160 lock_buffer(bh);
2161 mark_buffer_async_read(bh);
2162 }
2163
2164 /*
2165 * Stage 3: start the IO. Check for uptodateness
2166 * inside the buffer lock in case another process reading
2167 * the underlying blockdev brought it uptodate (the sct fix).
2168 */
2169 for (i = 0; i < nr; i++) {
2170 bh = arr[i];
2171 if (buffer_uptodate(bh))
2172 end_buffer_async_read(bh, 1);
2173 else
2174 submit_bh(READ, bh);
2175 }
2176 return 0;
2177}
1fe72eaa 2178EXPORT_SYMBOL(block_read_full_page);
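A filesystem's ->readpage built on this helper is typically a one-line wrapper; a sketch assuming the hypothetical examplefs_get_block() callback:

static int examplefs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, examplefs_get_block);
}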
1da177e4
LT
2179
2180/* utility function for filesystems that need to do work on expanding
89e10787 2181 * truncates. Uses filesystem pagecache writes to allow the filesystem to
1da177e4
LT
2182 * deal with the hole.
2183 */
89e10787 2184int generic_cont_expand_simple(struct inode *inode, loff_t size)
1da177e4
LT
2185{
2186 struct address_space *mapping = inode->i_mapping;
2187 struct page *page;
89e10787 2188 void *fsdata;
1da177e4
LT
2189 int err;
2190
c08d3b0e 2191 err = inode_newsize_ok(inode, size);
2192 if (err)
1da177e4
LT
2193 goto out;
2194
89e10787
NP
2195 err = pagecache_write_begin(NULL, mapping, size, 0,
2196 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2197 &page, &fsdata);
2198 if (err)
05eb0b51 2199 goto out;
05eb0b51 2200
89e10787
NP
2201 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2202 BUG_ON(err > 0);
05eb0b51 2203
1da177e4
LT
2204out:
2205 return err;
2206}
1fe72eaa 2207EXPORT_SYMBOL(generic_cont_expand_simple);
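A sketch of how a filesystem might use this on an expanding truncate, for instance from its setattr path (examplefs_expand() is hypothetical; the zero-filled pages are allocated on disk by the filesystem's get_block when they are written back):

static int examplefs_expand(struct inode *inode, loff_t new_size)
{
	if (new_size > i_size_read(inode))
		return generic_cont_expand_simple(inode, new_size);
	return 0;
}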
1da177e4 2208
f1e3af72
AB
2209static int cont_expand_zero(struct file *file, struct address_space *mapping,
2210 loff_t pos, loff_t *bytes)
1da177e4 2211{
1da177e4 2212 struct inode *inode = mapping->host;
1da177e4 2213 unsigned blocksize = 1 << inode->i_blkbits;
89e10787
NP
2214 struct page *page;
2215 void *fsdata;
2216 pgoff_t index, curidx;
2217 loff_t curpos;
2218 unsigned zerofrom, offset, len;
2219 int err = 0;
1da177e4 2220
89e10787
NP
2221 index = pos >> PAGE_CACHE_SHIFT;
2222 offset = pos & ~PAGE_CACHE_MASK;
2223
2224 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2225 zerofrom = curpos & ~PAGE_CACHE_MASK;
1da177e4
LT
2226 if (zerofrom & (blocksize-1)) {
2227 *bytes |= (blocksize-1);
2228 (*bytes)++;
2229 }
89e10787 2230 len = PAGE_CACHE_SIZE - zerofrom;
1da177e4 2231
89e10787
NP
2232 err = pagecache_write_begin(file, mapping, curpos, len,
2233 AOP_FLAG_UNINTERRUPTIBLE,
2234 &page, &fsdata);
2235 if (err)
2236 goto out;
eebd2aa3 2237 zero_user(page, zerofrom, len);
89e10787
NP
2238 err = pagecache_write_end(file, mapping, curpos, len, len,
2239 page, fsdata);
2240 if (err < 0)
2241 goto out;
2242 BUG_ON(err != len);
2243 err = 0;
061e9746
OH
2244
2245 balance_dirty_pages_ratelimited(mapping);
89e10787 2246 }
1da177e4 2247
89e10787
NP
2248 /* page covers the boundary, find the boundary offset */
2249 if (index == curidx) {
2250 zerofrom = curpos & ~PAGE_CACHE_MASK;
1da177e4 2251 /* if we will expand the thing last block will be filled */
89e10787
NP
2252 if (offset <= zerofrom) {
2253 goto out;
2254 }
2255 if (zerofrom & (blocksize-1)) {
1da177e4
LT
2256 *bytes |= (blocksize-1);
2257 (*bytes)++;
2258 }
89e10787 2259 len = offset - zerofrom;
1da177e4 2260
89e10787
NP
2261 err = pagecache_write_begin(file, mapping, curpos, len,
2262 AOP_FLAG_UNINTERRUPTIBLE,
2263 &page, &fsdata);
2264 if (err)
2265 goto out;
eebd2aa3 2266 zero_user(page, zerofrom, len);
89e10787
NP
2267 err = pagecache_write_end(file, mapping, curpos, len, len,
2268 page, fsdata);
2269 if (err < 0)
2270 goto out;
2271 BUG_ON(err != len);
2272 err = 0;
1da177e4 2273 }
89e10787
NP
2274out:
2275 return err;
2276}
2277
2278/*
 2279 * For moronic filesystems that do not allow holes in files.
2280 * We may have to extend the file.
2281 */
282dc178 2282int cont_write_begin(struct file *file, struct address_space *mapping,
89e10787
NP
2283 loff_t pos, unsigned len, unsigned flags,
2284 struct page **pagep, void **fsdata,
2285 get_block_t *get_block, loff_t *bytes)
2286{
2287 struct inode *inode = mapping->host;
2288 unsigned blocksize = 1 << inode->i_blkbits;
2289 unsigned zerofrom;
2290 int err;
2291
2292 err = cont_expand_zero(file, mapping, pos, bytes);
2293 if (err)
155130a4 2294 return err;
89e10787
NP
2295
2296 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2297 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2298 *bytes |= (blocksize-1);
2299 (*bytes)++;
1da177e4 2300 }
1da177e4 2301
155130a4 2302 return block_write_begin(mapping, pos, len, flags, pagep, get_block);
1da177e4 2303}
1fe72eaa 2304EXPORT_SYMBOL(cont_write_begin);
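Such a filesystem hands cont_write_begin() a pointer to its "zeroed up to here" cursor, so the gap between the old end of file and the write position is zero-filled first (FAT keeps a similar per-inode cursor). A hedged sketch; EXAMPLEFS_I() and i_zeroed_up_to are hypothetical names:

static int nohole_fs_write_begin(struct file *file,
				 struct address_space *mapping,
				 loff_t pos, unsigned len, unsigned flags,
				 struct page **pagep, void **fsdata)
{
	/* hypothetical loff_t field tracking how far the file has been
	 * zero-filled; cont_write_begin() advances it as it fills the gap */
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				examplefs_get_block,
				&EXAMPLEFS_I(mapping->host)->i_zeroed_up_to);
}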
1da177e4 2305
1da177e4
LT
2306int block_commit_write(struct page *page, unsigned from, unsigned to)
2307{
2308 struct inode *inode = page->mapping->host;
2309 __block_commit_write(inode,page,from,to);
2310 return 0;
2311}
1fe72eaa 2312EXPORT_SYMBOL(block_commit_write);
1da177e4 2313
54171690
DC
2314/*
2315 * block_page_mkwrite() is not allowed to change the file size as it gets
2316 * called from a page fault handler when a page is first dirtied. Hence we must
2317 * be careful to check for EOF conditions here. We set the page up correctly
2318 * for a written page which means we get ENOSPC checking when writing into
2319 * holes and correct delalloc and unwritten extent mapping on filesystems that
2320 * support these features.
2321 *
2322 * We are not allowed to take the i_mutex here so we have to play games to
2323 * protect against truncate races as the page could now be beyond EOF. Because
7bb46a67 2324 * truncate writes the inode size before removing pages, once we have the
54171690
DC
2325 * page lock we can determine safely if the page is beyond EOF. If it is not
2326 * beyond EOF, then the page is guaranteed safe against truncation until we
2327 * unlock the page.
2328 */
2329int
c2ec175c 2330block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
54171690
DC
2331 get_block_t get_block)
2332{
c2ec175c 2333 struct page *page = vmf->page;
54171690
DC
2334 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2335 unsigned long end;
2336 loff_t size;
56a76f82 2337 int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
54171690
DC
2338
2339 lock_page(page);
2340 size = i_size_read(inode);
2341 if ((page->mapping != inode->i_mapping) ||
18336338 2342 (page_offset(page) > size)) {
54171690 2343 /* page got truncated out from underneath us */
b827e496
NP
2344 unlock_page(page);
2345 goto out;
54171690
DC
2346 }
2347
2348 /* page is wholly or partially inside EOF */
2349 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2350 end = size & ~PAGE_CACHE_MASK;
2351 else
2352 end = PAGE_CACHE_SIZE;
2353
ebdec241 2354 ret = __block_write_begin(page, 0, end, get_block);
54171690
DC
2355 if (!ret)
2356 ret = block_commit_write(page, 0, end);
2357
56a76f82 2358 if (unlikely(ret)) {
b827e496 2359 unlock_page(page);
56a76f82
NP
2360 if (ret == -ENOMEM)
2361 ret = VM_FAULT_OOM;
2362 else /* -ENOSPC, -EIO, etc */
2363 ret = VM_FAULT_SIGBUS;
b827e496
NP
2364 } else
2365 ret = VM_FAULT_LOCKED;
c2ec175c 2366
b827e496 2367out:
54171690
DC
2368 return ret;
2369}
1fe72eaa 2370EXPORT_SYMBOL(block_page_mkwrite);
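Filesystems normally expose this through ->page_mkwrite in the vm_operations_struct they install for file mmaps; a sketch with hypothetical examplefs_* names:

static int examplefs_page_mkwrite(struct vm_area_struct *vma,
				  struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, examplefs_get_block);
}

static const struct vm_operations_struct examplefs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= examplefs_page_mkwrite,
};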
1da177e4
LT
2371
2372/*
03158cd7 2373 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
1da177e4
LT
2374 * immediately, while under the page lock. So it needs a special end_io
2375 * handler which does not touch the bh after unlocking it.
1da177e4
LT
2376 */
2377static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2378{
68671f35 2379 __end_buffer_read_notouch(bh, uptodate);
1da177e4
LT
2380}
2381
03158cd7
NP
2382/*
 2383 * Attach the singly-linked list of buffers created by nobh_write_begin to
 2384 * the page (converting it to a circular linked list and taking care of page
2385 * dirty races).
2386 */
2387static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2388{
2389 struct buffer_head *bh;
2390
2391 BUG_ON(!PageLocked(page));
2392
2393 spin_lock(&page->mapping->private_lock);
2394 bh = head;
2395 do {
2396 if (PageDirty(page))
2397 set_buffer_dirty(bh);
2398 if (!bh->b_this_page)
2399 bh->b_this_page = head;
2400 bh = bh->b_this_page;
2401 } while (bh != head);
2402 attach_page_buffers(page, head);
2403 spin_unlock(&page->mapping->private_lock);
2404}
2405
1da177e4 2406/*
ea0f04e5
CH
 2407 * On entry, the page is not uptodate at all.
 2408 * On exit the page is fully uptodate in the areas outside (from, to).
7bb46a67 2409 * The filesystem needs to handle block truncation upon failure.
1da177e4 2410 */
ea0f04e5 2411int nobh_write_begin(struct address_space *mapping,
03158cd7
NP
2412 loff_t pos, unsigned len, unsigned flags,
2413 struct page **pagep, void **fsdata,
1da177e4
LT
2414 get_block_t *get_block)
2415{
03158cd7 2416 struct inode *inode = mapping->host;
1da177e4
LT
2417 const unsigned blkbits = inode->i_blkbits;
2418 const unsigned blocksize = 1 << blkbits;
a4b0672d 2419 struct buffer_head *head, *bh;
03158cd7
NP
2420 struct page *page;
2421 pgoff_t index;
2422 unsigned from, to;
1da177e4 2423 unsigned block_in_page;
a4b0672d 2424 unsigned block_start, block_end;
1da177e4 2425 sector_t block_in_file;
1da177e4 2426 int nr_reads = 0;
1da177e4
LT
2427 int ret = 0;
2428 int is_mapped_to_disk = 1;
1da177e4 2429
03158cd7
NP
2430 index = pos >> PAGE_CACHE_SHIFT;
2431 from = pos & (PAGE_CACHE_SIZE - 1);
2432 to = from + len;
2433
54566b2c 2434 page = grab_cache_page_write_begin(mapping, index, flags);
03158cd7
NP
2435 if (!page)
2436 return -ENOMEM;
2437 *pagep = page;
2438 *fsdata = NULL;
2439
2440 if (page_has_buffers(page)) {
309f77ad
NK
2441 ret = __block_write_begin(page, pos, len, get_block);
2442 if (unlikely(ret))
2443 goto out_release;
2444 return ret;
03158cd7 2445 }
a4b0672d 2446
1da177e4
LT
2447 if (PageMappedToDisk(page))
2448 return 0;
2449
a4b0672d
NP
2450 /*
2451 * Allocate buffers so that we can keep track of state, and potentially
2452 * attach them to the page if an error occurs. In the common case of
2453 * no error, they will just be freed again without ever being attached
2454 * to the page (which is all OK, because we're under the page lock).
2455 *
2456 * Be careful: the buffer linked list is a NULL terminated one, rather
2457 * than the circular one we're used to.
2458 */
2459 head = alloc_page_buffers(page, blocksize, 0);
03158cd7
NP
2460 if (!head) {
2461 ret = -ENOMEM;
2462 goto out_release;
2463 }
a4b0672d 2464
1da177e4 2465 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
1da177e4
LT
2466
2467 /*
2468 * We loop across all blocks in the page, whether or not they are
2469 * part of the affected region. This is so we can discover if the
2470 * page is fully mapped-to-disk.
2471 */
a4b0672d 2472 for (block_start = 0, block_in_page = 0, bh = head;
1da177e4 2473 block_start < PAGE_CACHE_SIZE;
a4b0672d 2474 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
1da177e4
LT
2475 int create;
2476
a4b0672d
NP
2477 block_end = block_start + blocksize;
2478 bh->b_state = 0;
1da177e4
LT
2479 create = 1;
2480 if (block_start >= to)
2481 create = 0;
2482 ret = get_block(inode, block_in_file + block_in_page,
a4b0672d 2483 bh, create);
1da177e4
LT
2484 if (ret)
2485 goto failed;
a4b0672d 2486 if (!buffer_mapped(bh))
1da177e4 2487 is_mapped_to_disk = 0;
a4b0672d
NP
2488 if (buffer_new(bh))
2489 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2490 if (PageUptodate(page)) {
2491 set_buffer_uptodate(bh);
1da177e4 2492 continue;
a4b0672d
NP
2493 }
2494 if (buffer_new(bh) || !buffer_mapped(bh)) {
eebd2aa3
CL
2495 zero_user_segments(page, block_start, from,
2496 to, block_end);
1da177e4
LT
2497 continue;
2498 }
a4b0672d 2499 if (buffer_uptodate(bh))
1da177e4
LT
2500 continue; /* reiserfs does this */
2501 if (block_start < from || block_end > to) {
a4b0672d
NP
2502 lock_buffer(bh);
2503 bh->b_end_io = end_buffer_read_nobh;
2504 submit_bh(READ, bh);
2505 nr_reads++;
1da177e4
LT
2506 }
2507 }
2508
2509 if (nr_reads) {
1da177e4
LT
2510 /*
2511 * The page is locked, so these buffers are protected from
2512 * any VM or truncate activity. Hence we don't need to care
2513 * for the buffer_head refcounts.
2514 */
a4b0672d 2515 for (bh = head; bh; bh = bh->b_this_page) {
1da177e4
LT
2516 wait_on_buffer(bh);
2517 if (!buffer_uptodate(bh))
2518 ret = -EIO;
1da177e4
LT
2519 }
2520 if (ret)
2521 goto failed;
2522 }
2523
2524 if (is_mapped_to_disk)
2525 SetPageMappedToDisk(page);
1da177e4 2526
03158cd7 2527 *fsdata = head; /* to be released by nobh_write_end */
a4b0672d 2528
1da177e4
LT
2529 return 0;
2530
2531failed:
03158cd7 2532 BUG_ON(!ret);
1da177e4 2533 /*
a4b0672d
NP
2534 * Error recovery is a bit difficult. We need to zero out blocks that
2535 * were newly allocated, and dirty them to ensure they get written out.
2536 * Buffers need to be attached to the page at this point, otherwise
2537 * the handling of potential IO errors during writeout would be hard
2538 * (could try doing synchronous writeout, but what if that fails too?)
1da177e4 2539 */
03158cd7
NP
2540 attach_nobh_buffers(page, head);
2541 page_zero_new_buffers(page, from, to);
a4b0672d 2542
03158cd7
NP
2543out_release:
2544 unlock_page(page);
2545 page_cache_release(page);
2546 *pagep = NULL;
a4b0672d 2547
7bb46a67 2548 return ret;
2549}
03158cd7 2550EXPORT_SYMBOL(nobh_write_begin);
1da177e4 2551
03158cd7
NP
2552int nobh_write_end(struct file *file, struct address_space *mapping,
2553 loff_t pos, unsigned len, unsigned copied,
2554 struct page *page, void *fsdata)
1da177e4
LT
2555{
2556 struct inode *inode = page->mapping->host;
efdc3131 2557 struct buffer_head *head = fsdata;
03158cd7 2558 struct buffer_head *bh;
5b41e74a 2559 BUG_ON(fsdata != NULL && page_has_buffers(page));
1da177e4 2560
d4cf109f 2561 if (unlikely(copied < len) && head)
5b41e74a
DM
2562 attach_nobh_buffers(page, head);
2563 if (page_has_buffers(page))
2564 return generic_write_end(file, mapping, pos, len,
2565 copied, page, fsdata);
a4b0672d 2566
22c8ca78 2567 SetPageUptodate(page);
1da177e4 2568 set_page_dirty(page);
03158cd7
NP
2569 if (pos+copied > inode->i_size) {
2570 i_size_write(inode, pos+copied);
1da177e4
LT
2571 mark_inode_dirty(inode);
2572 }
03158cd7
NP
2573
2574 unlock_page(page);
2575 page_cache_release(page);
2576
03158cd7
NP
2577 while (head) {
2578 bh = head;
2579 head = head->b_this_page;
2580 free_buffer_head(bh);
2581 }
2582
2583 return copied;
1da177e4 2584}
03158cd7 2585EXPORT_SYMBOL(nobh_write_end);
1da177e4
LT
2586
2587/*
 2588 * nobh_writepage() - based on block_write_full_page() except
2589 * that it tries to operate without attaching bufferheads to
2590 * the page.
2591 */
2592int nobh_writepage(struct page *page, get_block_t *get_block,
2593 struct writeback_control *wbc)
2594{
2595 struct inode * const inode = page->mapping->host;
2596 loff_t i_size = i_size_read(inode);
2597 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2598 unsigned offset;
1da177e4
LT
2599 int ret;
2600
2601 /* Is the page fully inside i_size? */
2602 if (page->index < end_index)
2603 goto out;
2604
2605 /* Is the page fully outside i_size? (truncate in progress) */
2606 offset = i_size & (PAGE_CACHE_SIZE-1);
2607 if (page->index >= end_index+1 || !offset) {
2608 /*
2609 * The page may have dirty, unmapped buffers. For example,
2610 * they may have been added in ext3_writepage(). Make them
2611 * freeable here, so the page does not leak.
2612 */
2613#if 0
2614 /* Not really sure about this - do we need this ? */
2615 if (page->mapping->a_ops->invalidatepage)
2616 page->mapping->a_ops->invalidatepage(page, offset);
2617#endif
2618 unlock_page(page);
2619 return 0; /* don't care */
2620 }
2621
2622 /*
2623 * The page straddles i_size. It must be zeroed out on each and every
2624 * writepage invocation because it may be mmapped. "A file is mapped
2625 * in multiples of the page size. For a file that is not a multiple of
2626 * the page size, the remaining memory is zeroed when mapped, and
2627 * writes to that region are not written out to the file."
2628 */
eebd2aa3 2629 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
1da177e4
LT
2630out:
2631 ret = mpage_writepage(page, get_block, wbc);
2632 if (ret == -EAGAIN)
35c80d5f
CM
2633 ret = __block_write_full_page(inode, page, get_block, wbc,
2634 end_buffer_async_write);
1da177e4
LT
2635 return ret;
2636}
2637EXPORT_SYMBOL(nobh_writepage);
2638
03158cd7
NP
2639int nobh_truncate_page(struct address_space *mapping,
2640 loff_t from, get_block_t *get_block)
1da177e4 2641{
1da177e4
LT
2642 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2643 unsigned offset = from & (PAGE_CACHE_SIZE-1);
03158cd7
NP
2644 unsigned blocksize;
2645 sector_t iblock;
2646 unsigned length, pos;
2647 struct inode *inode = mapping->host;
1da177e4 2648 struct page *page;
03158cd7
NP
2649 struct buffer_head map_bh;
2650 int err;
1da177e4 2651
03158cd7
NP
2652 blocksize = 1 << inode->i_blkbits;
2653 length = offset & (blocksize - 1);
2654
2655 /* Block boundary? Nothing to do */
2656 if (!length)
2657 return 0;
2658
2659 length = blocksize - length;
2660 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4 2661
1da177e4 2662 page = grab_cache_page(mapping, index);
03158cd7 2663 err = -ENOMEM;
1da177e4
LT
2664 if (!page)
2665 goto out;
2666
03158cd7
NP
2667 if (page_has_buffers(page)) {
2668has_buffers:
2669 unlock_page(page);
2670 page_cache_release(page);
2671 return block_truncate_page(mapping, from, get_block);
2672 }
2673
2674 /* Find the buffer that contains "offset" */
2675 pos = blocksize;
2676 while (offset >= pos) {
2677 iblock++;
2678 pos += blocksize;
2679 }
2680
460bcf57
TT
2681 map_bh.b_size = blocksize;
2682 map_bh.b_state = 0;
03158cd7
NP
2683 err = get_block(inode, iblock, &map_bh, 0);
2684 if (err)
2685 goto unlock;
2686 /* unmapped? It's a hole - nothing to do */
2687 if (!buffer_mapped(&map_bh))
2688 goto unlock;
2689
2690 /* Ok, it's mapped. Make sure it's up-to-date */
2691 if (!PageUptodate(page)) {
2692 err = mapping->a_ops->readpage(NULL, page);
2693 if (err) {
2694 page_cache_release(page);
2695 goto out;
2696 }
2697 lock_page(page);
2698 if (!PageUptodate(page)) {
2699 err = -EIO;
2700 goto unlock;
2701 }
2702 if (page_has_buffers(page))
2703 goto has_buffers;
1da177e4 2704 }
eebd2aa3 2705 zero_user(page, offset, length);
03158cd7
NP
2706 set_page_dirty(page);
2707 err = 0;
2708
2709unlock:
1da177e4
LT
2710 unlock_page(page);
2711 page_cache_release(page);
2712out:
03158cd7 2713 return err;
1da177e4
LT
2714}
2715EXPORT_SYMBOL(nobh_truncate_page);
2716
2717int block_truncate_page(struct address_space *mapping,
2718 loff_t from, get_block_t *get_block)
2719{
2720 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2721 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2722 unsigned blocksize;
54b21a79 2723 sector_t iblock;
1da177e4
LT
2724 unsigned length, pos;
2725 struct inode *inode = mapping->host;
2726 struct page *page;
2727 struct buffer_head *bh;
1da177e4
LT
2728 int err;
2729
2730 blocksize = 1 << inode->i_blkbits;
2731 length = offset & (blocksize - 1);
2732
2733 /* Block boundary? Nothing to do */
2734 if (!length)
2735 return 0;
2736
2737 length = blocksize - length;
54b21a79 2738 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1da177e4
LT
2739
2740 page = grab_cache_page(mapping, index);
2741 err = -ENOMEM;
2742 if (!page)
2743 goto out;
2744
2745 if (!page_has_buffers(page))
2746 create_empty_buffers(page, blocksize, 0);
2747
2748 /* Find the buffer that contains "offset" */
2749 bh = page_buffers(page);
2750 pos = blocksize;
2751 while (offset >= pos) {
2752 bh = bh->b_this_page;
2753 iblock++;
2754 pos += blocksize;
2755 }
2756
2757 err = 0;
2758 if (!buffer_mapped(bh)) {
b0cf2321 2759 WARN_ON(bh->b_size != blocksize);
1da177e4
LT
2760 err = get_block(inode, iblock, bh, 0);
2761 if (err)
2762 goto unlock;
2763 /* unmapped? It's a hole - nothing to do */
2764 if (!buffer_mapped(bh))
2765 goto unlock;
2766 }
2767
2768 /* Ok, it's mapped. Make sure it's up-to-date */
2769 if (PageUptodate(page))
2770 set_buffer_uptodate(bh);
2771
33a266dd 2772 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
1da177e4
LT
2773 err = -EIO;
2774 ll_rw_block(READ, 1, &bh);
2775 wait_on_buffer(bh);
2776 /* Uhhuh. Read error. Complain and punt. */
2777 if (!buffer_uptodate(bh))
2778 goto unlock;
2779 }
2780
eebd2aa3 2781 zero_user(page, offset, length);
1da177e4
LT
2782 mark_buffer_dirty(bh);
2783 err = 0;
2784
2785unlock:
2786 unlock_page(page);
2787 page_cache_release(page);
2788out:
2789 return err;
2790}
1fe72eaa 2791EXPORT_SYMBOL(block_truncate_page);
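On a shrinking truncate a filesystem typically zeroes the tail of the new last block with this helper before updating i_size and freeing blocks, so that stale data is not exposed if the file is later extended. A hedged sketch (examplefs_free_blocks() is hypothetical, and truncate_setsize() is assumed to be available in this kernel):

static int examplefs_truncate(struct inode *inode, loff_t new_size)
{
	int err;

	err = block_truncate_page(inode->i_mapping, new_size,
				  examplefs_get_block);
	if (err)
		return err;
	truncate_setsize(inode, new_size);	/* update i_size, drop pagecache */
	examplefs_free_blocks(inode, new_size);	/* hypothetical: release blocks */
	return 0;
}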
1da177e4
LT
2792
2793/*
2794 * The generic ->writepage function for buffer-backed address_spaces
35c80d5f 2795 * this form passes in the end_io handler used to finish the IO.
1da177e4 2796 */
35c80d5f
CM
2797int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2798 struct writeback_control *wbc, bh_end_io_t *handler)
1da177e4
LT
2799{
2800 struct inode * const inode = page->mapping->host;
2801 loff_t i_size = i_size_read(inode);
2802 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2803 unsigned offset;
1da177e4
LT
2804
2805 /* Is the page fully inside i_size? */
2806 if (page->index < end_index)
35c80d5f
CM
2807 return __block_write_full_page(inode, page, get_block, wbc,
2808 handler);
1da177e4
LT
2809
2810 /* Is the page fully outside i_size? (truncate in progress) */
2811 offset = i_size & (PAGE_CACHE_SIZE-1);
2812 if (page->index >= end_index+1 || !offset) {
2813 /*
2814 * The page may have dirty, unmapped buffers. For example,
2815 * they may have been added in ext3_writepage(). Make them
2816 * freeable here, so the page does not leak.
2817 */
aaa4059b 2818 do_invalidatepage(page, 0);
1da177e4
LT
2819 unlock_page(page);
2820 return 0; /* don't care */
2821 }
2822
2823 /*
2824 * The page straddles i_size. It must be zeroed out on each and every
2a61aa40 2825 * writepage invocation because it may be mmapped. "A file is mapped
1da177e4
LT
2826 * in multiples of the page size. For a file that is not a multiple of
2827 * the page size, the remaining memory is zeroed when mapped, and
2828 * writes to that region are not written out to the file."
2829 */
eebd2aa3 2830 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
35c80d5f 2831 return __block_write_full_page(inode, page, get_block, wbc, handler);
1da177e4 2832}
1fe72eaa 2833EXPORT_SYMBOL(block_write_full_page_endio);
1da177e4 2834
35c80d5f
CM
2835/*
2836 * The generic ->writepage function for buffer-backed address_spaces
2837 */
2838int block_write_full_page(struct page *page, get_block_t *get_block,
2839 struct writeback_control *wbc)
2840{
2841 return block_write_full_page_endio(page, get_block, wbc,
2842 end_buffer_async_write);
2843}
1fe72eaa 2844EXPORT_SYMBOL(block_write_full_page);
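As with ->readpage, the corresponding ->writepage is usually a thin wrapper; a sketch assuming the hypothetical examplefs_get_block():

static int examplefs_writepage(struct page *page,
			       struct writeback_control *wbc)
{
	return block_write_full_page(page, examplefs_get_block, wbc);
}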
35c80d5f 2845
1da177e4
LT
2846sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2847 get_block_t *get_block)
2848{
2849 struct buffer_head tmp;
2850 struct inode *inode = mapping->host;
2851 tmp.b_state = 0;
2852 tmp.b_blocknr = 0;
b0cf2321 2853 tmp.b_size = 1 << inode->i_blkbits;
1da177e4
LT
2854 get_block(inode, block, &tmp, 0);
2855 return tmp.b_blocknr;
2856}
1fe72eaa 2857EXPORT_SYMBOL(generic_block_bmap);
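The ->bmap method (used by the FIBMAP ioctl and the swapfile code, among others) is another one-liner for such filesystems; a sketch with the hypothetical callback:

static sector_t examplefs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, examplefs_get_block);
}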
1da177e4 2858
6712ecf8 2859static void end_bio_bh_io_sync(struct bio *bio, int err)
1da177e4
LT
2860{
2861 struct buffer_head *bh = bio->bi_private;
2862
1da177e4
LT
2863 if (err == -EOPNOTSUPP) {
2864 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
1da177e4
LT
2865 }
2866
08bafc03
KM
2867 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2868 set_bit(BH_Quiet, &bh->b_state);
2869
1da177e4
LT
2870 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2871 bio_put(bio);
1da177e4
LT
2872}
2873
2874int submit_bh(int rw, struct buffer_head * bh)
2875{
2876 struct bio *bio;
2877 int ret = 0;
2878
2879 BUG_ON(!buffer_locked(bh));
2880 BUG_ON(!buffer_mapped(bh));
2881 BUG_ON(!bh->b_end_io);
8fb0e342
AK
2882 BUG_ON(buffer_delay(bh));
2883 BUG_ON(buffer_unwritten(bh));
1da177e4 2884
1da177e4 2885 /*
48fd4f93 2886 * Only clear out a write error when rewriting
1da177e4 2887 */
48fd4f93 2888 if (test_set_buffer_req(bh) && (rw & WRITE))
1da177e4
LT
2889 clear_buffer_write_io_error(bh);
2890
2891 /*
2892 * from here on down, it's all bio -- do the initial mapping,
2893 * submit_bio -> generic_make_request may further map this bio around
2894 */
2895 bio = bio_alloc(GFP_NOIO, 1);
2896
2897 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2898 bio->bi_bdev = bh->b_bdev;
2899 bio->bi_io_vec[0].bv_page = bh->b_page;
2900 bio->bi_io_vec[0].bv_len = bh->b_size;
2901 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2902
2903 bio->bi_vcnt = 1;
2904 bio->bi_idx = 0;
2905 bio->bi_size = bh->b_size;
2906
2907 bio->bi_end_io = end_bio_bh_io_sync;
2908 bio->bi_private = bh;
2909
2910 bio_get(bio);
2911 submit_bio(rw, bio);
2912
2913 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2914 ret = -EOPNOTSUPP;
2915
2916 bio_put(bio);
2917 return ret;
2918}
1fe72eaa 2919EXPORT_SYMBOL(submit_bh);
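submit_bh() expects a locked, mapped buffer with b_end_io already set; the completion handler is what unlocks it. A hedged sketch of a synchronous single-block read built on that contract (this is essentially what bh_submit_read() below does):

static int example_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);				/* dropped by end_buffer_read_sync */
	bh->b_end_io = end_buffer_read_sync;	/* unlocks the buffer on completion */
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}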
1da177e4
LT
2920
2921/**
2922 * ll_rw_block: low-level access to block devices (DEPRECATED)
9cb569d6 2923 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
1da177e4
LT
2924 * @nr: number of &struct buffer_heads in the array
2925 * @bhs: array of pointers to &struct buffer_head
2926 *
a7662236
JK
2927 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2928 * requests an I/O operation on them, either a %READ or a %WRITE. The third
9cb569d6
CH
2929 * %READA option is described in the documentation for generic_make_request()
2930 * which ll_rw_block() calls.
1da177e4
LT
2931 *
2932 * This function drops any buffer that it cannot get a lock on (with the
9cb569d6
CH
2933 * BH_Lock state bit), any buffer that appears to be clean when doing a write
2934 * request, and any buffer that appears to be up-to-date when doing read
 2935 * request. Further, it marks as clean any buffers that are processed for
2936 * writing (the buffer cache won't assume that they are actually clean
2937 * until the buffer gets unlocked).
1da177e4
LT
2938 *
 2939 * ll_rw_block sets b_end_io to a simple completion handler that marks
 2940 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2941 * any waiters.
2942 *
2943 * All of the buffers must be for the same device, and must also be a
2944 * multiple of the current approved size for the device.
2945 */
2946void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2947{
2948 int i;
2949
2950 for (i = 0; i < nr; i++) {
2951 struct buffer_head *bh = bhs[i];
2952
9cb569d6 2953 if (!trylock_buffer(bh))
1da177e4 2954 continue;
9cb569d6 2955 if (rw == WRITE) {
1da177e4 2956 if (test_clear_buffer_dirty(bh)) {
76c3073a 2957 bh->b_end_io = end_buffer_write_sync;
e60e5c50 2958 get_bh(bh);
9cb569d6 2959 submit_bh(WRITE, bh);
1da177e4
LT
2960 continue;
2961 }
2962 } else {
1da177e4 2963 if (!buffer_uptodate(bh)) {
76c3073a 2964 bh->b_end_io = end_buffer_read_sync;
e60e5c50 2965 get_bh(bh);
1da177e4
LT
2966 submit_bh(rw, bh);
2967 continue;
2968 }
2969 }
2970 unlock_buffer(bh);
1da177e4
LT
2971 }
2972}
1fe72eaa 2973EXPORT_SYMBOL(ll_rw_block);
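A typical (if deprecated) use is batching reads of a few metadata blocks and then waiting for them. A hedged sketch, where the caller owns the buffer_head references and must brelse() them afterwards:

static int example_read_blocks(struct super_block *sb, sector_t *blocks,
			       int nr, struct buffer_head **bhs)
{
	int i;

	for (i = 0; i < nr; i++) {
		bhs[i] = sb_getblk(sb, blocks[i]);
		if (!bhs[i])
			return -ENOMEM;
	}

	/* buffers that are already uptodate are skipped */
	ll_rw_block(READ, nr, bhs);

	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			return -EIO;
	}
	return 0;
}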
1da177e4 2974
9cb569d6
CH
2975void write_dirty_buffer(struct buffer_head *bh, int rw)
2976{
2977 lock_buffer(bh);
2978 if (!test_clear_buffer_dirty(bh)) {
2979 unlock_buffer(bh);
2980 return;
2981 }
2982 bh->b_end_io = end_buffer_write_sync;
2983 get_bh(bh);
2984 submit_bh(rw, bh);
2985}
2986EXPORT_SYMBOL(write_dirty_buffer);
2987
1da177e4
LT
2988/*
2989 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2990 * and then start new I/O and then wait upon it. The caller must have a ref on
2991 * the buffer_head.
2992 */
87e99511 2993int __sync_dirty_buffer(struct buffer_head *bh, int rw)
1da177e4
LT
2994{
2995 int ret = 0;
2996
2997 WARN_ON(atomic_read(&bh->b_count) < 1);
2998 lock_buffer(bh);
2999 if (test_clear_buffer_dirty(bh)) {
3000 get_bh(bh);
3001 bh->b_end_io = end_buffer_write_sync;
87e99511 3002 ret = submit_bh(rw, bh);
1da177e4 3003 wait_on_buffer(bh);
1da177e4
LT
3004 if (!ret && !buffer_uptodate(bh))
3005 ret = -EIO;
3006 } else {
3007 unlock_buffer(bh);
3008 }
3009 return ret;
3010}
87e99511
CH
3011EXPORT_SYMBOL(__sync_dirty_buffer);
3012
3013int sync_dirty_buffer(struct buffer_head *bh)
3014{
3015 return __sync_dirty_buffer(bh, WRITE_SYNC);
3016}
1fe72eaa 3017EXPORT_SYMBOL(sync_dirty_buffer);
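A hedged sketch of the usual pattern for a small synchronous metadata update: modify the block in the buffer cache, mark it dirty and wait for it to reach disk (len is assumed to be at most the block size):

static int example_update_block(struct super_block *sb, sector_t block,
				const void *data, size_t len)
{
	struct buffer_head *bh;
	int err;

	bh = sb_bread(sb, block);	/* read (or find cached) the block */
	if (!bh)
		return -EIO;
	memcpy(bh->b_data, data, len);
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);	/* submit and wait for completion */
	brelse(bh);
	return err;
}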
1da177e4
LT
3018
3019/*
3020 * try_to_free_buffers() checks if all the buffers on this particular page
3021 * are unused, and releases them if so.
3022 *
3023 * Exclusion against try_to_free_buffers may be obtained by either
3024 * locking the page or by holding its mapping's private_lock.
3025 *
3026 * If the page is dirty but all the buffers are clean then we need to
3027 * be sure to mark the page clean as well. This is because the page
3028 * may be against a block device, and a later reattachment of buffers
3029 * to a dirty page will set *all* buffers dirty. Which would corrupt
3030 * filesystem data on the same device.
3031 *
3032 * The same applies to regular filesystem pages: if all the buffers are
3033 * clean then we set the page clean and proceed. To do that, we require
3034 * total exclusion from __set_page_dirty_buffers(). That is obtained with
3035 * private_lock.
3036 *
3037 * try_to_free_buffers() is non-blocking.
3038 */
3039static inline int buffer_busy(struct buffer_head *bh)
3040{
3041 return atomic_read(&bh->b_count) |
3042 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3043}
3044
3045static int
3046drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3047{
3048 struct buffer_head *head = page_buffers(page);
3049 struct buffer_head *bh;
3050
3051 bh = head;
3052 do {
de7d5a3b 3053 if (buffer_write_io_error(bh) && page->mapping)
1da177e4
LT
3054 set_bit(AS_EIO, &page->mapping->flags);
3055 if (buffer_busy(bh))
3056 goto failed;
3057 bh = bh->b_this_page;
3058 } while (bh != head);
3059
3060 do {
3061 struct buffer_head *next = bh->b_this_page;
3062
535ee2fb 3063 if (bh->b_assoc_map)
1da177e4
LT
3064 __remove_assoc_queue(bh);
3065 bh = next;
3066 } while (bh != head);
3067 *buffers_to_free = head;
3068 __clear_page_buffers(page);
3069 return 1;
3070failed:
3071 return 0;
3072}
3073
3074int try_to_free_buffers(struct page *page)
3075{
3076 struct address_space * const mapping = page->mapping;
3077 struct buffer_head *buffers_to_free = NULL;
3078 int ret = 0;
3079
3080 BUG_ON(!PageLocked(page));
ecdfc978 3081 if (PageWriteback(page))
1da177e4
LT
3082 return 0;
3083
3084 if (mapping == NULL) { /* can this still happen? */
3085 ret = drop_buffers(page, &buffers_to_free);
3086 goto out;
3087 }
3088
3089 spin_lock(&mapping->private_lock);
3090 ret = drop_buffers(page, &buffers_to_free);
ecdfc978
LT
3091
3092 /*
3093 * If the filesystem writes its buffers by hand (eg ext3)
3094 * then we can have clean buffers against a dirty page. We
3095 * clean the page here; otherwise the VM will never notice
3096 * that the filesystem did any IO at all.
3097 *
3098 * Also, during truncate, discard_buffer will have marked all
3099 * the page's buffers clean. We discover that here and clean
3100 * the page also.
87df7241
NP
3101 *
3102 * private_lock must be held over this entire operation in order
3103 * to synchronise against __set_page_dirty_buffers and prevent the
3104 * dirty bit from being lost.
ecdfc978
LT
3105 */
3106 if (ret)
3107 cancel_dirty_page(page, PAGE_CACHE_SIZE);
87df7241 3108 spin_unlock(&mapping->private_lock);
1da177e4
LT
3109out:
3110 if (buffers_to_free) {
3111 struct buffer_head *bh = buffers_to_free;
3112
3113 do {
3114 struct buffer_head *next = bh->b_this_page;
3115 free_buffer_head(bh);
3116 bh = next;
3117 } while (bh != buffers_to_free);
3118 }
3119 return ret;
3120}
3121EXPORT_SYMBOL(try_to_free_buffers);
3122
1da177e4
LT
3123/*
3124 * There are no bdflush tunables left. But distributions are
3125 * still running obsolete flush daemons, so we terminate them here.
3126 *
3127 * Use of bdflush() is deprecated and will be removed in a future kernel.
5b0830cb 3128 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
1da177e4 3129 */
bdc480e3 3130SYSCALL_DEFINE2(bdflush, int, func, long, data)
1da177e4
LT
3131{
3132 static int msg_count;
3133
3134 if (!capable(CAP_SYS_ADMIN))
3135 return -EPERM;
3136
3137 if (msg_count < 5) {
3138 msg_count++;
3139 printk(KERN_INFO
3140 "warning: process `%s' used the obsolete bdflush"
3141 " system call\n", current->comm);
3142 printk(KERN_INFO "Fix your initscripts?\n");
3143 }
3144
3145 if (func == 1)
3146 do_exit(0);
3147 return 0;
3148}
3149
3150/*
3151 * Buffer-head allocation
3152 */
e18b890b 3153static struct kmem_cache *bh_cachep;
1da177e4
LT
3154
3155/*
3156 * Once the number of bh's in the machine exceeds this level, we start
3157 * stripping them in writeback.
3158 */
3159static int max_buffer_heads;
3160
3161int buffer_heads_over_limit;
3162
3163struct bh_accounting {
3164 int nr; /* Number of live bh's */
3165 int ratelimit; /* Limit cacheline bouncing */
3166};
3167
3168static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3169
3170static void recalc_bh_state(void)
3171{
3172 int i;
3173 int tot = 0;
3174
ee1be862 3175 if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
1da177e4 3176 return;
c7b92516 3177 __this_cpu_write(bh_accounting.ratelimit, 0);
8a143426 3178 for_each_online_cpu(i)
1da177e4
LT
3179 tot += per_cpu(bh_accounting, i).nr;
3180 buffer_heads_over_limit = (tot > max_buffer_heads);
3181}
c7b92516 3182
dd0fc66f 3183struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
1da177e4 3184{
019b4d12 3185 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
1da177e4 3186 if (ret) {
a35afb83 3187 INIT_LIST_HEAD(&ret->b_assoc_buffers);
c7b92516
CL
3188 preempt_disable();
3189 __this_cpu_inc(bh_accounting.nr);
1da177e4 3190 recalc_bh_state();
c7b92516 3191 preempt_enable();
1da177e4
LT
3192 }
3193 return ret;
3194}
3195EXPORT_SYMBOL(alloc_buffer_head);
3196
3197void free_buffer_head(struct buffer_head *bh)
3198{
3199 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3200 kmem_cache_free(bh_cachep, bh);
c7b92516
CL
3201 preempt_disable();
3202 __this_cpu_dec(bh_accounting.nr);
1da177e4 3203 recalc_bh_state();
c7b92516 3204 preempt_enable();
1da177e4
LT
3205}
3206EXPORT_SYMBOL(free_buffer_head);
3207
1da177e4
LT
3208static void buffer_exit_cpu(int cpu)
3209{
3210 int i;
3211 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3212
3213 for (i = 0; i < BH_LRU_SIZE; i++) {
3214 brelse(b->bhs[i]);
3215 b->bhs[i] = NULL;
3216 }
c7b92516 3217 this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
8a143426 3218 per_cpu(bh_accounting, cpu).nr = 0;
1da177e4
LT
3219}
3220
3221static int buffer_cpu_notify(struct notifier_block *self,
3222 unsigned long action, void *hcpu)
3223{
8bb78442 3224 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1da177e4
LT
3225 buffer_exit_cpu((unsigned long)hcpu);
3226 return NOTIFY_OK;
3227}
1da177e4 3228
389d1b08 3229/**
a6b91919 3230 * bh_uptodate_or_lock - Test whether the buffer is uptodate
389d1b08
AK
3231 * @bh: struct buffer_head
3232 *
3233 * Return true if the buffer is up-to-date and false,
3234 * with the buffer locked, if not.
3235 */
3236int bh_uptodate_or_lock(struct buffer_head *bh)
3237{
3238 if (!buffer_uptodate(bh)) {
3239 lock_buffer(bh);
3240 if (!buffer_uptodate(bh))
3241 return 0;
3242 unlock_buffer(bh);
3243 }
3244 return 1;
3245}
3246EXPORT_SYMBOL(bh_uptodate_or_lock);
3247
3248/**
a6b91919 3249 * bh_submit_read - Submit a locked buffer for reading
389d1b08
AK
3250 * @bh: struct buffer_head
3251 *
3252 * Returns zero on success and -EIO on error.
3253 */
3254int bh_submit_read(struct buffer_head *bh)
3255{
3256 BUG_ON(!buffer_locked(bh));
3257
3258 if (buffer_uptodate(bh)) {
3259 unlock_buffer(bh);
3260 return 0;
3261 }
3262
3263 get_bh(bh);
3264 bh->b_end_io = end_buffer_read_sync;
3265 submit_bh(READ, bh);
3266 wait_on_buffer(bh);
3267 if (buffer_uptodate(bh))
3268 return 0;
3269 return -EIO;
3270}
3271EXPORT_SYMBOL(bh_submit_read);
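The two helpers above are designed to be paired: bh_uptodate_or_lock() avoids taking the buffer lock in the common already-uptodate case, and bh_submit_read() then performs the read on the locked buffer. A minimal sketch:

static int example_ensure_uptodate(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already uptodate, buffer left unlocked */
	/* buffer is locked and not uptodate: read it; bh_submit_read()
	 * unlocks it on completion */
	return bh_submit_read(bh);
}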
3272
1da177e4
LT
3273void __init buffer_init(void)
3274{
3275 int nrpages;
3276
b98938c3
CL
3277 bh_cachep = kmem_cache_create("buffer_head",
3278 sizeof(struct buffer_head), 0,
3279 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3280 SLAB_MEM_SPREAD),
019b4d12 3281 NULL);
1da177e4
LT
3282
3283 /*
3284 * Limit the bh occupancy to 10% of ZONE_NORMAL
3285 */
3286 nrpages = (nr_free_buffer_pages() * 10) / 100;
3287 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3288 hotcpu_notifier(buffer_cpu_notify, 0);
3289}