/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002	akpm@zip.com.au
 *		Initial version
 * 27Jun2002	axboe@suse.de
 *		use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this? If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard. See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
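/*
 * With the bio completion convention of this kernel generation, ->bi_end_io
 * can be invoked for partial completions: a non-zero bio->bi_size means more
 * I/O is still outstanding.  That is why both handlers below bail out with a
 * return value of 1 until the final completion arrives, and only then walk
 * the bio_vec array and release the pages.
 */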
static int mpage_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

        if (bio->bi_size)
                return 1;

        do {
                struct page *page = bvec->bv_page;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);

                if (uptodate) {
                        SetPageUptodate(page);
                } else {
                        ClearPageUptodate(page);
                        SetPageError(page);
                }
                unlock_page(page);
        } while (bvec >= bio->bi_io_vec);
        bio_put(bio);
        return 0;
}

static int mpage_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

        if (bio->bi_size)
                return 1;

        do {
                struct page *page = bvec->bv_page;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);

                if (!uptodate)
                        SetPageError(page);
                end_page_writeback(page);
        } while (bvec >= bio->bi_io_vec);
        bio_put(bio);
        return 0;
}

struct bio *mpage_bio_submit(int rw, struct bio *bio)
{
        bio->bi_end_io = mpage_end_io_read;
        if (rw == WRITE)
                bio->bi_end_io = mpage_end_io_write;
        submit_bio(rw, bio);
        return NULL;
}

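/*
 * Allocate a bio for nr_vecs pages aimed at first_sector on bdev.  If the
 * allocation fails while we are the memory reclaim/writeback task
 * (PF_MEMALLOC), keep retrying with half as many vecs so that writeout can
 * still make forward progress under memory pressure.
 */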
static struct bio *
mpage_alloc(struct block_device *bdev,
                sector_t first_sector, int nr_vecs,
                unsigned int __nocast gfp_flags)
{
        struct bio *bio;

        bio = bio_alloc(gfp_flags, nr_vecs);

        if (bio == NULL && (current->flags & PF_MEMALLOC)) {
                while (!bio && (nr_vecs /= 2))
                        bio = bio_alloc(gfp_flags, nr_vecs);
        }

        if (bio) {
                bio->bi_bdev = bdev;
                bio->bi_sector = first_sector;
        }
        return bio;
}

/*
 * support function for mpage_readpages.  The fs supplied get_block might
 * return an up to date buffer.  This is used to map that buffer into
 * the page, which allows readpage to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void
map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
{
        struct inode *inode = page->mapping->host;
        struct buffer_head *page_bh, *head;
        int block = 0;

        if (!page_has_buffers(page)) {
                /*
                 * don't make any buffers if there is only one buffer on
                 * the page and the page just needs to be set up to date
                 */
                if (inode->i_blkbits == PAGE_CACHE_SHIFT &&
                    buffer_uptodate(bh)) {
                        SetPageUptodate(page);
                        return;
                }
                create_empty_buffers(page, 1 << inode->i_blkbits, 0);
        }
        head = page_buffers(page);
        page_bh = head;
        do {
                if (block == page_block) {
                        page_bh->b_state = bh->b_state;
                        page_bh->b_bdev = bh->b_bdev;
                        page_bh->b_blocknr = bh->b_blocknr;
                        break;
                }
                page_bh = page_bh->b_this_page;
                block++;
        } while (page_bh != head);
}

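/*
 * Map the blocks of the page and, when they form one contiguous run on disk,
 * add the page to the bio, submitting the previous bio first if this page is
 * not adjacent to it.  Anything irregular - existing page buffers, a
 * non-hole after a hole, discontiguous blocks, or a buffer which get_block
 * already brought up to date - takes the "confused" path and falls back to
 * block_read_full_page().
 */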
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
                        sector_t *last_block_in_bio, get_block_t get_block)
{
        struct inode *inode = page->mapping->host;
        const unsigned blkbits = inode->i_blkbits;
        const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
        const unsigned blocksize = 1 << blkbits;
        sector_t block_in_file;
        sector_t last_block;
        sector_t blocks[MAX_BUF_PER_PAGE];
        unsigned page_block;
        unsigned first_hole = blocks_per_page;
        struct block_device *bdev = NULL;
        struct buffer_head bh;
        int length;
        int fully_mapped = 1;

        if (page_has_buffers(page))
                goto confused;

        block_in_file = page->index << (PAGE_CACHE_SHIFT - blkbits);
        last_block = (i_size_read(inode) + blocksize - 1) >> blkbits;

        bh.b_page = page;
        for (page_block = 0; page_block < blocks_per_page;
                                page_block++, block_in_file++) {
                bh.b_state = 0;
                if (block_in_file < last_block) {
                        if (get_block(inode, block_in_file, &bh, 0))
                                goto confused;
                }

                if (!buffer_mapped(&bh)) {
                        fully_mapped = 0;
                        if (first_hole == blocks_per_page)
                                first_hole = page_block;
                        continue;
                }

                /* some filesystems will copy data into the page during
                 * the get_block call, in which case we don't want to
                 * read it again.  map_buffer_to_page copies the data
                 * we just collected from get_block into the page's buffers
                 * so readpage doesn't have to repeat the get_block call
                 */
                if (buffer_uptodate(&bh)) {
                        map_buffer_to_page(page, &bh, page_block);
                        goto confused;
                }

                if (first_hole != blocks_per_page)
                        goto confused;          /* hole -> non-hole */

                /* Contiguous blocks? */
                if (page_block && blocks[page_block-1] != bh.b_blocknr-1)
                        goto confused;
                blocks[page_block] = bh.b_blocknr;
                bdev = bh.b_bdev;
        }

        if (first_hole != blocks_per_page) {
                char *kaddr = kmap_atomic(page, KM_USER0);
                memset(kaddr + (first_hole << blkbits), 0,
                                PAGE_CACHE_SIZE - (first_hole << blkbits));
                flush_dcache_page(page);
                kunmap_atomic(kaddr, KM_USER0);
                if (first_hole == 0) {
                        SetPageUptodate(page);
                        unlock_page(page);
                        goto out;
                }
        } else if (fully_mapped) {
                SetPageMappedToDisk(page);
        }

        /*
         * This page will go to BIO.  Do we need to send this BIO off first?
         */
        if (bio && (*last_block_in_bio != blocks[0] - 1))
                bio = mpage_bio_submit(READ, bio);

alloc_new:
        if (bio == NULL) {
                bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
                                min_t(int, nr_pages, bio_get_nr_vecs(bdev)),
                                GFP_KERNEL);
                if (bio == NULL)
                        goto confused;
        }

        length = first_hole << blkbits;
        if (bio_add_page(bio, page, length, 0) < length) {
                bio = mpage_bio_submit(READ, bio);
                goto alloc_new;
        }

        if (buffer_boundary(&bh) || (first_hole != blocks_per_page))
                bio = mpage_bio_submit(READ, bio);
        else
                *last_block_in_bio = blocks[blocks_per_page - 1];
out:
        return bio;

confused:
        if (bio)
                bio = mpage_bio_submit(READ, bio);
        if (!PageUptodate(page))
                block_read_full_page(page, get_block);
        else
                unlock_page(page);
        goto out;
}

/**
 * mpage_readpages - populate an address space with some pages, and
 *                   start reads against them.
 *
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 *
 *   The page at @pages->prev has the lowest file offset, and reads should be
 *   issued in @pages->prev to @pages->next order.
 *
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
                                unsigned nr_pages, get_block_t get_block)
{
        struct bio *bio = NULL;
        unsigned page_idx;
        sector_t last_block_in_bio = 0;
        struct pagevec lru_pvec;

        pagevec_init(&lru_pvec, 0);
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                struct page *page = list_entry(pages->prev, struct page, lru);

                prefetchw(&page->flags);
                list_del(&page->lru);
                if (!add_to_page_cache(page, mapping,
                                        page->index, GFP_KERNEL)) {
                        bio = do_mpage_readpage(bio, page,
                                        nr_pages - page_idx,
                                        &last_block_in_bio, get_block);
                        if (!pagevec_add(&lru_pvec, page))
                                __pagevec_lru_add(&lru_pvec);
                } else {
                        page_cache_release(page);
                }
        }
        pagevec_lru_add(&lru_pvec);
        BUG_ON(!list_empty(pages));
        if (bio)
                mpage_bio_submit(READ, bio);
        return 0;
}
EXPORT_SYMBOL(mpage_readpages);
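
/*
 * Typical usage (illustrative, modelled on how ext2-style filesystems wire
 * this up; ext2_get_block is that filesystem's block mapper and is not part
 * of this file):
 *
 *	static int ext2_readpages(struct file *file,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return mpage_readpages(mapping, pages, nr_pages, ext2_get_block);
 *	}
 *
 * The filesystem then points its address_space_operations ->readpages at
 * that wrapper.
 */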

/*
 * This isn't called much at all
 */
int mpage_readpage(struct page *page, get_block_t get_block)
{
        struct bio *bio = NULL;
        sector_t last_block_in_bio = 0;

        bio = do_mpage_readpage(bio, page, 1,
                        &last_block_in_bio, get_block);
        if (bio)
                mpage_bio_submit(READ, bio);
        return 0;
}
EXPORT_SYMBOL(mpage_readpage);
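
/*
 * The single-page variant is wired up the same way (again illustrative,
 * following the ext2 pattern):
 *
 *	static int ext2_readpage(struct file *file, struct page *page)
 *	{
 *		return mpage_readpage(page, ext2_get_block);
 *	}
 */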

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (16-page) BIOs.
 */
static struct bio *
__mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
        sector_t *last_block_in_bio, int *ret, struct writeback_control *wbc,
        writepage_t writepage_fn)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode = page->mapping->host;
        const unsigned blkbits = inode->i_blkbits;
        unsigned long end_index;
        const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
        sector_t last_block;
        sector_t block_in_file;
        sector_t blocks[MAX_BUF_PER_PAGE];
        unsigned page_block;
        unsigned first_unmapped = blocks_per_page;
        struct block_device *bdev = NULL;
        int boundary = 0;
        sector_t boundary_block = 0;
        struct block_device *boundary_bdev = NULL;
        int length;
        struct buffer_head map_bh;
        loff_t i_size = i_size_read(inode);

        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                /* If they're all mapped and dirty, do it */
                page_block = 0;
                do {
                        BUG_ON(buffer_locked(bh));
                        if (!buffer_mapped(bh)) {
                                /*
                                 * unmapped dirty buffers are created by
                                 * __set_page_dirty_buffers -> mmapped data
                                 */
                                if (buffer_dirty(bh))
                                        goto confused;
                                if (first_unmapped == blocks_per_page)
                                        first_unmapped = page_block;
                                continue;
                        }

                        if (first_unmapped != blocks_per_page)
                                goto confused;  /* hole -> non-hole */

                        if (!buffer_dirty(bh) || !buffer_uptodate(bh))
                                goto confused;
                        if (page_block) {
                                if (bh->b_blocknr != blocks[page_block-1] + 1)
                                        goto confused;
                        }
                        blocks[page_block++] = bh->b_blocknr;
                        boundary = buffer_boundary(bh);
                        if (boundary) {
                                boundary_block = bh->b_blocknr;
                                boundary_bdev = bh->b_bdev;
                        }
                        bdev = bh->b_bdev;
                } while ((bh = bh->b_this_page) != head);

                if (first_unmapped)
                        goto page_is_mapped;

                /*
                 * Page has buffers, but they are all unmapped. The page was
                 * created by pagein or read over a hole which was handled by
                 * block_read_full_page().  If this address_space is also
                 * using mpage_readpages then this can rarely happen.
                 */
                goto confused;
        }

        /*
         * The page has no buffers: map it to disk
         */
        BUG_ON(!PageUptodate(page));
        block_in_file = page->index << (PAGE_CACHE_SHIFT - blkbits);
        last_block = (i_size - 1) >> blkbits;
        map_bh.b_page = page;
        for (page_block = 0; page_block < blocks_per_page; ) {

                map_bh.b_state = 0;
                if (get_block(inode, block_in_file, &map_bh, 1))
                        goto confused;
                if (buffer_new(&map_bh))
                        unmap_underlying_metadata(map_bh.b_bdev,
                                                map_bh.b_blocknr);
                if (buffer_boundary(&map_bh)) {
                        boundary_block = map_bh.b_blocknr;
                        boundary_bdev = map_bh.b_bdev;
                }
                if (page_block) {
                        if (map_bh.b_blocknr != blocks[page_block-1] + 1)
                                goto confused;
                }
                blocks[page_block++] = map_bh.b_blocknr;
                boundary = buffer_boundary(&map_bh);
                bdev = map_bh.b_bdev;
                if (block_in_file == last_block)
                        break;
                block_in_file++;
        }
        BUG_ON(page_block == 0);

        first_unmapped = page_block;

page_is_mapped:
        end_index = i_size >> PAGE_CACHE_SHIFT;
        if (page->index >= end_index) {
                /*
                 * The page straddles i_size.  It must be zeroed out on each
                 * and every writepage invocation because it may be mmapped.
                 * "A file is mapped in multiples of the page size.  For a file
                 * that is not a multiple of the page size, the remaining memory
                 * is zeroed when mapped, and writes to that region are not
                 * written out to the file."
                 */
                unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);
                char *kaddr;

                if (page->index > end_index || !offset)
                        goto confused;
                kaddr = kmap_atomic(page, KM_USER0);
                memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
                flush_dcache_page(page);
                kunmap_atomic(kaddr, KM_USER0);
        }

        /*
         * This page will go to BIO.  Do we need to send this BIO off first?
         */
        if (bio && *last_block_in_bio != blocks[0] - 1)
                bio = mpage_bio_submit(WRITE, bio);

alloc_new:
        if (bio == NULL) {
                bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
                                bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH);
                if (bio == NULL)
                        goto confused;
        }

        /*
         * Must try to add the page before marking the buffer clean or
         * the confused fail path above (OOM) will be very confused when
         * it finds all bh marked clean (i.e. it will not write anything)
         */
        length = first_unmapped << blkbits;
        if (bio_add_page(bio, page, length, 0) < length) {
                bio = mpage_bio_submit(WRITE, bio);
                goto alloc_new;
        }

        /*
         * OK, we have our BIO, so we can now mark the buffers clean.  Make
         * sure to only clean buffers which we know we'll be writing.
         */
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;
                unsigned buffer_counter = 0;

                do {
                        if (buffer_counter++ == first_unmapped)
                                break;
                        clear_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);

                /*
                 * we cannot drop the bh if the page is not uptodate
                 * or a concurrent readpage would fail to serialize with the bh
                 * and it would read from disk before we reach the platter.
                 */
                if (buffer_heads_over_limit && PageUptodate(page))
                        try_to_free_buffers(page);
        }

        BUG_ON(PageWriteback(page));
        set_page_writeback(page);
        unlock_page(page);
        if (boundary || (first_unmapped != blocks_per_page)) {
                bio = mpage_bio_submit(WRITE, bio);
                if (boundary_block) {
                        write_boundary_block(boundary_bdev,
                                        boundary_block, 1 << blkbits);
                }
        } else {
                *last_block_in_bio = blocks[blocks_per_page - 1];
        }
        goto out;

confused:
        if (bio)
                bio = mpage_bio_submit(WRITE, bio);

        if (writepage_fn) {
                *ret = (*writepage_fn)(page, wbc);
        } else {
                *ret = -EAGAIN;
                goto out;
        }
        /*
         * The caller has a ref on the inode, so *mapping is stable
         */
        if (*ret) {
                if (*ret == -ENOSPC)
                        set_bit(AS_ENOSPC, &mapping->flags);
                else
                        set_bit(AS_EIO, &mapping->flags);
        }
out:
        return bio;
}

/**
 * mpage_writepages - walk the list of dirty pages of the given
 *                    address space and writepage() all of them.
 *
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *             If this is NULL then use a_ops->writepage.  Otherwise, go
 *             direct-to-BIO.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * If a page is already under I/O, generic_writepages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int
mpage_writepages(struct address_space *mapping,
                struct writeback_control *wbc, get_block_t get_block)
{
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        struct bio *bio = NULL;
        sector_t last_block_in_bio = 0;
        int ret = 0;
        int done = 0;
        int (*writepage)(struct page *page, struct writeback_control *wbc);
        struct pagevec pvec;
        int nr_pages;
        pgoff_t index;
        pgoff_t end = -1;               /* Inclusive */
        int scanned = 0;
        int is_range = 0;

        if (wbc->nonblocking && bdi_write_congested(bdi)) {
                wbc->encountered_congestion = 1;
                return 0;
        }

        writepage = NULL;
        if (get_block == NULL)
                writepage = mapping->a_ops->writepage;

        pagevec_init(&pvec, 0);
        if (wbc->sync_mode == WB_SYNC_NONE) {
                index = mapping->writeback_index; /* Start from prev offset */
        } else {
                index = 0;                        /* whole-file sweep */
                scanned = 1;
        }
        if (wbc->start || wbc->end) {
                index = wbc->start >> PAGE_CACHE_SHIFT;
                end = wbc->end >> PAGE_CACHE_SHIFT;
                is_range = 1;
                scanned = 1;
        }
retry:
        while (!done && (index <= end) &&
                        (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                        PAGECACHE_TAG_DIRTY,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
                unsigned i;

                scanned = 1;
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];

                        /*
                         * At this point we hold neither mapping->tree_lock nor
                         * lock on the page itself: the page may be truncated or
                         * invalidated (changing page->mapping to NULL), or even
                         * swizzled back from swapper_space to tmpfs file
                         * mapping
                         */

                        lock_page(page);

                        if (unlikely(page->mapping != mapping)) {
                                unlock_page(page);
                                continue;
                        }

                        if (unlikely(is_range) && page->index > end) {
                                done = 1;
                                unlock_page(page);
                                continue;
                        }

                        if (wbc->sync_mode != WB_SYNC_NONE)
                                wait_on_page_writeback(page);

                        if (PageWriteback(page) ||
                                        !clear_page_dirty_for_io(page)) {
                                unlock_page(page);
                                continue;
                        }

                        if (writepage) {
                                ret = (*writepage)(page, wbc);
                                if (ret) {
                                        if (ret == -ENOSPC)
                                                set_bit(AS_ENOSPC,
                                                        &mapping->flags);
                                        else
                                                set_bit(AS_EIO,
                                                        &mapping->flags);
                                }
                        } else {
                                bio = __mpage_writepage(bio, page, get_block,
                                                &last_block_in_bio, &ret, wbc,
                                                page->mapping->a_ops->writepage);
                        }
                        if (unlikely(ret == WRITEPAGE_ACTIVATE))
                                unlock_page(page);
                        if (ret || (--(wbc->nr_to_write) <= 0))
                                done = 1;
                        if (wbc->nonblocking && bdi_write_congested(bdi)) {
                                wbc->encountered_congestion = 1;
                                done = 1;
                        }
                }
                pagevec_release(&pvec);
                cond_resched();
        }
        if (!scanned && !done) {
                /*
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
                scanned = 1;
                index = 0;
                goto retry;
        }
        if (!is_range)
                mapping->writeback_index = index;
        if (bio)
                mpage_bio_submit(WRITE, bio);
        return ret;
}
EXPORT_SYMBOL(mpage_writepages);
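
/*
 * Typical usage (illustrative, again following the ext2 pattern): a
 * filesystem's ->writepages simply forwards to this helper with its own
 * block mapper, e.g.
 *
 *	static int ext2_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return mpage_writepages(mapping, wbc, ext2_get_block);
 *	}
 *
 * Passing a NULL get_block instead makes this function fall back to the
 * mapping's ->writepage for every page.
 */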

int mpage_writepage(struct page *page, get_block_t get_block,
        struct writeback_control *wbc)
{
        int ret = 0;
        struct bio *bio;
        sector_t last_block_in_bio = 0;

        bio = __mpage_writepage(NULL, page, get_block,
                        &last_block_in_bio, &ret, wbc, NULL);
        if (bio)
                mpage_bio_submit(WRITE, bio);

        return ret;
}
EXPORT_SYMBOL(mpage_writepage);
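
/*
 * Illustrative only: a filesystem that wants a direct-to-BIO ->writepage can
 * forward to this helper with its block mapper (myfs_get_block here is a
 * hypothetical name, not something defined in this file):
 *
 *	static int myfs_writepage(struct page *page,
 *			struct writeback_control *wbc)
 *	{
 *		return mpage_writepage(page, myfs_get_block, wbc);
 *	}
 */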