/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

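/*
 * Walk the buffers attached to a page and note whether any of them
 * are delalloc, unmapped or unwritten, for the tracing and writepage
 * decision paths below.
 */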
STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	unsigned long	pgoff)
{
	xfs_inode_t	*ip;
	bhv_vnode_t	*vp = vn_from_inode(inode);
	loff_t		isize = i_size_read(inode);
	loff_t		offset = page_offset(page);
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	ip = xfs_vtoi(vp);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)pgoff,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)((unsigned long)current_pid()),
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, pgoff)
#endif

/*
 * Schedule IO completion handling on a xfsdatad if this was
 * the final hold on this ioend. If we are asked to wait,
 * flush the workqueue.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t	*ioend,
	int		wait)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		queue_work(xfsdatad_workqueue, &ioend->io_work);
		if (wait)
			flush_workqueue(xfsdatad_workqueue);
	}
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}
	if (unlikely(ioend->io_error))
		vn_ioerror(ioend->io_vnode, ioend->io_error, __FILE__,__LINE__);
	vn_iowake(ioend->io_vnode);
	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Update on-disk file size now that data has been written to disk.
 * The current in-memory file size is i_size.  If a write is beyond
 * eof io_new_size will be the intended file size until i_size is
 * updated.  If this write does not extend all the way to the valid
 * file size then restrict this update to the end of the write.
 */
STATIC void
xfs_setfilesize(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip;
	xfs_fsize_t		isize;
	xfs_fsize_t		bsize;

	ip = xfs_vtoi(ioend->io_vnode);
	if (!ip)
		return;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(ioend->io_type != IOMAP_READ);

	if (unlikely(ioend->io_error))
		return;

	bsize = ioend->io_offset + ioend->io_size;

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	isize = MAX(ip->i_size, ip->i_iocore.io_new_size);
	isize = MIN(isize, bsize);

	if (ip->i_d.di_size < isize) {
		ip->i_d.di_size = isize;
		ip->i_update_core = 1;
		ip->i_update_size = 1;
		mark_inode_dirty_sync(vn_to_inode(ioend->io_vnode));
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

/*
 * Buffered IO write completion for delayed allocate extents.
 */
STATIC void
xfs_end_bio_delalloc(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);
	bhv_vnode_t		*vp = ioend->io_vnode;
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;

	if (likely(!ioend->io_error)) {
		bhv_vop_bmap(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL);
		xfs_setfilesize(ioend);
	}
	xfs_destroy_ioend(ioend);
}

/*
 * IO read completion for regular, written extents.
 */
STATIC void
xfs_end_bio_read(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_destroy_ioend(ioend);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially: this prevents an I/O
	 * completion that fires before we have started all the I/O
	 * from calling the completion routine too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_vnode = vn_from_inode(inode);
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&ioend->io_vnode->v_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	if (type == IOMAP_UNWRITTEN)
		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
	else if (type == IOMAP_DELAY)
		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
	else if (type == IOMAP_READ)
		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
	else
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);

	return ioend;
}

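/*
 * Map a range of the file to disk blocks via the vnode bmap interface,
 * marking the vnode modified if the call performed an allocation.
 * The return value is negated for the generic code's convention.
 */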
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	bhv_vnode_t		*vp = vn_from_inode(inode);
	int			error, nmaps = 1;

	error = bhv_vop_bmap(vp, offset, count, flags, mapp, &nmaps);
	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
		VMODIFY(vp);
	return -error;
}

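/* Does the cached iomap cover the given file offset? */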
STATIC_INLINE int
xfs_iomap_valid(
	xfs_iomap_t		*iomapp,
	loff_t			offset)
{
	return offset >= iomapp->iomap_offset &&
		offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend, 0);
}

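/*
 * Take an extra reference on the ioend for the bio we are about to
 * issue, then send it to disk.  The reference is dropped in
 * xfs_end_bio() once the bio completes.
 */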
STATIC void
xfs_submit_ioend_bio(
	xfs_ioend_t	*ioend,
	struct bio	*bio)
{
	atomic_inc(&ioend->io_remaining);

	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	submit_bio(WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}

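/*
 * Allocate a bio sized for as many vectors as the device will take,
 * halving the request until the allocation succeeds, and point it at
 * the buffer's location on disk.
 */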
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	struct bio		*bio;
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);

	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (!bio);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio_get(bio);
	return bio;
}

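/* Mark a mapped, locked buffer as being under async writeback. */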
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

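/*
 * Move a page into writeback state and unlock it.  If no buffers were
 * submitted for I/O, end writeback immediately and record the skipped
 * page in the writeback_control.
 */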
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	struct writeback_control *wbc,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	if (!buffers) {
		end_page_writeback(page);
		wbc->pages_skipped++;	/* We didn't write this page */
	}
}

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we go, we can end up with a page that only has some buffers
 * marked async write, and I/O completion on those can occur before we mark
 * the other buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);
		}
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(ioend, bio);
		xfs_finish_ioend(ioend, 0);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		vn_iowake(ioend->io_vnode);
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

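/*
 * Translate a file offset into the on-disk block number described by
 * the iomap and attach it to the buffer_head.
 */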
STATIC void
xfs_map_buffer(
	struct buffer_head	*bh,
	xfs_iomap_t		*mp,
	xfs_off_t		offset,
	uint			block_bits)
{
	sector_t		bn;

	ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);

	bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
	      ((offset - mp->iomap_offset) >> block_bits);

	ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

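/*
 * Map a buffer to its location on disk, clearing any delalloc or
 * unwritten state now that real blocks back it.
 */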
STATIC void
xfs_map_at_offset(
	struct buffer_head	*bh,
	loff_t			offset,
	int			block_bits,
	xfs_iomap_t		*iomapp)
{
	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));

	lock_buffer(bh);
	xfs_map_buffer(bh, iomapp, offset, block_bits);
	bh->b_bdev = iomapp->iomap_target->bt_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Look for a page at index that is suitable for clustering.
 */
STATIC unsigned int
xfs_probe_page(
	struct page		*page,
	unsigned int		pg_offset,
	int			mapped)
{
	int			ret = 0;

	if (PageWriteback(page))
		return 0;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (!buffer_uptodate(bh))
					break;
				if (mapped != buffer_mapped(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = mapped ? 0 : PAGE_CACHE_SIZE;
	}

	return ret;
}

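/*
 * Sum the amount of contiguous, suitably-mapped data in this page and
 * the pages following it, so that a single mapping call can cover as
 * much of the cluster as possible.
 */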
STATIC size_t
xfs_probe_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head,
	int			mapped)
{
	struct pagevec		pvec;
	pgoff_t			tindex, tlast, tloff;
	size_t			total = 0;
	int			done = 0, i;

	/* First sum forwards in this page */
	do {
		if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
			return total;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* if we reached the end of the page, sum forwards in following pages */
	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	tindex = startpage->index + 1;

	/* Prune this back to avoid pathological behavior */
	tloff = min(tlast, startpage->index + 64);

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tloff) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			size_t pg_offset, pg_len = 0;

			if (tindex == tlast) {
				pg_offset =
				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
				if (!pg_offset) {
					done = 1;
					break;
				}
			} else
				pg_offset = PAGE_CACHE_SIZE;

			if (page->index == tindex && !TestSetPageLocked(page)) {
				pg_len = xfs_probe_page(page, pg_offset, mapped);
				unlock_page(page);
			}

			if (!pg_len) {
				done = 1;
				break;
			}

			total += pg_len;
			tindex++;
		}

		pagevec_release(&pvec);
		cond_resched();
	}

	return total;
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IOMAP_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IOMAP_DELAY);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IOMAP_NEW);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map. Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	xfs_iomap_t		*mp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			bbits = inode->i_blkbits;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (TestSetPageLocked(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh)) {
			if (buffer_unwritten(bh))
				type = IOMAP_UNWRITTEN;
			else
				type = IOMAP_DELAY;

			if (!xfs_iomap_valid(mp, offset)) {
				done = 1;
				continue;
			}

			ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
			ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

			xfs_map_at_offset(bh, offset, bbits, mp);
			if (startio) {
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
			} else {
				set_buffer_dirty(bh);
				unlock_buffer(bh);
				mark_buffer_dirty(bh);
			}
			page_dirty--;
			count++;
		} else {
			type = IOMAP_NEW;
			if (buffer_mapped(bh) && all_bh && startio) {
				lock_buffer(bh);
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
				count++;
				page_dirty--;
			} else {
				done = 1;
			}
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio) {
		if (count) {
			struct backing_dev_info *bdi;

			bdi = inode->i_mapping->backing_dev_info;
			wbc->nr_to_write--;
			if (bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			} else if (wbc->nr_to_write <= 0) {
				done = 1;
			}
		}
		xfs_start_page_writeback(page, wbc, !page_dirty, count);
	}

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					iomapp, ioendp, wbc, startio, all_bh);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks or which block for
 * that matter are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mapwrite but only partially set up by block_prepare_write, the bh
 * states will not agree and only the ones set up by BPW/BCW will have
 * valid state; thus the whole page must be written out.
 */

STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	struct writeback_control *wbc,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh, *head;
	xfs_iomap_t		iomap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned long		p_offset = 0;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	ssize_t			size, len;
	int			flags, err, iomap_valid = 0, uptodate = 1;
	int			page_dirty, count = 0;
	int			trylock = 0;
	int			all_bh = unmapped;

	if (startio) {
		if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
			trylock |= BMAPI_TRYLOCK;
	}

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			if (startio)
				unlock_page(page);
			return 0;
		}
	}

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	flags = BMAPI_READ;
	type = IOMAP_NEW;

	/* TODO: cleanup count and page_dirty */

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
			/*
			 * the iomap is actually still valid, but the ioend
			 * isn't.  shouldn't happen too often.
			 */
			iomap_valid = 0;
			continue;
		}

		if (iomap_valid)
			iomap_valid = xfs_iomap_valid(&iomap, offset);

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 *
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 *
		 * Third case, an unmapped buffer was found, and we are
		 * in a path where we need to write the whole page out.
		 */
		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
		     !buffer_mapped(bh) && (unmapped || startio))) {
			int new_ioend = 0;

			/*
			 * Make sure we don't use a read-only iomap
			 */
			if (flags == BMAPI_READ)
				iomap_valid = 0;

			if (buffer_unwritten(bh)) {
				type = IOMAP_UNWRITTEN;
				flags = BMAPI_WRITE | BMAPI_IGNSTATE;
			} else if (buffer_delay(bh)) {
				type = IOMAP_DELAY;
				flags = BMAPI_ALLOCATE | trylock;
			} else {
				type = IOMAP_NEW;
				flags = BMAPI_WRITE | BMAPI_MMAP;
			}

			if (!iomap_valid) {
				/*
				 * if we didn't have a valid mapping then we
				 * need to ensure that we put the new mapping
				 * in a new ioend structure. This needs to be
				 * done to ensure that the ioends correctly
				 * reflect the block mappings at io completion
				 * for unwritten extent conversion.
				 */
				new_ioend = 1;
				if (type == IOMAP_NEW) {
					size = xfs_probe_cluster(inode,
							page, bh, head, 0);
				} else {
					size = len;
				}

				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}
			if (iomap_valid) {
				xfs_map_at_offset(bh, offset,
						inode->i_blkbits, &iomap);
				if (startio) {
					xfs_add_to_ioend(inode, bh, offset,
							type, &ioend,
							new_ioend);
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
				count++;
			}
		} else if (buffer_uptodate(bh) && startio) {
			/*
			 * we got here because the buffer is already mapped.
			 * That means it must already have extents allocated
			 * underneath it. Map the extent by reading it.
			 */
			if (!iomap_valid || flags != BMAPI_READ) {
				flags = BMAPI_READ;
				size = xfs_probe_cluster(inode, page, bh,
								head, 1);
				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}

			/*
			 * We set the type to IOMAP_NEW in case we are doing a
			 * small write at EOF that is extending the file but
			 * without needing an allocation. We need to update the
			 * file size on I/O completion in this case so it is
			 * the same case as having just allocated a new extent
			 * that we are writing into for the first time.
			 */
			type = IOMAP_NEW;
			if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
				ASSERT(buffer_mapped(bh));
				if (iomap_valid)
					all_bh = 1;
				xfs_add_to_ioend(inode, bh, offset, type,
						&ioend, !iomap_valid);
				page_dirty--;
				count++;
			} else {
				iomap_valid = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			iomap_valid = 0;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_start_page_writeback(page, wbc, 1, count);

	if (ioend && iomap_valid) {
		offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
					wbc, startio, all_bh, tlast);
	}

	if (iohead)
		xfs_submit_ioend(iohead);

	return page_dirty;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped)
			block_invalidatepage(page, 0);
		ClearPageUptodate(page);
	}
	return err;
}

/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first, if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */

STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */

	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (current_test_flags(PF_FSTRANS) && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}

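/*
 * ->writepages: clear any pending truncate flag on the vnode, then
 * hand off to the generic writeback code.
 */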
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct bhv_vnode	*vp = vn_from_inode(mapping->host);

	if (VN_TRUNC(vp))
		VUNTRUNCATE(vp);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns non-zero if the buffers were freed and the page can be
 * released, 0 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);

	if (!page_has_buffers(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (current_test_flags(PF_FSTRANS))
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}

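/*
 * Common get_block implementation for the buffered and direct I/O
 * paths: map a logical file block to disk and set up the buffer_head
 * state to match the returned iomap.
 */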
STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct,
	bmapi_flags_t		flags)
{
	bhv_vnode_t		*vp = vn_from_inode(inode);
	xfs_iomap_t		iomap;
	xfs_off_t		offset;
	ssize_t			size;
	int			niomap = 1;
	int			error;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;
	error = bhv_vop_bmap(vp, offset, size,
			     create ? flags : BMAPI_READ, &iomap, &niomap);
	if (error)
		return -error;
	if (niomap == 0)
		return 0;

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			xfs_map_buffer(bh_result, &iomap, offset,
				       inode->i_blkbits);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = iomap.iomap_target->bt_bdev;

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
		set_buffer_new(bh_result);

	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	if (direct || size > (1 << inode->i_blkbits)) {
		ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
		offset = min_t(xfs_off_t,
				iomap.iomap_bsize - iomap.iomap_delta, size);
		bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
	}

	return 0;
}

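/* get_block callback for the buffered I/O path. */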
int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 0, BMAPI_WRITE);
}

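/* get_block callback for the direct I/O path. */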
STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock,
				bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

STATIC void
xfs_end_io_direct(
	struct kiocb	*iocb,
	loff_t		offset,
	ssize_t		size,
	void		*private)
{
	xfs_ioend_t	*ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * Well, if only it were that simple. Because synchronous direct I/O
	 * requires extent conversion to occur *before* we return to userspace,
	 * we have to wait for extent conversion to complete. Look at the
	 * iocb that has been passed to us to determine if this is AIO or
	 * not. If it is synchronous, tell xfs_finish_ioend() to kick the
	 * workqueue and wait for it to complete.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	ioend->io_offset = offset;
	ioend->io_size = size;
	if (ioend->io_type == IOMAP_READ) {
		xfs_finish_ioend(ioend, 0);
	} else if (private && size > 0) {
		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
	} else {
		/*
		 * A direct I/O write ioend starts its life in unwritten
		 * state in case it maps an unwritten extent.  This write
		 * didn't map an unwritten extent so switch its completion
		 * handler.
		 */
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
		xfs_finish_ioend(ioend, 0);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
}

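/*
 * ->direct_IO: look up the target device for the range, attach an
 * ioend to the iocb for completion handling and hand the request to
 * the generic blockdev direct I/O code.
 */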
STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	bhv_vnode_t	*vp = vn_from_inode(inode);
	xfs_iomap_t	iomap;
	int		maps = 1;
	int		error;
	ssize_t		ret;

	error = bhv_vop_bmap(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps);
	if (error)
		return -error;

	if (rw == WRITE) {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
			iomap.iomap_target->bt_bdev,
			iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	} else {
		iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
			iomap.iomap_target->bt_bdev,
			iov, offset, nr_segs,
			xfs_get_blocks_direct,
			xfs_end_io_direct);
	}

	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
		xfs_destroy_ioend(iocb->private);
	return ret;
}

STATIC int
xfs_vm_prepare_write(
	struct file		*file,
	struct page		*page,
	unsigned int		from,
	unsigned int		to)
{
	return block_prepare_write(page, from, to, xfs_get_blocks);
}

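/*
 * ->bmap: flush delayed allocation data so the block mapping is stable
 * before asking generic_block_bmap() for it.
 */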
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	bhv_vnode_t		*vp = vn_from_inode(inode);

	vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
	bhv_vop_rwlock(vp, VRWLOCK_READ);
	bhv_vop_flush_pages(vp, (xfs_off_t)0, -1, 0, FI_REMAPF);
	bhv_vop_rwunlock(vp, VRWLOCK_READ);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

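/* ->invalidatepage: trace the call, then use the generic buffer code. */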
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
			page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.sync_page		= block_sync_page,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.prepare_write		= xfs_vm_prepare_write,
	.commit_write		= generic_commit_write,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
};