Commit | Line | Data |
---|---|---|
afc51aaa DW |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * Copyright (C) 2010 Red Hat, Inc. | |
598ecfba | 4 | * Copyright (C) 2016-2019 Christoph Hellwig. |
afc51aaa DW |
5 | */ |
6 | #include <linux/module.h> | |
7 | #include <linux/compiler.h> | |
8 | #include <linux/fs.h> | |
9 | #include <linux/iomap.h> | |
10 | #include <linux/pagemap.h> | |
11 | #include <linux/uio.h> | |
12 | #include <linux/buffer_head.h> | |
13 | #include <linux/dax.h> | |
14 | #include <linux/writeback.h> | |
598ecfba | 15 | #include <linux/list_sort.h> |
afc51aaa DW |
16 | #include <linux/swap.h> |
17 | #include <linux/bio.h> | |
18 | #include <linux/sched/signal.h> | |
19 | #include <linux/migrate.h> | |
9e91c572 | 20 | #include "trace.h" |
afc51aaa DW |
21 | |
22 | #include "../internal.h" | |
23 | ||
ab08b01e | 24 | /* |
0a195b91 MWO |
25 | * Structure allocated for each page or THP when block size < page size |
26 | * to track sub-page uptodate status and I/O completions. | |
ab08b01e CH |
27 | */ |
28 | struct iomap_page { | |
7d636676 | 29 | atomic_t read_bytes_pending; |
0fb2d720 | 30 | atomic_t write_bytes_pending; |
1cea335d | 31 | spinlock_t uptodate_lock; |
0a195b91 | 32 | unsigned long uptodate[]; |
ab08b01e CH |
33 | }; |
34 | ||
35 | static inline struct iomap_page *to_iomap_page(struct page *page) | |
36 | { | |
0a195b91 MWO |
37 | /* |
38 | * per-block data is stored in the head page. Callers should | |
39 | * not be dealing with tail pages (and if they are, they can | |
40 | * call thp_head() first). | |
41 | */ | |
42 | VM_BUG_ON_PGFLAGS(PageTail(page), page); | |
43 | ||
ab08b01e CH |
44 | if (page_has_private(page)) |
45 | return (struct iomap_page *)page_private(page); | |
46 | return NULL; | |
47 | } | |
48 | ||
598ecfba CH |
49 | static struct bio_set iomap_ioend_bioset; |
50 | ||
afc51aaa DW |
51 | static struct iomap_page * |
52 | iomap_page_create(struct inode *inode, struct page *page) | |
53 | { | |
54 | struct iomap_page *iop = to_iomap_page(page); | |
0a195b91 | 55 | unsigned int nr_blocks = i_blocks_per_page(inode, page); |
afc51aaa | 56 | |
0a195b91 | 57 | if (iop || nr_blocks <= 1) |
afc51aaa DW |
58 | return iop; |
59 | ||
0a195b91 MWO |
60 | iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)), |
61 | GFP_NOFS | __GFP_NOFAIL); | |
1cea335d | 62 | spin_lock_init(&iop->uptodate_lock); |
4595a298 MWO |
63 | if (PageUptodate(page)) |
64 | bitmap_fill(iop->uptodate, nr_blocks); | |
58aeb731 | 65 | attach_page_private(page, iop); |
afc51aaa DW |
66 | return iop; |
67 | } | |
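/*
 * Worked example (editorial addition, not part of the original source):
 * with a 4096-byte page and a 1024-byte block size, i_blocks_per_page()
 * returns 4, BITS_TO_LONGS(4) is 1, so iomap_page_create() allocates
 * sizeof(struct iomap_page) plus a single unsigned long for the uptodate
 * bitmap.  For a 2MB THP with 4096-byte blocks, nr_blocks is 512 and the
 * bitmap grows to 512 bits (eight longs on a 64-bit build).
 */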
68 | ||
69 | static void | |
70 | iomap_page_release(struct page *page) | |
71 | { | |
58aeb731 | 72 | struct iomap_page *iop = detach_page_private(page); |
0a195b91 | 73 | unsigned int nr_blocks = i_blocks_per_page(page->mapping->host, page); |
afc51aaa DW |
74 | |
75 | if (!iop) | |
76 | return; | |
7d636676 | 77 | WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending)); |
0fb2d720 | 78 | WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending)); |
0a195b91 MWO |
79 | WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) != |
80 | PageUptodate(page)); | |
afc51aaa DW |
81 | kfree(iop); |
82 | } | |
83 | ||
84 | /* | |
85 | * Calculate the range inside the page that we actually need to read. | |
86 | */ | |
87 | static void | |
88 | iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop, | |
89 | loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp) | |
90 | { | |
91 | loff_t orig_pos = *pos; | |
92 | loff_t isize = i_size_read(inode); | |
93 | unsigned block_bits = inode->i_blkbits; | |
94 | unsigned block_size = (1 << block_bits); | |
95 | unsigned poff = offset_in_page(*pos); | |
96 | unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length); | |
97 | unsigned first = poff >> block_bits; | |
98 | unsigned last = (poff + plen - 1) >> block_bits; | |
99 | ||
100 | /* | |
101 | * If the block size is smaller than the page size we need to check the | |
102 | * per-block uptodate status and adjust the offset and length if needed | |
103 | * to avoid reading in already uptodate ranges. | |
104 | */ | |
105 | if (iop) { | |
106 | unsigned int i; | |
107 | ||
108 | /* move forward for each leading block marked uptodate */ | |
109 | for (i = first; i <= last; i++) { | |
110 | if (!test_bit(i, iop->uptodate)) | |
111 | break; | |
112 | *pos += block_size; | |
113 | poff += block_size; | |
114 | plen -= block_size; | |
115 | first++; | |
116 | } | |
117 | ||
118 | /* truncate len if we find any trailing uptodate block(s) */ | |
119 | for ( ; i <= last; i++) { | |
120 | if (test_bit(i, iop->uptodate)) { | |
121 | plen -= (last - i + 1) * block_size; | |
122 | last = i - 1; | |
123 | break; | |
124 | } | |
125 | } | |
126 | } | |
127 | ||
128 | /* | |
129 | * If the extent spans the block that contains the i_size we need to | |
130 | * handle both halves separately so that we properly zero data in the | |
131 | * page cache for blocks that are entirely outside of i_size. | |
132 | */ | |
133 | if (orig_pos <= isize && orig_pos + length > isize) { | |
134 | unsigned end = offset_in_page(isize - 1) >> block_bits; | |
135 | ||
136 | if (first <= end && last > end) | |
137 | plen -= (last - end) * block_size; | |
138 | } | |
139 | ||
140 | *offp = poff; | |
141 | *lenp = plen; | |
142 | } | |
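/*
 * Worked example (editorial addition, not part of the original source):
 * take a 4096-byte page with 1024-byte blocks where blocks 0 and 3 are
 * already uptodate, and a caller passing pos = page start, length = 4096.
 * The first loop skips the leading uptodate block 0 (*pos and poff advance
 * to 1024, plen drops to 3072), the second loop trims the trailing uptodate
 * block 3 (plen drops to 2048), so only bytes 1024..3071, i.e. blocks 1 and
 * 2, are actually read.
 */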
143 | ||
144 | static void | |
1cea335d | 145 | iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len) |
afc51aaa DW |
146 | { |
147 | struct iomap_page *iop = to_iomap_page(page); | |
148 | struct inode *inode = page->mapping->host; | |
149 | unsigned first = off >> inode->i_blkbits; | |
150 | unsigned last = (off + len - 1) >> inode->i_blkbits; | |
1cea335d | 151 | unsigned long flags; |
afc51aaa | 152 | |
1cea335d | 153 | spin_lock_irqsave(&iop->uptodate_lock, flags); |
b21866f5 MWO |
154 | bitmap_set(iop->uptodate, first, last - first + 1); |
155 | if (bitmap_full(iop->uptodate, i_blocks_per_page(inode, page))) | |
1cea335d CH |
156 | SetPageUptodate(page); |
157 | spin_unlock_irqrestore(&iop->uptodate_lock, flags); | |
158 | } | |
159 | ||
160 | static void | |
161 | iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len) | |
162 | { | |
163 | if (PageError(page)) | |
164 | return; | |
165 | ||
166 | if (page_has_private(page)) | |
167 | iomap_iop_set_range_uptodate(page, off, len); | |
168 | else | |
afc51aaa DW |
169 | SetPageUptodate(page); |
170 | } | |
171 | ||
afc51aaa DW |
172 | static void |
173 | iomap_read_page_end_io(struct bio_vec *bvec, int error) | |
174 | { | |
175 | struct page *page = bvec->bv_page; | |
176 | struct iomap_page *iop = to_iomap_page(page); | |
177 | ||
178 | if (unlikely(error)) { | |
179 | ClearPageUptodate(page); | |
180 | SetPageError(page); | |
181 | } else { | |
182 | iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len); | |
183 | } | |
184 | ||
7d636676 MWO |
185 | if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending)) |
186 | unlock_page(page); | |
afc51aaa DW |
187 | } |
188 | ||
189 | static void | |
190 | iomap_read_end_io(struct bio *bio) | |
191 | { | |
192 | int error = blk_status_to_errno(bio->bi_status); | |
193 | struct bio_vec *bvec; | |
194 | struct bvec_iter_all iter_all; | |
195 | ||
196 | bio_for_each_segment_all(bvec, bio, iter_all) | |
197 | iomap_read_page_end_io(bvec, error); | |
198 | bio_put(bio); | |
199 | } | |
200 | ||
201 | struct iomap_readpage_ctx { | |
202 | struct page *cur_page; | |
203 | bool cur_page_in_bio; | |
afc51aaa | 204 | struct bio *bio; |
9d24a13a | 205 | struct readahead_control *rac; |
afc51aaa DW |
206 | }; |
207 | ||
208 | static void | |
209 | iomap_read_inline_data(struct inode *inode, struct page *page, | |
210 | struct iomap *iomap) | |
211 | { | |
212 | size_t size = i_size_read(inode); | |
213 | void *addr; | |
214 | ||
215 | if (PageUptodate(page)) | |
216 | return; | |
217 | ||
218 | BUG_ON(page->index); | |
219 | BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data)); | |
220 | ||
221 | addr = kmap_atomic(page); | |
222 | memcpy(addr, iomap->inline_data, size); | |
223 | memset(addr + size, 0, PAGE_SIZE - size); | |
224 | kunmap_atomic(addr); | |
225 | SetPageUptodate(page); | |
226 | } | |
227 | ||
009d8d84 CH |
228 | static inline bool iomap_block_needs_zeroing(struct inode *inode, |
229 | struct iomap *iomap, loff_t pos) | |
230 | { | |
231 | return iomap->type != IOMAP_MAPPED || | |
232 | (iomap->flags & IOMAP_F_NEW) || | |
233 | pos >= i_size_read(inode); | |
234 | } | |
235 | ||
afc51aaa DW |
236 | static loff_t |
237 | iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data, | |
c039b997 | 238 | struct iomap *iomap, struct iomap *srcmap) |
afc51aaa DW |
239 | { |
240 | struct iomap_readpage_ctx *ctx = data; | |
241 | struct page *page = ctx->cur_page; | |
242 | struct iomap_page *iop = iomap_page_create(inode, page); | |
243 | bool same_page = false, is_contig = false; | |
244 | loff_t orig_pos = pos; | |
245 | unsigned poff, plen; | |
246 | sector_t sector; | |
247 | ||
248 | if (iomap->type == IOMAP_INLINE) { | |
249 | WARN_ON_ONCE(pos); | |
250 | iomap_read_inline_data(inode, page, iomap); | |
251 | return PAGE_SIZE; | |
252 | } | |
253 | ||
254 | /* zero post-eof blocks as the page may be mapped */ | |
255 | iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen); | |
256 | if (plen == 0) | |
257 | goto done; | |
258 | ||
009d8d84 | 259 | if (iomap_block_needs_zeroing(inode, iomap, pos)) { |
afc51aaa DW |
260 | zero_user(page, poff, plen); |
261 | iomap_set_range_uptodate(page, poff, plen); | |
262 | goto done; | |
263 | } | |
264 | ||
265 | ctx->cur_page_in_bio = true; | |
7d636676 MWO |
266 | if (iop) |
267 | atomic_add(plen, &iop->read_bytes_pending); | |
afc51aaa | 268 | |
7d636676 | 269 | /* Try to merge into a previous segment if we can */ |
afc51aaa | 270 | sector = iomap_sector(iomap, pos); |
7d636676 MWO |
271 | if (ctx->bio && bio_end_sector(ctx->bio) == sector) { |
272 | if (__bio_try_merge_page(ctx->bio, page, plen, poff, | |
273 | &same_page)) | |
274 | goto done; | |
afc51aaa | 275 | is_contig = true; |
afc51aaa DW |
276 | } |
277 | ||
7d636676 | 278 | if (!is_contig || bio_full(ctx->bio, plen)) { |
afc51aaa | 279 | gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL); |
457df33e | 280 | gfp_t orig_gfp = gfp; |
afc51aaa DW |
281 | int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT; |
282 | ||
283 | if (ctx->bio) | |
284 | submit_bio(ctx->bio); | |
285 | ||
9d24a13a | 286 | if (ctx->rac) /* same as readahead_gfp_mask */ |
afc51aaa DW |
287 | gfp |= __GFP_NORETRY | __GFP_NOWARN; |
288 | ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs)); | |
457df33e MWO |
289 | /* |
290 | * If the bio_alloc fails, try it again for a single page to | |
291 | * avoid having to deal with partial page reads. This emulates | |
292 | * what do_mpage_readpage does. | |
293 | */ | |
294 | if (!ctx->bio) | |
295 | ctx->bio = bio_alloc(orig_gfp, 1); | |
afc51aaa | 296 | ctx->bio->bi_opf = REQ_OP_READ; |
9d24a13a | 297 | if (ctx->rac) |
afc51aaa DW |
298 | ctx->bio->bi_opf |= REQ_RAHEAD; |
299 | ctx->bio->bi_iter.bi_sector = sector; | |
300 | bio_set_dev(ctx->bio, iomap->bdev); | |
301 | ctx->bio->bi_end_io = iomap_read_end_io; | |
302 | } | |
303 | ||
304 | bio_add_page(ctx->bio, page, plen, poff); | |
305 | done: | |
306 | /* | |
307 | * Move the caller beyond our range so that it keeps making progress. | |
308 | * For that we have to include any leading non-uptodate ranges, but | |
309 | * we can skip trailing ones as they will be handled in the next | |
310 | * iteration. | |
311 | */ | |
312 | return pos - orig_pos + plen; | |
313 | } | |
314 | ||
315 | int | |
316 | iomap_readpage(struct page *page, const struct iomap_ops *ops) | |
317 | { | |
318 | struct iomap_readpage_ctx ctx = { .cur_page = page }; | |
319 | struct inode *inode = page->mapping->host; | |
320 | unsigned poff; | |
321 | loff_t ret; | |
322 | ||
9e91c572 CH |
323 | trace_iomap_readpage(page->mapping->host, 1); |
324 | ||
afc51aaa DW |
325 | for (poff = 0; poff < PAGE_SIZE; poff += ret) { |
326 | ret = iomap_apply(inode, page_offset(page) + poff, | |
327 | PAGE_SIZE - poff, 0, ops, &ctx, | |
328 | iomap_readpage_actor); | |
329 | if (ret <= 0) { | |
330 | WARN_ON_ONCE(ret == 0); | |
331 | SetPageError(page); | |
332 | break; | |
333 | } | |
334 | } | |
335 | ||
336 | if (ctx.bio) { | |
337 | submit_bio(ctx.bio); | |
338 | WARN_ON_ONCE(!ctx.cur_page_in_bio); | |
339 | } else { | |
340 | WARN_ON_ONCE(ctx.cur_page_in_bio); | |
341 | unlock_page(page); | |
342 | } | |
343 | ||
344 | /* | |
d4388340 | 345 | * Just like mpage_readahead and block_read_full_page we always |
afc51aaa DW |
346 | * return 0 and just mark the page as PageError on errors. This |
347 | * should be cleaned up all through the stack eventually. | |
348 | */ | |
349 | return 0; | |
350 | } | |
351 | EXPORT_SYMBOL_GPL(iomap_readpage); | |
352 | ||
afc51aaa | 353 | static loff_t |
9d24a13a | 354 | iomap_readahead_actor(struct inode *inode, loff_t pos, loff_t length, |
c039b997 | 355 | void *data, struct iomap *iomap, struct iomap *srcmap) |
afc51aaa DW |
356 | { |
357 | struct iomap_readpage_ctx *ctx = data; | |
358 | loff_t done, ret; | |
359 | ||
360 | for (done = 0; done < length; done += ret) { | |
361 | if (ctx->cur_page && offset_in_page(pos + done) == 0) { | |
362 | if (!ctx->cur_page_in_bio) | |
363 | unlock_page(ctx->cur_page); | |
364 | put_page(ctx->cur_page); | |
365 | ctx->cur_page = NULL; | |
366 | } | |
367 | if (!ctx->cur_page) { | |
9d24a13a | 368 | ctx->cur_page = readahead_page(ctx->rac); |
afc51aaa DW |
369 | ctx->cur_page_in_bio = false; |
370 | } | |
371 | ret = iomap_readpage_actor(inode, pos + done, length - done, | |
c039b997 | 372 | ctx, iomap, srcmap); |
afc51aaa DW |
373 | } |
374 | ||
375 | return done; | |
376 | } | |
377 | ||
9d24a13a MWO |
378 | /** |
379 | * iomap_readahead - Attempt to read pages from a file. | |
380 | * @rac: Describes the pages to be read. | |
381 | * @ops: The operations vector for the filesystem. | |
382 | * | |
383 | * This function is for filesystems to call to implement their readahead | |
384 | * address_space operation. | |
385 | * | |
386 | * Context: The @ops callbacks may submit I/O (e.g. to read the addresses of | |
387 | * blocks from disc), and may wait for it. The caller may be trying to | |
388 | * access a different page, and so sleeping excessively should be avoided. | |
389 | * It may allocate memory, but should avoid costly allocations. This | |
390 | * function is called with memalloc_nofs set, so allocations will not cause | |
391 | * the filesystem to be reentered. | |
392 | */ | |
393 | void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops) | |
afc51aaa | 394 | { |
9d24a13a MWO |
395 | struct inode *inode = rac->mapping->host; |
396 | loff_t pos = readahead_pos(rac); | |
397 | loff_t length = readahead_length(rac); | |
afc51aaa | 398 | struct iomap_readpage_ctx ctx = { |
9d24a13a | 399 | .rac = rac, |
afc51aaa | 400 | }; |
afc51aaa | 401 | |
9d24a13a | 402 | trace_iomap_readahead(inode, readahead_count(rac)); |
9e91c572 | 403 | |
afc51aaa | 404 | while (length > 0) { |
9d24a13a MWO |
405 | loff_t ret = iomap_apply(inode, pos, length, 0, ops, |
406 | &ctx, iomap_readahead_actor); | |
afc51aaa DW |
407 | if (ret <= 0) { |
408 | WARN_ON_ONCE(ret == 0); | |
9d24a13a | 409 | break; |
afc51aaa DW |
410 | } |
411 | pos += ret; | |
412 | length -= ret; | |
413 | } | |
9d24a13a | 414 | |
afc51aaa DW |
415 | if (ctx.bio) |
416 | submit_bio(ctx.bio); | |
417 | if (ctx.cur_page) { | |
418 | if (!ctx.cur_page_in_bio) | |
419 | unlock_page(ctx.cur_page); | |
420 | put_page(ctx.cur_page); | |
421 | } | |
afc51aaa | 422 | } |
9d24a13a | 423 | EXPORT_SYMBOL_GPL(iomap_readahead); |
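/*
 * Usage sketch (editorial addition, not part of the original source): a
 * filesystem typically exposes iomap_readpage() and iomap_readahead()
 * through its address_space_operations, passing its own iomap_ops.  The
 * my_fs_* names below are hypothetical; see XFS for a real caller.
 */
static int my_fs_readpage(struct file *unused, struct page *page)
{
	return iomap_readpage(page, &my_fs_read_iomap_ops);
}

static void my_fs_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &my_fs_read_iomap_ops);
}

static const struct address_space_operations my_fs_aops = {
	.readpage	= my_fs_readpage,
	.readahead	= my_fs_readahead,
	/* ... writepages, releasepage, invalidatepage, etc. ... */
};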
afc51aaa DW |
424 | |
425 | /* | |
426 | * iomap_is_partially_uptodate checks whether blocks within a page are | |
427 | * uptodate or not. | |
428 | * | |
429 | * Returns true if all blocks which correspond to a file portion | |
430 | * we want to read within the page are uptodate. | |
431 | */ | |
432 | int | |
433 | iomap_is_partially_uptodate(struct page *page, unsigned long from, | |
434 | unsigned long count) | |
435 | { | |
436 | struct iomap_page *iop = to_iomap_page(page); | |
437 | struct inode *inode = page->mapping->host; | |
438 | unsigned len, first, last; | |
439 | unsigned i; | |
440 | ||
441 | /* Limit range to one page */ | |
442 | len = min_t(unsigned, PAGE_SIZE - from, count); | |
443 | ||
444 | /* First and last blocks in range within page */ | |
445 | first = from >> inode->i_blkbits; | |
446 | last = (from + len - 1) >> inode->i_blkbits; | |
447 | ||
448 | if (iop) { | |
449 | for (i = first; i <= last; i++) | |
450 | if (!test_bit(i, iop->uptodate)) | |
451 | return 0; | |
452 | return 1; | |
453 | } | |
454 | ||
455 | return 0; | |
456 | } | |
457 | EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate); | |
458 | ||
459 | int | |
460 | iomap_releasepage(struct page *page, gfp_t gfp_mask) | |
461 | { | |
1ac99452 MWO |
462 | trace_iomap_releasepage(page->mapping->host, page_offset(page), |
463 | PAGE_SIZE); | |
9e91c572 | 464 | |
afc51aaa DW |
465 | /* |
466 | * mm accommodates an old ext3 case where clean pages might not have had | |
467 | * the dirty bit cleared. Thus, it can send actual dirty pages to | |
468 | * ->releasepage() via shrink_active_list(); skip those here. | |
469 | */ | |
470 | if (PageDirty(page) || PageWriteback(page)) | |
471 | return 0; | |
472 | iomap_page_release(page); | |
473 | return 1; | |
474 | } | |
475 | EXPORT_SYMBOL_GPL(iomap_releasepage); | |
476 | ||
477 | void | |
478 | iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len) | |
479 | { | |
1ac99452 | 480 | trace_iomap_invalidatepage(page->mapping->host, offset, len); |
9e91c572 | 481 | |
afc51aaa DW |
482 | /* |
483 | * If we are invalidating the entire page, clear the dirty state from it | |
484 | * and release it to avoid unnecessary buildup of the LRU. | |
485 | */ | |
486 | if (offset == 0 && len == PAGE_SIZE) { | |
487 | WARN_ON_ONCE(PageWriteback(page)); | |
488 | cancel_dirty_page(page); | |
489 | iomap_page_release(page); | |
490 | } | |
491 | } | |
492 | EXPORT_SYMBOL_GPL(iomap_invalidatepage); | |
493 | ||
494 | #ifdef CONFIG_MIGRATION | |
495 | int | |
496 | iomap_migrate_page(struct address_space *mapping, struct page *newpage, | |
497 | struct page *page, enum migrate_mode mode) | |
498 | { | |
499 | int ret; | |
500 | ||
26473f83 | 501 | ret = migrate_page_move_mapping(mapping, newpage, page, 0); |
afc51aaa DW |
502 | if (ret != MIGRATEPAGE_SUCCESS) |
503 | return ret; | |
504 | ||
58aeb731 GJ |
505 | if (page_has_private(page)) |
506 | attach_page_private(newpage, detach_page_private(page)); | |
afc51aaa DW |
507 | |
508 | if (mode != MIGRATE_SYNC_NO_COPY) | |
509 | migrate_page_copy(newpage, page); | |
510 | else | |
511 | migrate_page_states(newpage, page); | |
512 | return MIGRATEPAGE_SUCCESS; | |
513 | } | |
514 | EXPORT_SYMBOL_GPL(iomap_migrate_page); | |
515 | #endif /* CONFIG_MIGRATION */ | |
516 | ||
32a38a49 CH |
517 | enum { |
518 | IOMAP_WRITE_F_UNSHARE = (1 << 0), | |
519 | }; | |
520 | ||
afc51aaa DW |
521 | static void |
522 | iomap_write_failed(struct inode *inode, loff_t pos, unsigned len) | |
523 | { | |
524 | loff_t i_size = i_size_read(inode); | |
525 | ||
526 | /* | |
527 | * Only truncate newly allocated pages beyond EOF, even if the | |
528 | * write started inside the existing inode size. | |
529 | */ | |
530 | if (pos + len > i_size) | |
531 | truncate_pagecache_range(inode, max(pos, i_size), pos + len); | |
532 | } | |
533 | ||
534 | static int | |
d3b40439 CH |
535 | iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff, |
536 | unsigned plen, struct iomap *iomap) | |
afc51aaa DW |
537 | { |
538 | struct bio_vec bvec; | |
539 | struct bio bio; | |
540 | ||
afc51aaa DW |
541 | bio_init(&bio, &bvec, 1); |
542 | bio.bi_opf = REQ_OP_READ; | |
543 | bio.bi_iter.bi_sector = iomap_sector(iomap, block_start); | |
544 | bio_set_dev(&bio, iomap->bdev); | |
545 | __bio_add_page(&bio, page, plen, poff); | |
546 | return submit_bio_wait(&bio); | |
547 | } | |
548 | ||
549 | static int | |
32a38a49 | 550 | __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags, |
c039b997 | 551 | struct page *page, struct iomap *srcmap) |
afc51aaa DW |
552 | { |
553 | struct iomap_page *iop = iomap_page_create(inode, page); | |
554 | loff_t block_size = i_blocksize(inode); | |
6cc19c5f NB |
555 | loff_t block_start = round_down(pos, block_size); |
556 | loff_t block_end = round_up(pos + len, block_size); | |
afc51aaa | 557 | unsigned from = offset_in_page(pos), to = from + len, poff, plen; |
afc51aaa DW |
558 | |
559 | if (PageUptodate(page)) | |
560 | return 0; | |
e6e7ca92 | 561 | ClearPageError(page); |
afc51aaa DW |
562 | |
563 | do { | |
564 | iomap_adjust_read_range(inode, iop, &block_start, | |
565 | block_end - block_start, &poff, &plen); | |
566 | if (plen == 0) | |
567 | break; | |
568 | ||
32a38a49 CH |
569 | if (!(flags & IOMAP_WRITE_F_UNSHARE) && |
570 | (from <= poff || from >= poff + plen) && | |
d3b40439 CH |
571 | (to <= poff || to >= poff + plen)) |
572 | continue; | |
573 | ||
c039b997 | 574 | if (iomap_block_needs_zeroing(inode, srcmap, block_start)) { |
32a38a49 CH |
575 | if (WARN_ON_ONCE(flags & IOMAP_WRITE_F_UNSHARE)) |
576 | return -EIO; | |
d3b40439 | 577 | zero_user_segments(page, poff, from, to, poff + plen); |
14284fed MWO |
578 | } else { |
579 | int status = iomap_read_page_sync(block_start, page, | |
580 | poff, plen, srcmap); | |
581 | if (status) | |
582 | return status; | |
afc51aaa | 583 | } |
14284fed | 584 | iomap_set_range_uptodate(page, poff, plen); |
afc51aaa DW |
585 | } while ((block_start += plen) < block_end); |
586 | ||
d3b40439 | 587 | return 0; |
afc51aaa DW |
588 | } |
589 | ||
590 | static int | |
591 | iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags, | |
c039b997 | 592 | struct page **pagep, struct iomap *iomap, struct iomap *srcmap) |
afc51aaa DW |
593 | { |
594 | const struct iomap_page_ops *page_ops = iomap->page_ops; | |
afc51aaa DW |
595 | struct page *page; |
596 | int status = 0; | |
597 | ||
598 | BUG_ON(pos + len > iomap->offset + iomap->length); | |
c039b997 GR |
599 | if (srcmap != iomap) |
600 | BUG_ON(pos + len > srcmap->offset + srcmap->length); | |
afc51aaa DW |
601 | |
602 | if (fatal_signal_pending(current)) | |
603 | return -EINTR; | |
604 | ||
605 | if (page_ops && page_ops->page_prepare) { | |
606 | status = page_ops->page_prepare(inode, pos, len, iomap); | |
607 | if (status) | |
608 | return status; | |
609 | } | |
610 | ||
dcd6158d CH |
611 | page = grab_cache_page_write_begin(inode->i_mapping, pos >> PAGE_SHIFT, |
612 | AOP_FLAG_NOFS); | |
afc51aaa DW |
613 | if (!page) { |
614 | status = -ENOMEM; | |
615 | goto out_no_page; | |
616 | } | |
617 | ||
c039b997 GR |
618 | if (srcmap->type == IOMAP_INLINE) |
619 | iomap_read_inline_data(inode, page, srcmap); | |
afc51aaa | 620 | else if (iomap->flags & IOMAP_F_BUFFER_HEAD) |
c039b997 | 621 | status = __block_write_begin_int(page, pos, len, NULL, srcmap); |
afc51aaa | 622 | else |
32a38a49 | 623 | status = __iomap_write_begin(inode, pos, len, flags, page, |
c039b997 | 624 | srcmap); |
afc51aaa DW |
625 | |
626 | if (unlikely(status)) | |
627 | goto out_unlock; | |
628 | ||
629 | *pagep = page; | |
630 | return 0; | |
631 | ||
632 | out_unlock: | |
633 | unlock_page(page); | |
634 | put_page(page); | |
635 | iomap_write_failed(inode, pos, len); | |
636 | ||
637 | out_no_page: | |
638 | if (page_ops && page_ops->page_done) | |
639 | page_ops->page_done(inode, pos, 0, NULL, iomap); | |
640 | return status; | |
641 | } | |
642 | ||
643 | int | |
644 | iomap_set_page_dirty(struct page *page) | |
645 | { | |
646 | struct address_space *mapping = page_mapping(page); | |
647 | int newly_dirty; | |
648 | ||
649 | if (unlikely(!mapping)) | |
650 | return !TestSetPageDirty(page); | |
651 | ||
652 | /* | |
653 | * Lock out page->mem_cgroup migration to keep PageDirty | |
654 | * synchronized with per-memcg dirty page counters. | |
655 | */ | |
656 | lock_page_memcg(page); | |
657 | newly_dirty = !TestSetPageDirty(page); | |
658 | if (newly_dirty) | |
659 | __set_page_dirty(page, mapping, 0); | |
660 | unlock_page_memcg(page); | |
661 | ||
662 | if (newly_dirty) | |
663 | __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); | |
664 | return newly_dirty; | |
665 | } | |
666 | EXPORT_SYMBOL_GPL(iomap_set_page_dirty); | |
667 | ||
e25ba8cb MWO |
668 | static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len, |
669 | size_t copied, struct page *page) | |
afc51aaa DW |
670 | { |
671 | flush_dcache_page(page); | |
672 | ||
673 | /* | |
674 | * The blocks that were entirely written will now be uptodate, so we | |
675 | * don't have to worry about a readpage reading them and overwriting a | |
676 | * partial write. However if we have encountered a short write and only | |
677 | * partially written into a block, it will not be marked uptodate, so a | |
678 | * readpage might come in and destroy our partial write. | |
679 | * | |
680 | * Do the simplest thing, and just treat any short write to a non | |
681 | * uptodate page as a zero-length write, and force the caller to redo | |
682 | * the whole thing. | |
683 | */ | |
684 | if (unlikely(copied < len && !PageUptodate(page))) | |
685 | return 0; | |
686 | iomap_set_range_uptodate(page, offset_in_page(pos), len); | |
687 | iomap_set_page_dirty(page); | |
688 | return copied; | |
689 | } | |
690 | ||
e25ba8cb MWO |
691 | static size_t iomap_write_end_inline(struct inode *inode, struct page *page, |
692 | struct iomap *iomap, loff_t pos, size_t copied) | |
afc51aaa DW |
693 | { |
694 | void *addr; | |
695 | ||
696 | WARN_ON_ONCE(!PageUptodate(page)); | |
697 | BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data)); | |
698 | ||
7ed3cd1a | 699 | flush_dcache_page(page); |
afc51aaa DW |
700 | addr = kmap_atomic(page); |
701 | memcpy(iomap->inline_data + pos, addr + pos, copied); | |
702 | kunmap_atomic(addr); | |
703 | ||
704 | mark_inode_dirty(inode); | |
705 | return copied; | |
706 | } | |
707 | ||
e25ba8cb MWO |
708 | /* Returns the number of bytes copied. May be 0. Cannot be an errno. */ |
709 | static size_t iomap_write_end(struct inode *inode, loff_t pos, size_t len, | |
710 | size_t copied, struct page *page, struct iomap *iomap, | |
711 | struct iomap *srcmap) | |
afc51aaa DW |
712 | { |
713 | const struct iomap_page_ops *page_ops = iomap->page_ops; | |
714 | loff_t old_size = inode->i_size; | |
e25ba8cb | 715 | size_t ret; |
afc51aaa | 716 | |
c039b997 | 717 | if (srcmap->type == IOMAP_INLINE) { |
afc51aaa | 718 | ret = iomap_write_end_inline(inode, page, iomap, pos, copied); |
c039b997 | 719 | } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) { |
afc51aaa DW |
720 | ret = block_write_end(NULL, inode->i_mapping, pos, len, copied, |
721 | page, NULL); | |
722 | } else { | |
c12d6fa8 | 723 | ret = __iomap_write_end(inode, pos, len, copied, page); |
afc51aaa DW |
724 | } |
725 | ||
726 | /* | |
727 | * Update the in-memory inode size after copying the data into the page | |
728 | * cache. It's up to the file system to write the updated size to disk, | |
729 | * preferably after I/O completion so that no stale data is exposed. | |
730 | */ | |
731 | if (pos + ret > old_size) { | |
732 | i_size_write(inode, pos + ret); | |
733 | iomap->flags |= IOMAP_F_SIZE_CHANGED; | |
734 | } | |
735 | unlock_page(page); | |
736 | ||
737 | if (old_size < pos) | |
738 | pagecache_isize_extended(inode, old_size, pos); | |
739 | if (page_ops && page_ops->page_done) | |
740 | page_ops->page_done(inode, pos, ret, page, iomap); | |
741 | put_page(page); | |
742 | ||
743 | if (ret < len) | |
744 | iomap_write_failed(inode, pos, len); | |
745 | return ret; | |
746 | } | |
747 | ||
748 | static loff_t | |
749 | iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data, | |
c039b997 | 750 | struct iomap *iomap, struct iomap *srcmap) |
afc51aaa DW |
751 | { |
752 | struct iov_iter *i = data; | |
753 | long status = 0; | |
754 | ssize_t written = 0; | |
afc51aaa DW |
755 | |
756 | do { | |
757 | struct page *page; | |
758 | unsigned long offset; /* Offset into pagecache page */ | |
759 | unsigned long bytes; /* Bytes to write to page */ | |
760 | size_t copied; /* Bytes copied from user */ | |
761 | ||
762 | offset = offset_in_page(pos); | |
763 | bytes = min_t(unsigned long, PAGE_SIZE - offset, | |
764 | iov_iter_count(i)); | |
765 | again: | |
766 | if (bytes > length) | |
767 | bytes = length; | |
768 | ||
769 | /* | |
770 | * Bring in the user page that we will copy from _first_. | |
771 | * Otherwise there's a nasty deadlock on copying from the | |
772 | * same page as we're writing to, without it being marked | |
773 | * up-to-date. | |
774 | * | |
775 | * Not only is this an optimisation, but it is also required | |
776 | * to check that the address is actually valid, when atomic | |
777 | * usercopies are used, below. | |
778 | */ | |
779 | if (unlikely(iov_iter_fault_in_readable(i, bytes))) { | |
780 | status = -EFAULT; | |
781 | break; | |
782 | } | |
783 | ||
c039b997 GR |
784 | status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, |
785 | srcmap); | |
afc51aaa DW |
786 | if (unlikely(status)) |
787 | break; | |
788 | ||
789 | if (mapping_writably_mapped(inode->i_mapping)) | |
790 | flush_dcache_page(page); | |
791 | ||
792 | copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); | |
793 | ||
e25ba8cb | 794 | copied = iomap_write_end(inode, pos, bytes, copied, page, iomap, |
c039b997 | 795 | srcmap); |
afc51aaa DW |
796 | |
797 | cond_resched(); | |
798 | ||
799 | iov_iter_advance(i, copied); | |
800 | if (unlikely(copied == 0)) { | |
801 | /* | |
802 | * If we were unable to copy any data at all, we must | |
803 | * fall back to a single segment length write. | |
804 | * | |
805 | * If we didn't fallback here, we could livelock | |
806 | * because not all segments in the iov can be copied at | |
807 | * once without a pagefault. | |
808 | */ | |
809 | bytes = min_t(unsigned long, PAGE_SIZE - offset, | |
810 | iov_iter_single_seg_count(i)); | |
811 | goto again; | |
812 | } | |
813 | pos += copied; | |
814 | written += copied; | |
815 | length -= copied; | |
816 | ||
817 | balance_dirty_pages_ratelimited(inode->i_mapping); | |
818 | } while (iov_iter_count(i) && length); | |
819 | ||
820 | return written ? written : status; | |
821 | } | |
822 | ||
823 | ssize_t | |
824 | iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter, | |
825 | const struct iomap_ops *ops) | |
826 | { | |
827 | struct inode *inode = iocb->ki_filp->f_mapping->host; | |
828 | loff_t pos = iocb->ki_pos, ret = 0, written = 0; | |
829 | ||
830 | while (iov_iter_count(iter)) { | |
831 | ret = iomap_apply(inode, pos, iov_iter_count(iter), | |
832 | IOMAP_WRITE, ops, iter, iomap_write_actor); | |
833 | if (ret <= 0) | |
834 | break; | |
835 | pos += ret; | |
836 | written += ret; | |
837 | } | |
838 | ||
839 | return written ? written : ret; | |
840 | } | |
841 | EXPORT_SYMBOL_GPL(iomap_file_buffered_write); | |
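/*
 * Usage sketch (editorial addition, not part of the original source): a
 * minimal ->write_iter caller, assuming a hypothetical
 * my_fs_buffered_write_iomap_ops and omitting the O_DIRECT and error
 * handling a real filesystem needs.  Note that the caller advances
 * iocb->ki_pos and issues generic_write_sync() itself.
 */
static ssize_t my_fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from,
				&my_fs_buffered_write_iomap_ops);
	inode_unlock(inode);

	if (ret > 0) {
		iocb->ki_pos += ret;
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}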
842 | ||
afc51aaa | 843 | static loff_t |
3590c4d8 | 844 | iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data, |
c039b997 | 845 | struct iomap *iomap, struct iomap *srcmap) |
afc51aaa DW |
846 | { |
847 | long status = 0; | |
d4ff3b2e | 848 | loff_t written = 0; |
afc51aaa | 849 | |
3590c4d8 CH |
850 | /* don't bother with blocks that are not shared to start with */ |
851 | if (!(iomap->flags & IOMAP_F_SHARED)) | |
852 | return length; | |
853 | /* don't bother with holes or unwritten extents */ | |
c039b997 | 854 | if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) |
3590c4d8 CH |
855 | return length; |
856 | ||
afc51aaa | 857 | do { |
32a38a49 CH |
858 | unsigned long offset = offset_in_page(pos); |
859 | unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length); | |
860 | struct page *page; | |
afc51aaa | 861 | |
32a38a49 | 862 | status = iomap_write_begin(inode, pos, bytes, |
c039b997 | 863 | IOMAP_WRITE_F_UNSHARE, &page, iomap, srcmap); |
afc51aaa DW |
864 | if (unlikely(status)) |
865 | return status; | |
866 | ||
c039b997 GR |
867 | status = iomap_write_end(inode, pos, bytes, bytes, page, iomap, |
868 | srcmap); | |
e25ba8cb MWO |
869 | if (WARN_ON_ONCE(status == 0)) |
870 | return -EIO; | |
afc51aaa DW |
871 | |
872 | cond_resched(); | |
873 | ||
874 | pos += status; | |
875 | written += status; | |
876 | length -= status; | |
877 | ||
878 | balance_dirty_pages_ratelimited(inode->i_mapping); | |
879 | } while (length); | |
880 | ||
881 | return written; | |
882 | } | |
883 | ||
884 | int | |
3590c4d8 | 885 | iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len, |
afc51aaa DW |
886 | const struct iomap_ops *ops) |
887 | { | |
888 | loff_t ret; | |
889 | ||
890 | while (len) { | |
891 | ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL, | |
3590c4d8 | 892 | iomap_unshare_actor); |
afc51aaa DW |
893 | if (ret <= 0) |
894 | return ret; | |
895 | pos += ret; | |
896 | len -= ret; | |
897 | } | |
898 | ||
899 | return 0; | |
900 | } | |
3590c4d8 | 901 | EXPORT_SYMBOL_GPL(iomap_file_unshare); |
afc51aaa | 902 | |
81ee8e52 MWO |
903 | static s64 iomap_zero(struct inode *inode, loff_t pos, u64 length, |
904 | struct iomap *iomap, struct iomap *srcmap) | |
afc51aaa DW |
905 | { |
906 | struct page *page; | |
907 | int status; | |
81ee8e52 MWO |
908 | unsigned offset = offset_in_page(pos); |
909 | unsigned bytes = min_t(u64, PAGE_SIZE - offset, length); | |
afc51aaa | 910 | |
c039b997 | 911 | status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, srcmap); |
afc51aaa DW |
912 | if (status) |
913 | return status; | |
914 | ||
915 | zero_user(page, offset, bytes); | |
916 | mark_page_accessed(page); | |
917 | ||
c039b997 | 918 | return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap); |
afc51aaa DW |
919 | } |
920 | ||
81ee8e52 MWO |
921 | static loff_t iomap_zero_range_actor(struct inode *inode, loff_t pos, |
922 | loff_t length, void *data, struct iomap *iomap, | |
923 | struct iomap *srcmap) | |
afc51aaa DW |
924 | { |
925 | bool *did_zero = data; | |
926 | loff_t written = 0; | |
afc51aaa DW |
927 | |
928 | /* already zeroed? we're done. */ | |
c039b997 | 929 | if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) |
81ee8e52 | 930 | return length; |
afc51aaa DW |
931 | |
932 | do { | |
81ee8e52 | 933 | s64 bytes; |
afc51aaa DW |
934 | |
935 | if (IS_DAX(inode)) | |
81ee8e52 | 936 | bytes = dax_iomap_zero(pos, length, iomap); |
afc51aaa | 937 | else |
81ee8e52 MWO |
938 | bytes = iomap_zero(inode, pos, length, iomap, srcmap); |
939 | if (bytes < 0) | |
940 | return bytes; | |
afc51aaa DW |
941 | |
942 | pos += bytes; | |
81ee8e52 | 943 | length -= bytes; |
afc51aaa DW |
944 | written += bytes; |
945 | if (did_zero) | |
946 | *did_zero = true; | |
81ee8e52 | 947 | } while (length > 0); |
afc51aaa DW |
948 | |
949 | return written; | |
950 | } | |
951 | ||
952 | int | |
953 | iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, | |
954 | const struct iomap_ops *ops) | |
955 | { | |
956 | loff_t ret; | |
957 | ||
958 | while (len > 0) { | |
959 | ret = iomap_apply(inode, pos, len, IOMAP_ZERO, | |
960 | ops, did_zero, iomap_zero_range_actor); | |
961 | if (ret <= 0) | |
962 | return ret; | |
963 | ||
964 | pos += ret; | |
965 | len -= ret; | |
966 | } | |
967 | ||
968 | return 0; | |
969 | } | |
970 | EXPORT_SYMBOL_GPL(iomap_zero_range); | |
971 | ||
972 | int | |
973 | iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, | |
974 | const struct iomap_ops *ops) | |
975 | { | |
976 | unsigned int blocksize = i_blocksize(inode); | |
977 | unsigned int off = pos & (blocksize - 1); | |
978 | ||
979 | /* Block boundary? Nothing to do */ | |
980 | if (!off) | |
981 | return 0; | |
982 | return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops); | |
983 | } | |
984 | EXPORT_SYMBOL_GPL(iomap_truncate_page); | |
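/*
 * Worked example (editorial addition, not part of the original source):
 * with a 1024-byte block size and pos == 5000, off = 5000 & 1023 = 904, so
 * iomap_truncate_page() zeroes the remaining 120 bytes of that block (file
 * offsets 5000..5119).  A block-aligned pos returns immediately with
 * nothing to do.
 */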
985 | ||
986 | static loff_t | |
987 | iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length, | |
c039b997 | 988 | void *data, struct iomap *iomap, struct iomap *srcmap) |
afc51aaa DW |
989 | { |
990 | struct page *page = data; | |
991 | int ret; | |
992 | ||
993 | if (iomap->flags & IOMAP_F_BUFFER_HEAD) { | |
994 | ret = __block_write_begin_int(page, pos, length, NULL, iomap); | |
995 | if (ret) | |
996 | return ret; | |
997 | block_commit_write(page, 0, length); | |
998 | } else { | |
999 | WARN_ON_ONCE(!PageUptodate(page)); | |
1000 | iomap_page_create(inode, page); | |
1001 | set_page_dirty(page); | |
1002 | } | |
1003 | ||
1004 | return length; | |
1005 | } | |
1006 | ||
1007 | vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops) | |
1008 | { | |
1009 | struct page *page = vmf->page; | |
1010 | struct inode *inode = file_inode(vmf->vma->vm_file); | |
1011 | unsigned long length; | |
243145bc | 1012 | loff_t offset; |
afc51aaa DW |
1013 | ssize_t ret; |
1014 | ||
1015 | lock_page(page); | |
243145bc AG |
1016 | ret = page_mkwrite_check_truncate(page, inode); |
1017 | if (ret < 0) | |
afc51aaa | 1018 | goto out_unlock; |
243145bc | 1019 | length = ret; |
afc51aaa | 1020 | |
243145bc | 1021 | offset = page_offset(page); |
afc51aaa DW |
1022 | while (length > 0) { |
1023 | ret = iomap_apply(inode, offset, length, | |
1024 | IOMAP_WRITE | IOMAP_FAULT, ops, page, | |
1025 | iomap_page_mkwrite_actor); | |
1026 | if (unlikely(ret <= 0)) | |
1027 | goto out_unlock; | |
1028 | offset += ret; | |
1029 | length -= ret; | |
1030 | } | |
1031 | ||
1032 | wait_for_stable_page(page); | |
1033 | return VM_FAULT_LOCKED; | |
1034 | out_unlock: | |
1035 | unlock_page(page); | |
1036 | return block_page_mkwrite_return(ret); | |
1037 | } | |
1038 | EXPORT_SYMBOL_GPL(iomap_page_mkwrite); | |
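/*
 * Usage sketch (editorial addition, not part of the original source):
 * iomap_page_mkwrite() is normally called from a filesystem's
 * ->page_mkwrite handler after it has taken whatever locks it needs.  The
 * my_fs_* names below are hypothetical.
 */
static vm_fault_t my_fs_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);
	ret = iomap_page_mkwrite(vmf, &my_fs_iomap_ops);
	sb_end_pagefault(inode->i_sb);
	return ret;
}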
598ecfba CH |
1039 | |
1040 | static void | |
48d64cd1 | 1041 | iomap_finish_page_writeback(struct inode *inode, struct page *page, |
0fb2d720 | 1042 | int error, unsigned int len) |
598ecfba | 1043 | { |
48d64cd1 | 1044 | struct iomap_page *iop = to_iomap_page(page); |
598ecfba CH |
1045 | |
1046 | if (error) { | |
48d64cd1 | 1047 | SetPageError(page); |
598ecfba CH |
1048 | mapping_set_error(inode->i_mapping, -EIO); |
1049 | } | |
1050 | ||
24addd84 | 1051 | WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop); |
0fb2d720 | 1052 | WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0); |
598ecfba | 1053 | |
0fb2d720 | 1054 | if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending)) |
48d64cd1 | 1055 | end_page_writeback(page); |
598ecfba CH |
1056 | } |
1057 | ||
1058 | /* | |
1059 | * We're now finished for good with this ioend structure. Update the page | |
1060 | * state, release holds on bios, and finally free up memory. Do not use the | |
1061 | * ioend after this. | |
1062 | */ | |
1063 | static void | |
1064 | iomap_finish_ioend(struct iomap_ioend *ioend, int error) | |
1065 | { | |
1066 | struct inode *inode = ioend->io_inode; | |
1067 | struct bio *bio = &ioend->io_inline_bio; | |
1068 | struct bio *last = ioend->io_bio, *next; | |
1069 | u64 start = bio->bi_iter.bi_sector; | |
c275779f | 1070 | loff_t offset = ioend->io_offset; |
598ecfba CH |
1071 | bool quiet = bio_flagged(bio, BIO_QUIET); |
1072 | ||
1073 | for (bio = &ioend->io_inline_bio; bio; bio = next) { | |
1074 | struct bio_vec *bv; | |
1075 | struct bvec_iter_all iter_all; | |
1076 | ||
1077 | /* | |
1078 | * For the last bio, bi_private points to the ioend, so we | |
1079 | * need to explicitly end the iteration here. | |
1080 | */ | |
1081 | if (bio == last) | |
1082 | next = NULL; | |
1083 | else | |
1084 | next = bio->bi_private; | |
1085 | ||
1086 | /* walk each page on bio, ending page IO on them */ | |
1087 | bio_for_each_segment_all(bv, bio, iter_all) | |
0fb2d720 MWO |
1088 | iomap_finish_page_writeback(inode, bv->bv_page, error, |
1089 | bv->bv_len); | |
598ecfba CH |
1090 | bio_put(bio); |
1091 | } | |
c275779f | 1092 | /* The ioend has been freed by bio_put() */ |
598ecfba CH |
1093 | |
1094 | if (unlikely(error && !quiet)) { | |
1095 | printk_ratelimited(KERN_ERR | |
9cd0ed63 | 1096 | "%s: writeback error on inode %lu, offset %lld, sector %llu", |
c275779f | 1097 | inode->i_sb->s_id, inode->i_ino, offset, start); |
598ecfba CH |
1098 | } |
1099 | } | |
1100 | ||
1101 | void | |
1102 | iomap_finish_ioends(struct iomap_ioend *ioend, int error) | |
1103 | { | |
1104 | struct list_head tmp; | |
1105 | ||
1106 | list_replace_init(&ioend->io_list, &tmp); | |
1107 | iomap_finish_ioend(ioend, error); | |
1108 | ||
1109 | while (!list_empty(&tmp)) { | |
1110 | ioend = list_first_entry(&tmp, struct iomap_ioend, io_list); | |
1111 | list_del_init(&ioend->io_list); | |
1112 | iomap_finish_ioend(ioend, error); | |
1113 | } | |
1114 | } | |
1115 | EXPORT_SYMBOL_GPL(iomap_finish_ioends); | |
1116 | ||
1117 | /* | |
1118 | * We can merge two adjacent ioends if they have the same set of work to do. | |
1119 | */ | |
1120 | static bool | |
1121 | iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next) | |
1122 | { | |
1123 | if (ioend->io_bio->bi_status != next->io_bio->bi_status) | |
1124 | return false; | |
1125 | if ((ioend->io_flags & IOMAP_F_SHARED) ^ | |
1126 | (next->io_flags & IOMAP_F_SHARED)) | |
1127 | return false; | |
1128 | if ((ioend->io_type == IOMAP_UNWRITTEN) ^ | |
1129 | (next->io_type == IOMAP_UNWRITTEN)) | |
1130 | return false; | |
1131 | if (ioend->io_offset + ioend->io_size != next->io_offset) | |
1132 | return false; | |
1133 | return true; | |
1134 | } | |
1135 | ||
1136 | void | |
1137 | iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends, | |
1138 | void (*merge_private)(struct iomap_ioend *ioend, | |
1139 | struct iomap_ioend *next)) | |
1140 | { | |
1141 | struct iomap_ioend *next; | |
1142 | ||
1143 | INIT_LIST_HEAD(&ioend->io_list); | |
1144 | ||
1145 | while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend, | |
1146 | io_list))) { | |
1147 | if (!iomap_ioend_can_merge(ioend, next)) | |
1148 | break; | |
1149 | list_move_tail(&next->io_list, &ioend->io_list); | |
1150 | ioend->io_size += next->io_size; | |
1151 | if (next->io_private && merge_private) | |
1152 | merge_private(ioend, next); | |
1153 | } | |
1154 | } | |
1155 | EXPORT_SYMBOL_GPL(iomap_ioend_try_merge); | |
1156 | ||
1157 | static int | |
1158 | iomap_ioend_compare(void *priv, struct list_head *a, struct list_head *b) | |
1159 | { | |
b3d423ec CH |
1160 | struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list); |
1161 | struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list); | |
598ecfba | 1162 | |
598ecfba CH |
1163 | if (ia->io_offset < ib->io_offset) |
1164 | return -1; | |
b3d423ec | 1165 | if (ia->io_offset > ib->io_offset) |
598ecfba CH |
1166 | return 1; |
1167 | return 0; | |
1168 | } | |
1169 | ||
1170 | void | |
1171 | iomap_sort_ioends(struct list_head *ioend_list) | |
1172 | { | |
1173 | list_sort(NULL, ioend_list, iomap_ioend_compare); | |
1174 | } | |
1175 | EXPORT_SYMBOL_GPL(iomap_sort_ioends); | |
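/*
 * Usage sketch (editorial addition, not part of the original source): a
 * filesystem that defers ioend completion to a workqueue typically splices
 * its pending list, sorts it, merges adjacent ioends and then finishes
 * them, roughly like the hypothetical helper below (locking omitted).
 */
static void my_fs_finish_ioends(struct list_head *pending, int error)
{
	struct iomap_ioend *ioend;
	LIST_HEAD(completed);

	list_replace_init(pending, &completed);
	iomap_sort_ioends(&completed);
	while ((ioend = list_first_entry_or_null(&completed,
			struct iomap_ioend, io_list))) {
		list_del_init(&ioend->io_list);
		iomap_ioend_try_merge(ioend, &completed, NULL);
		iomap_finish_ioends(ioend, error);
	}
}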
1176 | ||
1177 | static void iomap_writepage_end_bio(struct bio *bio) | |
1178 | { | |
1179 | struct iomap_ioend *ioend = bio->bi_private; | |
1180 | ||
1181 | iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status)); | |
1182 | } | |
1183 | ||
1184 | /* | |
1185 | * Submit the final bio for an ioend. | |
1186 | * | |
1187 | * If @error is non-zero, it means that we have a situation where some part of | |
1188 | * the submission process has failed after we have marked pages for writeback | |
1189 | * and unlocked them. In this situation, we need to fail the bio instead of | |
1190 | * submitting it. This typically only happens on a filesystem shutdown. | |
1191 | */ | |
1192 | static int | |
1193 | iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend, | |
1194 | int error) | |
1195 | { | |
1196 | ioend->io_bio->bi_private = ioend; | |
1197 | ioend->io_bio->bi_end_io = iomap_writepage_end_bio; | |
1198 | ||
1199 | if (wpc->ops->prepare_ioend) | |
1200 | error = wpc->ops->prepare_ioend(ioend, error); | |
1201 | if (error) { | |
1202 | /* | |
1203 | * If we are failing the IO now, just mark the ioend with an | |
1204 | * error and finish it. This will run IO completion immediately | |
1205 | * as there is only one reference to the ioend at this point in | |
1206 | * time. | |
1207 | */ | |
1208 | ioend->io_bio->bi_status = errno_to_blk_status(error); | |
1209 | bio_endio(ioend->io_bio); | |
1210 | return error; | |
1211 | } | |
1212 | ||
1213 | submit_bio(ioend->io_bio); | |
1214 | return 0; | |
1215 | } | |
1216 | ||
1217 | static struct iomap_ioend * | |
1218 | iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc, | |
1219 | loff_t offset, sector_t sector, struct writeback_control *wbc) | |
1220 | { | |
1221 | struct iomap_ioend *ioend; | |
1222 | struct bio *bio; | |
1223 | ||
1224 | bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &iomap_ioend_bioset); | |
1225 | bio_set_dev(bio, wpc->iomap.bdev); | |
1226 | bio->bi_iter.bi_sector = sector; | |
1227 | bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc); | |
1228 | bio->bi_write_hint = inode->i_write_hint; | |
1229 | wbc_init_bio(wbc, bio); | |
1230 | ||
1231 | ioend = container_of(bio, struct iomap_ioend, io_inline_bio); | |
1232 | INIT_LIST_HEAD(&ioend->io_list); | |
1233 | ioend->io_type = wpc->iomap.type; | |
1234 | ioend->io_flags = wpc->iomap.flags; | |
1235 | ioend->io_inode = inode; | |
1236 | ioend->io_size = 0; | |
1237 | ioend->io_offset = offset; | |
1238 | ioend->io_private = NULL; | |
1239 | ioend->io_bio = bio; | |
1240 | return ioend; | |
1241 | } | |
1242 | ||
1243 | /* | |
1244 | * Allocate a new bio, and chain the old bio to the new one. | |
1245 | * | |
1246 | * Note that we have to perform the chaining in this unintuitive order | |
1247 | * so that the bi_private linkage is set up in the right direction for the | |
1248 | * traversal in iomap_finish_ioend(). | |
1249 | */ | |
1250 | static struct bio * | |
1251 | iomap_chain_bio(struct bio *prev) | |
1252 | { | |
1253 | struct bio *new; | |
1254 | ||
1255 | new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES); | |
1256 | bio_copy_dev(new, prev);/* also copies over blkcg information */ | |
1257 | new->bi_iter.bi_sector = bio_end_sector(prev); | |
1258 | new->bi_opf = prev->bi_opf; | |
1259 | new->bi_write_hint = prev->bi_write_hint; | |
1260 | ||
1261 | bio_chain(prev, new); | |
1262 | bio_get(prev); /* for iomap_finish_ioend */ | |
1263 | submit_bio(prev); | |
1264 | return new; | |
1265 | } | |
1266 | ||
1267 | static bool | |
1268 | iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset, | |
1269 | sector_t sector) | |
1270 | { | |
1271 | if ((wpc->iomap.flags & IOMAP_F_SHARED) != | |
1272 | (wpc->ioend->io_flags & IOMAP_F_SHARED)) | |
1273 | return false; | |
1274 | if (wpc->iomap.type != wpc->ioend->io_type) | |
1275 | return false; | |
1276 | if (offset != wpc->ioend->io_offset + wpc->ioend->io_size) | |
1277 | return false; | |
1278 | if (sector != bio_end_sector(wpc->ioend->io_bio)) | |
1279 | return false; | |
1280 | return true; | |
1281 | } | |
1282 | ||
1283 | /* | |
1284 | * Test to see if we have an existing ioend structure that we could append to | |
1285 | * first, otherwise finish off the current ioend and start another. | |
1286 | */ | |
1287 | static void | |
1288 | iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page, | |
1289 | struct iomap_page *iop, struct iomap_writepage_ctx *wpc, | |
1290 | struct writeback_control *wbc, struct list_head *iolist) | |
1291 | { | |
1292 | sector_t sector = iomap_sector(&wpc->iomap, offset); | |
1293 | unsigned len = i_blocksize(inode); | |
1294 | unsigned poff = offset & (PAGE_SIZE - 1); | |
1295 | bool merged, same_page = false; | |
1296 | ||
1297 | if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) { | |
1298 | if (wpc->ioend) | |
1299 | list_add(&wpc->ioend->io_list, iolist); | |
1300 | wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc); | |
1301 | } | |
1302 | ||
1303 | merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff, | |
1304 | &same_page); | |
0fb2d720 MWO |
1305 | if (iop) |
1306 | atomic_add(len, &iop->write_bytes_pending); | |
598ecfba CH |
1307 | |
1308 | if (!merged) { | |
1309 | if (bio_full(wpc->ioend->io_bio, len)) { | |
1310 | wpc->ioend->io_bio = | |
1311 | iomap_chain_bio(wpc->ioend->io_bio); | |
1312 | } | |
1313 | bio_add_page(wpc->ioend->io_bio, page, len, poff); | |
1314 | } | |
1315 | ||
1316 | wpc->ioend->io_size += len; | |
1317 | wbc_account_cgroup_owner(wbc, page, len); | |
1318 | } | |
1319 | ||
1320 | /* | |
1321 | * We implement an immediate ioend submission policy here to avoid needing to | |
1322 | * chain multiple ioends and hence nest mempool allocations which can violate | |
1323 | * forward progress guarantees we need to provide. The current ioend we are | |
1324 | * adding blocks to is cached on the writepage context, and if the new block | |
1325 | * does not append to the cached ioend it will create a new ioend and cache that | |
1326 | * instead. | |
1327 | * | |
1328 | * If a new ioend is created and cached, the old ioend is returned and queued | |
1329 | * locally for submission once the entire page is processed or an error has been | |
1330 | * detected. While ioends are submitted immediately after they are completed, | |
1331 | * batching optimisations are provided by higher level block plugging. | |
1332 | * | |
1333 | * At the end of a writeback pass, there will be a cached ioend remaining on the | |
1334 | * writepage context that the caller will need to submit. | |
1335 | */ | |
1336 | static int | |
1337 | iomap_writepage_map(struct iomap_writepage_ctx *wpc, | |
1338 | struct writeback_control *wbc, struct inode *inode, | |
1339 | struct page *page, u64 end_offset) | |
1340 | { | |
1341 | struct iomap_page *iop = to_iomap_page(page); | |
1342 | struct iomap_ioend *ioend, *next; | |
1343 | unsigned len = i_blocksize(inode); | |
1344 | u64 file_offset; /* file offset of page */ | |
1345 | int error = 0, count = 0, i; | |
1346 | LIST_HEAD(submit_list); | |
1347 | ||
24addd84 | 1348 | WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop); |
0fb2d720 | 1349 | WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0); |
598ecfba CH |
1350 | |
1351 | /* | |
1352 | * Walk through the page to find areas to write back. If we run off the | |
1353 | * end of the current map or find the current map invalid, grab a new | |
1354 | * one. | |
1355 | */ | |
1356 | for (i = 0, file_offset = page_offset(page); | |
1357 | i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset; | |
1358 | i++, file_offset += len) { | |
1359 | if (iop && !test_bit(i, iop->uptodate)) | |
1360 | continue; | |
1361 | ||
1362 | error = wpc->ops->map_blocks(wpc, inode, file_offset); | |
1363 | if (error) | |
1364 | break; | |
3e19e6f3 CH |
1365 | if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE)) |
1366 | continue; | |
598ecfba CH |
1367 | if (wpc->iomap.type == IOMAP_HOLE) |
1368 | continue; | |
1369 | iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc, | |
1370 | &submit_list); | |
1371 | count++; | |
1372 | } | |
1373 | ||
1374 | WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list)); | |
1375 | WARN_ON_ONCE(!PageLocked(page)); | |
1376 | WARN_ON_ONCE(PageWriteback(page)); | |
1377 | ||
1378 | /* | |
1379 | * We cannot cancel the ioend directly here on error. We may have | |
1380 | * already set other pages under writeback and hence we have to run I/O | |
1381 | * completion to mark the error state of the pages under writeback | |
1382 | * appropriately. | |
1383 | */ | |
1384 | if (unlikely(error)) { | |
1385 | if (!count) { | |
1386 | /* | |
1387 | * If the current page hasn't been added to ioend, it | |
1388 | * won't be affected by I/O completions and we must | |
1389 | * discard and unlock it right here. | |
1390 | */ | |
1391 | if (wpc->ops->discard_page) | |
1392 | wpc->ops->discard_page(page); | |
1393 | ClearPageUptodate(page); | |
1394 | unlock_page(page); | |
1395 | goto done; | |
1396 | } | |
1397 | ||
1398 | /* | |
1399 | * If the page was not fully cleaned, we need to ensure that the | |
1400 | * higher layers come back to it correctly. That means we need | |
1401 | * to keep the page dirty, and for WB_SYNC_ALL writeback we need | |
1402 | * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed | |
1403 | * so another attempt to write this page in this writeback sweep | |
1404 | * will be made. | |
1405 | */ | |
1406 | set_page_writeback_keepwrite(page); | |
1407 | } else { | |
1408 | clear_page_dirty_for_io(page); | |
1409 | set_page_writeback(page); | |
1410 | } | |
1411 | ||
1412 | unlock_page(page); | |
1413 | ||
1414 | /* | |
1415 | * Preserve the original error if there was one, otherwise catch | |
1416 | * submission errors here and propagate into subsequent ioend | |
1417 | * submissions. | |
1418 | */ | |
1419 | list_for_each_entry_safe(ioend, next, &submit_list, io_list) { | |
1420 | int error2; | |
1421 | ||
1422 | list_del_init(&ioend->io_list); | |
1423 | error2 = iomap_submit_ioend(wpc, ioend, error); | |
1424 | if (error2 && !error) | |
1425 | error = error2; | |
1426 | } | |
1427 | ||
1428 | /* | |
1429 | * We can end up here with no error and nothing to write only if we race | |
1430 | * with a partial page truncate on a sub-page block sized filesystem. | |
1431 | */ | |
1432 | if (!count) | |
1433 | end_page_writeback(page); | |
1434 | done: | |
1435 | mapping_set_error(page->mapping, error); | |
1436 | return error; | |
1437 | } | |
1438 | ||
1439 | /* | |
1440 | * Write out a dirty page. | |
1441 | * | |
1442 | * For delalloc space on the page we need to allocate space and flush it. | |
1443 | * For unwritten space on the page we need to start the conversion to | |
1444 | * regular allocated space. | |
1445 | */ | |
1446 | static int | |
1447 | iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data) | |
1448 | { | |
1449 | struct iomap_writepage_ctx *wpc = data; | |
1450 | struct inode *inode = page->mapping->host; | |
1451 | pgoff_t end_index; | |
1452 | u64 end_offset; | |
1453 | loff_t offset; | |
1454 | ||
1ac99452 | 1455 | trace_iomap_writepage(inode, page_offset(page), PAGE_SIZE); |
598ecfba CH |
1456 | |
1457 | /* | |
1458 | * Refuse to write the page out if we are called from reclaim context. | |
1459 | * | |
1460 | * This avoids stack overflows when called from deep within the call | |
1461 | * stacks of random callers doing direct reclaim or memcg reclaim. We explicitly | |
1462 | * allow reclaim from kswapd as the stack usage there is relatively low. | |
1463 | * | |
1464 | * This should never happen except in the case of a VM regression so | |
1465 | * warn about it. | |
1466 | */ | |
1467 | if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == | |
1468 | PF_MEMALLOC)) | |
1469 | goto redirty; | |
1470 | ||
1471 | /* | |
1472 | * Given that we do not allow direct reclaim to call us, we should | |
1473 | * never be called in a recursive filesystem reclaim context. | |
1474 | */ | |
1475 | if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS)) | |
1476 | goto redirty; | |
1477 | ||
1478 | /* | |
1479 | * Is this page beyond the end of the file? | |
1480 | * | |
1481 | * The page index is less than the end_index, adjust the end_offset | |
1482 | * to the highest offset that this page should represent. | |
1483 | * ----------------------------------------------------- | |
1484 | * | file mapping | <EOF> | | |
1485 | * ----------------------------------------------------- | |
1486 | * | Page ... | Page N-2 | Page N-1 | Page N | | | |
1487 | * ^--------------------------------^----------|-------- | |
1488 | * | desired writeback range | see else | | |
1489 | * ---------------------------------^------------------| | |
1490 | */ | |
1491 | offset = i_size_read(inode); | |
1492 | end_index = offset >> PAGE_SHIFT; | |
1493 | if (page->index < end_index) | |
1494 | end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT; | |
1495 | else { | |
1496 | /* | |
1497 | * Check whether the page to write out is beyond or straddles | |
1498 | * i_size or not. | |
1499 | * ------------------------------------------------------- | |
1500 | * | file mapping | <EOF> | | |
1501 | * ------------------------------------------------------- | |
1502 | * | Page ... | Page N-2 | Page N-1 | Page N | Beyond | | |
1503 | * ^--------------------------------^-----------|--------- | |
1504 | * | | Straddles | | |
1505 | * ---------------------------------^-----------|--------| | |
1506 | */ | |
1507 | unsigned offset_into_page = offset & (PAGE_SIZE - 1); | |
1508 | ||
1509 | /* | |
1510 | * Skip the page if it is fully outside i_size, e.g. due to a | |
1511 | * truncate operation that is in progress. We must redirty the | |
1512 | * page so that reclaim stops reclaiming it. Otherwise | |
1513 | * iomap_vm_releasepage() is called on it and gets confused. | |
1514 | * | |
1515 | * Note that the end_index is unsigned long, it would overflow | |
1516 | * if the given offset is greater than 16TB on a 32-bit system | |
1517 | * and if we do check the page is fully outside i_size or not | |
1518 | * via "if (page->index >= end_index + 1)" as "end_index + 1" | |
1519 | * will be evaluated to 0. Hence this page will be redirtied | |
1520 | * and be written out repeatedly which would result in an | |
1521 | * infinite loop; the user program that performs this operation | |
1522 | * will hang. Instead, we can verify this situation by checking | |
1523 | * if the page to write is totally beyond the i_size or if its | |
1524 | * offset is just equal to the EOF. | |
1525 | */ | |
1526 | if (page->index > end_index || | |
1527 | (page->index == end_index && offset_into_page == 0)) | |
1528 | goto redirty; | |
1529 | ||
1530 | /* | |
1531 | * The page straddles i_size. It must be zeroed out on each | |
1532 | * and every writepage invocation because it may be mmapped. | |
1533 | * "A file is mapped in multiples of the page size. For a file | |
1534 | * that is not a multiple of the page size, the remaining | |
1535 | * memory is zeroed when mapped, and writes to that region are | |
1536 | * not written out to the file." | |
1537 | */ | |
1538 | zero_user_segment(page, offset_into_page, PAGE_SIZE); | |
1539 | ||
1540 | /* Adjust the end_offset to the end of file */ | |
1541 | end_offset = offset; | |
1542 | } | |
1543 | ||
1544 | return iomap_writepage_map(wpc, wbc, inode, page, end_offset); | |
1545 | ||
1546 | redirty: | |
1547 | redirty_page_for_writepage(wbc, page); | |
1548 | unlock_page(page); | |
1549 | return 0; | |
1550 | } | |
1551 | ||
1552 | int | |
1553 | iomap_writepage(struct page *page, struct writeback_control *wbc, | |
1554 | struct iomap_writepage_ctx *wpc, | |
1555 | const struct iomap_writeback_ops *ops) | |
1556 | { | |
1557 | int ret; | |
1558 | ||
1559 | wpc->ops = ops; | |
1560 | ret = iomap_do_writepage(page, wbc, wpc); | |
1561 | if (!wpc->ioend) | |
1562 | return ret; | |
1563 | return iomap_submit_ioend(wpc, wpc->ioend, ret); | |
1564 | } | |
1565 | EXPORT_SYMBOL_GPL(iomap_writepage); | |
1566 | ||
1567 | int | |
1568 | iomap_writepages(struct address_space *mapping, struct writeback_control *wbc, | |
1569 | struct iomap_writepage_ctx *wpc, | |
1570 | const struct iomap_writeback_ops *ops) | |
1571 | { | |
1572 | int ret; | |
1573 | ||
1574 | wpc->ops = ops; | |
1575 | ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc); | |
1576 | if (!wpc->ioend) | |
1577 | return ret; | |
1578 | return iomap_submit_ioend(wpc, wpc->ioend, ret); | |
1579 | } | |
1580 | EXPORT_SYMBOL_GPL(iomap_writepages); | |
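/*
 * Usage sketch (editorial addition, not part of the original source): the
 * writeback entry points are wired up by handing iomap_writepages() a
 * writepage context plus the filesystem's iomap_writeback_ops, which supply
 * ->map_blocks and optionally ->prepare_ioend and ->discard_page.  The
 * my_fs_* names below are hypothetical.
 */
static int my_fs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc, &my_fs_writeback_ops);
}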
1581 | ||
1582 | static int __init iomap_init(void) | |
1583 | { | |
1584 | return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE), | |
1585 | offsetof(struct iomap_ioend, io_inline_bio), | |
1586 | BIOSET_NEED_BVECS); | |
1587 | } | |
1588 | fs_initcall(iomap_init); |