iomap: remove superfluous mark_page_accessed from iomap_write_actor
[mirror_ubuntu-artful-kernel.git] / fs/iomap.c
1 /*
2 * Copyright (C) 2010 Red Hat, Inc.
3 * Copyright (c) 2016 Christoph Hellwig.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14 #include <linux/module.h>
15 #include <linux/compiler.h>
16 #include <linux/fs.h>
17 #include <linux/iomap.h>
18 #include <linux/uaccess.h>
19 #include <linux/gfp.h>
20 #include <linux/mm.h>
21 #include <linux/swap.h>
22 #include <linux/pagemap.h>
23 #include <linux/file.h>
24 #include <linux/uio.h>
25 #include <linux/backing-dev.h>
26 #include <linux/buffer_head.h>
27 #include <linux/dax.h>
28 #include "internal.h"
29
30 typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
31 void *data, struct iomap *iomap);
32
33 /*
34 * Execute an iomap write on a segment of the mapping that spans a
35 * contiguous range of pages that have identical block mapping state.
36 *
37 * This avoids the need to map pages individually, do individual allocations
38 * for each page and most importantly avoid the need for filesystem specific
39 * locking per page. Instead, all the operations are amortised over the entire
40 * range of pages. It is assumed that the filesystems will lock whatever
41 * resources they require in the iomap_begin call, and release them in the
42 * iomap_end call.
43 */
44 static loff_t
45 iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
46 struct iomap_ops *ops, void *data, iomap_actor_t actor)
47 {
48 struct iomap iomap = { 0 };
49 loff_t written = 0, ret;
50
51 /*
52 * Need to map a range from start position for length bytes. This can
53 * span multiple pages - it is only guaranteed to return a range of a
54 * single type of pages (e.g. all into a hole, all mapped or all
55 * unwritten). Failure at this point has nothing to undo.
56 *
57 * If allocation is required for this range, reserve the space now so
58 * that the allocation is guaranteed to succeed later on. Once we copy
59 * the data into the page cache pages, then we cannot fail otherwise we
60 * expose transient stale data. If the reserve fails, we can safely
61 * back out at this point as there is nothing to undo.
62 */
63 ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
64 if (ret)
65 return ret;
66 if (WARN_ON(iomap.offset > pos))
67 return -EIO;
68
69 /*
70 * Cut down the length to the one actually provided by the filesystem,
71 * as it might not be able to give us the whole size that we requested.
72 */
73 if (iomap.offset + iomap.length < pos + length)
74 length = iomap.offset + iomap.length - pos;
75
76 /*
77 * Now that we have guaranteed that the space allocation will succeed,
78 * we can do the copy-in page by page without having to worry about
79 * failures exposing transient data.
80 */
81 written = actor(inode, pos, length, data, &iomap);
82
83 /*
84 * Now the data has been copied, commit the range we've copied. This
85 * should not fail unless the filesystem has had a fatal error.
86 */
87 ret = ops->iomap_end(inode, pos, length, written > 0 ? written : 0,
88 flags, &iomap);
89
90 return written ? written : ret;
91 }
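
/*
 * Illustrative sketch, not part of this file: a minimal iomap_begin/iomap_end
 * pair as a filesystem might supply it, showing the contract iomap_apply()
 * relies on (reserve/lock in ->iomap_begin, release in ->iomap_end). All
 * names and the one-page delalloc policy below are hypothetical.
 */
#if 0	/* example only, never built */
static int example_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
		unsigned flags, struct iomap *iomap)
{
	/*
	 * Take whatever locks the operation needs and reserve delayed
	 * allocation space here; iomap_apply() will not call back into the
	 * filesystem again until ->iomap_end().
	 */
	iomap->type = IOMAP_DELALLOC;
	iomap->blkno = IOMAP_NULL_BLOCK;	/* no physical block assigned yet */
	iomap->offset = pos & ~((loff_t)PAGE_SIZE - 1);
	iomap->length = PAGE_SIZE;		/* may be shorter than @length */
	iomap->bdev = inode->i_sb->s_bdev;
	return 0;
}

static int example_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned flags, struct iomap *iomap)
{
	/* Trim any reservation beyond @written and drop the locks taken above. */
	return 0;
}

static struct iomap_ops example_iomap_ops = {
	.iomap_begin	= example_iomap_begin,
	.iomap_end	= example_iomap_end,
};
#endif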
92
93 static void
94 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
95 {
96 loff_t i_size = i_size_read(inode);
97
98 /*
99 * Only truncate newly allocated pages beyond EOF, even if the
100 * write started inside the existing inode size.
101 */
102 if (pos + len > i_size)
103 truncate_pagecache_range(inode, max(pos, i_size), pos + len);
104 }
105
106 static int
107 iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
108 struct page **pagep, struct iomap *iomap)
109 {
110 pgoff_t index = pos >> PAGE_SHIFT;
111 struct page *page;
112 int status = 0;
113
114 BUG_ON(pos + len > iomap->offset + iomap->length);
115
116 page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
117 if (!page)
118 return -ENOMEM;
119
120 status = __block_write_begin_int(page, pos, len, NULL, iomap);
121 if (unlikely(status)) {
122 unlock_page(page);
123 put_page(page);
124 page = NULL;
125
126 iomap_write_failed(inode, pos, len);
127 }
128
129 *pagep = page;
130 return status;
131 }
132
133 static int
134 iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
135 unsigned copied, struct page *page)
136 {
137 int ret;
138
139 ret = generic_write_end(NULL, inode->i_mapping, pos, len,
140 copied, page, NULL);
141 if (ret < len)
142 iomap_write_failed(inode, pos, len);
143 return ret;
144 }
145
146 static loff_t
147 iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
148 struct iomap *iomap)
149 {
150 struct iov_iter *i = data;
151 long status = 0;
152 ssize_t written = 0;
153 unsigned int flags = AOP_FLAG_NOFS;
154
155 /*
156 * Copies from kernel address space cannot fail (NFSD is a big user).
157 */
158 if (!iter_is_iovec(i))
159 flags |= AOP_FLAG_UNINTERRUPTIBLE;
160
161 do {
162 struct page *page;
163 unsigned long offset; /* Offset into pagecache page */
164 unsigned long bytes; /* Bytes to write to page */
165 size_t copied; /* Bytes copied from user */
166
167 offset = (pos & (PAGE_SIZE - 1));
168 bytes = min_t(unsigned long, PAGE_SIZE - offset,
169 iov_iter_count(i));
170 again:
171 if (bytes > length)
172 bytes = length;
173
174 /*
175 * Bring in the user page that we will copy from _first_.
176 * Otherwise there's a nasty deadlock on copying from the
177 * same page as we're writing to, without it being marked
178 * up-to-date.
179 *
180 * Not only is this an optimisation, but it is also required
181 * to check that the address is actually valid, when atomic
182 * usercopies are used, below.
183 */
184 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
185 status = -EFAULT;
186 break;
187 }
188
189 status = iomap_write_begin(inode, pos, bytes, flags, &page,
190 iomap);
191 if (unlikely(status))
192 break;
193
194 if (mapping_writably_mapped(inode->i_mapping))
195 flush_dcache_page(page);
196
197 pagefault_disable();
198 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
199 pagefault_enable();
200
201 flush_dcache_page(page);
202
203 status = iomap_write_end(inode, pos, bytes, copied, page);
204 if (unlikely(status < 0))
205 break;
206 copied = status;
207
208 cond_resched();
209
210 iov_iter_advance(i, copied);
211 if (unlikely(copied == 0)) {
212 /*
213 * If we were unable to copy any data at all, we must
214 * fall back to a single segment length write.
215 *
216 * If we didn't fall back here, we could livelock
217 * because not all segments in the iov can be copied at
218 * once without a pagefault.
219 */
220 bytes = min_t(unsigned long, PAGE_SIZE - offset,
221 iov_iter_single_seg_count(i));
222 goto again;
223 }
224 pos += copied;
225 written += copied;
226 length -= copied;
227
228 balance_dirty_pages_ratelimited(inode->i_mapping);
229 } while (iov_iter_count(i) && length);
230
231 return written ? written : status;
232 }
233
234 ssize_t
235 iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
236 struct iomap_ops *ops)
237 {
238 struct inode *inode = iocb->ki_filp->f_mapping->host;
239 loff_t pos = iocb->ki_pos, ret = 0, written = 0;
240
241 while (iov_iter_count(iter)) {
242 ret = iomap_apply(inode, pos, iov_iter_count(iter),
243 IOMAP_WRITE, ops, iter, iomap_write_actor);
244 if (ret <= 0)
245 break;
246 pos += ret;
247 written += ret;
248 }
249
250 return written ? written : ret;
251 }
252 EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
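
/*
 * Illustrative sketch, not part of this file: roughly how a filesystem's
 * ->write_iter method might drive iomap_file_buffered_write(). The
 * example_* names (including example_iomap_ops from the sketch further up)
 * are hypothetical; real callers such as XFS do additional locking and
 * DAX/direct-I/O handling around this.
 */
#if 0	/* example only, never built */
static ssize_t example_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from, &example_iomap_ops);
	inode_unlock(inode);

	if (ret > 0) {
		/* iomap_file_buffered_write() does not advance ki_pos itself. */
		iocb->ki_pos += ret;
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}
#endif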
253
254 static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
255 unsigned bytes, struct iomap *iomap)
256 {
257 struct page *page;
258 int status;
259
260 status = iomap_write_begin(inode, pos, bytes,
261 AOP_FLAG_UNINTERRUPTIBLE | AOP_FLAG_NOFS, &page, iomap);
262 if (status)
263 return status;
264
265 zero_user(page, offset, bytes);
266 mark_page_accessed(page);
267
268 return iomap_write_end(inode, pos, bytes, bytes, page);
269 }
270
271 static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
272 struct iomap *iomap)
273 {
274 sector_t sector = iomap->blkno +
275 (((pos & ~(PAGE_SIZE - 1)) - iomap->offset) >> 9);
276
277 return __dax_zero_page_range(iomap->bdev, sector, offset, bytes);
278 }
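
/*
 * Worked example for the sector arithmetic above (illustrative numbers,
 * assuming a 4096-byte PAGE_SIZE): with pos = 12800, iomap->offset = 8192
 * and iomap->blkno = 100, the page containing pos starts at
 * pos & ~(PAGE_SIZE - 1) = 12288, which is 4096 bytes = 8 sectors past the
 * start of the mapping, so the zeroing targets sector 100 + 8 = 108.
 * iomap->blkno is in 512-byte units, hence the shift by 9; the sub-page
 * offset and length are passed to __dax_zero_page_range() separately.
 */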
279
280 static loff_t
281 iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
282 void *data, struct iomap *iomap)
283 {
284 bool *did_zero = data;
285 loff_t written = 0;
286 int status;
287
288 /* already zeroed? we're done. */
289 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
290 return count;
291
292 do {
293 unsigned offset, bytes;
294
295 offset = pos & (PAGE_SIZE - 1); /* Within page */
296 bytes = min_t(unsigned, PAGE_SIZE - offset, count);
297
298 if (IS_DAX(inode))
299 status = iomap_dax_zero(pos, offset, bytes, iomap);
300 else
301 status = iomap_zero(inode, pos, offset, bytes, iomap);
302 if (status < 0)
303 return status;
304
305 pos += bytes;
306 count -= bytes;
307 written += bytes;
308 if (did_zero)
309 *did_zero = true;
310 } while (count > 0);
311
312 return written;
313 }
314
315 int
316 iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
317 struct iomap_ops *ops)
318 {
319 loff_t ret;
320
321 while (len > 0) {
322 ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
323 ops, did_zero, iomap_zero_range_actor);
324 if (ret <= 0)
325 return ret;
326
327 pos += ret;
328 len -= ret;
329 }
330
331 return 0;
332 }
333 EXPORT_SYMBOL_GPL(iomap_zero_range);
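
/*
 * Illustrative sketch, not part of this file: using iomap_zero_range() to
 * zero between the old and new EOF when a file is extended, so blocks that
 * already exist past the old size cannot expose stale data. example_extend()
 * and example_iomap_ops are hypothetical names.
 */
#if 0	/* example only, never built */
static int example_extend(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = i_size_read(inode);
	bool did_zero = false;

	if (newsize <= oldsize)
		return 0;
	return iomap_zero_range(inode, oldsize, newsize - oldsize, &did_zero,
			&example_iomap_ops);
}
#endif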
334
335 int
336 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
337 struct iomap_ops *ops)
338 {
339 unsigned blocksize = (1 << inode->i_blkbits);
340 unsigned off = pos & (blocksize - 1);
341
342 /* Block boundary? Nothing to do */
343 if (!off)
344 return 0;
345 return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
346 }
347 EXPORT_SYMBOL_GPL(iomap_truncate_page);
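
/*
 * Illustrative sketch, not part of this file: a truncate path using
 * iomap_truncate_page() to zero the tail of the block that will contain the
 * new EOF before shrinking i_size. Names are hypothetical; did_zero lets a
 * caller skip further work when nothing needed zeroing.
 */
#if 0	/* example only, never built */
static int example_shrink(struct inode *inode, loff_t newsize)
{
	bool did_zero = false;
	int error;

	error = iomap_truncate_page(inode, newsize, &did_zero, &example_iomap_ops);
	if (error)
		return error;

	truncate_setsize(inode, newsize);
	return 0;
}
#endif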
348
349 static loff_t
350 iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
351 void *data, struct iomap *iomap)
352 {
353 struct page *page = data;
354 int ret;
355
356 ret = __block_write_begin_int(page, pos & ~PAGE_MASK, length,
357 NULL, iomap);
358 if (ret)
359 return ret;
360
361 block_commit_write(page, 0, length);
362 return length;
363 }
364
365 int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
366 struct iomap_ops *ops)
367 {
368 struct page *page = vmf->page;
369 struct inode *inode = file_inode(vma->vm_file);
370 unsigned long length;
371 loff_t offset, size;
372 ssize_t ret;
373
374 lock_page(page);
375 size = i_size_read(inode);
376 if ((page->mapping != inode->i_mapping) ||
377 (page_offset(page) > size)) {
378 /* We overload EFAULT to mean page got truncated */
379 ret = -EFAULT;
380 goto out_unlock;
381 }
382
383 /* page is wholly or partially inside EOF */
384 if (((page->index + 1) << PAGE_SHIFT) > size)
385 length = size & ~PAGE_MASK;
386 else
387 length = PAGE_SIZE;
388
389 offset = page_offset(page);
390 while (length > 0) {
391 ret = iomap_apply(inode, offset, length, IOMAP_WRITE,
392 ops, page, iomap_page_mkwrite_actor);
393 if (unlikely(ret <= 0))
394 goto out_unlock;
395 offset += ret;
396 length -= ret;
397 }
398
399 set_page_dirty(page);
400 wait_for_stable_page(page);
401 return 0;
402 out_unlock:
403 unlock_page(page);
404 return ret;
405 }
406 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
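
/*
 * Illustrative sketch, not part of this file: wiring iomap_page_mkwrite() up
 * as a ->page_mkwrite handler. A real filesystem would also take a lock that
 * serialises against truncate around the call; example_* names are
 * hypothetical.
 */
#if 0	/* example only, never built */
static int example_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	int ret;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	ret = iomap_page_mkwrite(vma, vmf, &example_iomap_ops);
	sb_end_pagefault(inode->i_sb);

	/* Translate 0/-errno into the VM_FAULT_* codes the fault path expects. */
	return block_page_mkwrite_return(ret);
}

static const struct vm_operations_struct example_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= example_page_mkwrite,
};
#endif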
407
408 struct fiemap_ctx {
409 struct fiemap_extent_info *fi;
410 struct iomap prev;
411 };
412
413 static int iomap_to_fiemap(struct fiemap_extent_info *fi,
414 struct iomap *iomap, u32 flags)
415 {
416 switch (iomap->type) {
417 case IOMAP_HOLE:
418 /* skip holes */
419 return 0;
420 case IOMAP_DELALLOC:
421 flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
422 break;
423 case IOMAP_UNWRITTEN:
424 flags |= FIEMAP_EXTENT_UNWRITTEN;
425 break;
426 case IOMAP_MAPPED:
427 break;
428 }
429
430 return fiemap_fill_next_extent(fi, iomap->offset,
431 iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9 : 0,
432 iomap->length, flags | FIEMAP_EXTENT_MERGED);
433
434 }
435
436 static loff_t
437 iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
438 struct iomap *iomap)
439 {
440 struct fiemap_ctx *ctx = data;
441 loff_t ret = length;
442
443 if (iomap->type == IOMAP_HOLE)
444 return length;
445
446 ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
447 ctx->prev = *iomap;
448 switch (ret) {
449 case 0: /* success */
450 return length;
451 case 1: /* extent array full */
452 return 0;
453 default:
454 return ret;
455 }
456 }
457
458 int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
459 loff_t start, loff_t len, struct iomap_ops *ops)
460 {
461 struct fiemap_ctx ctx;
462 loff_t ret;
463
464 memset(&ctx, 0, sizeof(ctx));
465 ctx.fi = fi;
466 ctx.prev.type = IOMAP_HOLE;
467
468 ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
469 if (ret)
470 return ret;
471
472 ret = filemap_write_and_wait(inode->i_mapping);
473 if (ret)
474 return ret;
475
476 while (len > 0) {
477 ret = iomap_apply(inode, start, len, 0, ops, &ctx,
478 iomap_fiemap_actor);
479 if (ret < 0)
480 return ret;
481 if (ret == 0)
482 break;
483
484 start += ret;
485 len -= ret;
486 }
487
488 if (ctx.prev.type != IOMAP_HOLE) {
489 ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
490 if (ret < 0)
491 return ret;
492 }
493
494 return 0;
495 }
496 EXPORT_SYMBOL_GPL(iomap_fiemap);
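
/*
 * Illustrative sketch, not part of this file: an ->fiemap inode operation
 * built directly on iomap_fiemap(), using the hypothetical example_iomap_ops
 * from the sketches above.
 */
#if 0	/* example only, never built */
static int example_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return iomap_fiemap(inode, fieinfo, start, len, &example_iomap_ops);
}
#endif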