/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>

#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page, and most importantly avoids the need for filesystem specific
 * locking per page. Instead, all the operations are amortised over the entire
 * range of pages. It is assumed that the filesystems will lock whatever
 * resources they require in the iomap_begin call, and release them in the
 * iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, then we cannot fail otherwise we
	 * expose transient stale data. If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied. This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}

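/*
 * Example (illustrative only, not part of this file): the simplest
 * possible iomap_apply() caller.  "myfs_iomap_ops" is a hypothetical
 * filesystem's iomap_ops; the actor just counts the bytes of each
 * mapping handed to it and returns the full length so that the loop
 * advances over the whole range.  Real callers look like the actors
 * further down in this file.
 *
 *	static loff_t
 *	myfs_count_actor(struct inode *inode, loff_t pos, loff_t length,
 *			void *data, struct iomap *iomap)
 *	{
 *		loff_t *total = data;
 *
 *		*total += length;
 *		return length;
 *	}
 *
 *	loff_t total = 0, ret;
 *
 *	while (len > 0) {
 *		ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
 *				&myfs_iomap_ops, &total, myfs_count_actor);
 *		if (ret <= 0)
 *			break;
 *		pos += ret;
 *		len -= ret;
 *	}
 */
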
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin_int(page, pos, len, NULL, iomap);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;

		iomap_write_failed(inode, pos, len);
	}

	*pagep = page;
	return status;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page)
{
	int ret;

	ret = generic_write_end(NULL, inode->i_mapping, pos, len,
			copied, page, NULL);
	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	/*
	 * Copies from kernel address space cannot fail (NFSD is a big user).
	 */
	if (!iter_is_iovec(i))
		flags |= AOP_FLAG_UNINTERRUPTIBLE;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);

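/*
 * Example (illustrative sketch, not part of this file): wiring
 * iomap_file_buffered_write() into a filesystem's ->write_iter.
 * "myfs_iomap_ops" is hypothetical; the locking and generic_write_*
 * helpers follow the usual pattern, but a real filesystem may need
 * more (e.g. a direct I/O path alongside this one).
 *
 *	static ssize_t
 *	myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&myfs_iomap_ops);
 *		inode_unlock(inode);
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */
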
static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
				AOP_FLAG_NOFS | AOP_FLAG_UNINTERRUPTIBLE,
				&page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);

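/*
 * Example (illustrative sketch, not part of this file): a filesystem
 * that has just unshared a copy-on-write range might use
 * iomap_file_dirty() to pull the existing data back through
 * write_begin/write_end so that writeback rewrites it to the new
 * blocks.  "myfs_iomap_ops", "offset" and "len" are hypothetical:
 *
 *	error = iomap_file_dirty(inode, offset, len, &myfs_iomap_ops);
 *	if (error)
 *		goto out;
 */
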
static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes,
			AOP_FLAG_UNINTERRUPTIBLE | AOP_FLAG_NOFS, &page, iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	sector_t sector = iomap->blkno +
		(((pos & ~(PAGE_SIZE - 1)) - iomap->offset) >> 9);

	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, sector,
			offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed? we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = pos & (PAGE_SIZE - 1); /* Within page */
		bytes = min_t(unsigned, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);

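/*
 * Example (illustrative sketch, not part of this file): a typical
 * setattr/truncate path, with "myfs_iomap_ops" hypothetical.  Growing
 * a file zeroes the range between the old and new EOF; shrinking only
 * needs the partial tail block at the new EOF zeroed.
 *
 *	static int
 *	myfs_setsize(struct inode *inode, loff_t newsize)
 *	{
 *		loff_t oldsize = i_size_read(inode);
 *		bool did_zero = false;
 *
 *		if (newsize > oldsize)
 *			return iomap_zero_range(inode, oldsize,
 *					newsize - oldsize, &did_zero,
 *					&myfs_iomap_ops);
 *		return iomap_truncate_page(inode, newsize, &did_zero,
 *				&myfs_iomap_ops);
 *	}
 */
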
static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	ret = __block_write_begin_int(page, pos, length, NULL, iomap);
	if (ret)
		return ret;

	block_commit_write(page, 0, length);
	return length;
}

int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = size & ~PAGE_MASK;
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	set_page_dirty(page);
	wait_for_stable_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
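
/*
 * Example (illustrative sketch, not part of this file): hooking
 * iomap_page_mkwrite() up as a ->page_mkwrite handler.
 * "myfs_iomap_ops" is hypothetical; block_page_mkwrite_return()
 * converts the errno-style return value into VM_FAULT_* codes.
 *
 *	static int
 *	myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		int ret;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		file_update_time(vmf->vma->vm_file);
 *		ret = iomap_page_mkwrite(vmf, &myfs_iomap_ops);
 *		sb_end_pagefault(inode->i_sb);
 *		return block_page_mkwrite_return(ret);
 *	}
 *
 *	static const struct vm_operations_struct myfs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 */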

struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_MAPPED:
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9 : 0,
			iomap->length, flags);
}

static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		loff_t start, loff_t len, const struct iomap_ops *ops)
{
	struct fiemap_ctx ctx;
	loff_t ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.fi = fi;
	ctx.prev.type = IOMAP_HOLE;

	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = filemap_write_and_wait(inode->i_mapping);
		if (ret)
			return ret;
	}

	while (len > 0) {
		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
				iomap_fiemap_actor);
		/* inode with no (attribute) mapping will give ENOENT */
		if (ret == -ENOENT)
			break;
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		start += ret;
		len -= ret;
	}

	if (ctx.prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);
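
/*
 * Example (illustrative sketch, not part of this file): iomap_fiemap()
 * maps directly onto the ->fiemap inode operation, with
 * "myfs_iomap_ops" hypothetical:
 *
 *	static int
 *	myfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 *			u64 start, u64 len)
 *	{
 *		return iomap_fiemap(inode, fieinfo, start, len,
 *				&myfs_iomap_ops);
 *	}
 */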

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	iomap_dio_end_io_t	*end_io;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (iocb->ki_pos + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - iocb->ki_pos;
		iocb->ki_pos += ret;
	}

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;
	bool is_write = (dio->flags & IOMAP_DIO_WRITE);
	ssize_t ret;

	ret = iomap_dio_complete(dio);
	if (is_write && ret > 0)
		ret = generic_write_sync(iocb, ret);
	iocb->ki_complete(iocb, ret, 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_error)
		iomap_dio_set_error(dio, bio->bi_error);

	if (atomic_dec_and_test(&dio->ref)) {
		if (is_sync_kiocb(dio->iocb)) {
			struct task_struct *waiter = dio->submit.waiter;

			WRITE_ONCE(dio->submit.waiter, NULL);
			wake_up_process(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);
		bio_put(bio);
	}
}

static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio->bi_bdev = iomap->bdev;
	bio->bi_iter.bi_sector =
		iomap->blkno + ((pos - iomap->offset) >> 9);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	if (bio_add_page(bio, page, len, 0) != len)
		BUG();
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

	atomic_inc(&dio->ref);
	return submit_bio(bio);
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	int nr_pages, ret;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		/*FALLTHRU*/
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE)) {
			iov_iter_zero(length, dio->submit.iter);
			dio->size += length;
			return length;
		}
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
		break;
	case IOMAP_MAPPED:
		if (iomap->flags & IOMAP_F_SHARED)
			dio->flags |= IOMAP_DIO_COW;
		if (iomap->flags & IOMAP_F_NEW)
			need_zeroout = true;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		if (dio->error)
			return 0;

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio->bi_bdev = iomap->bdev;
		bio->bi_iter.bi_sector =
			iomap->blkno + ((pos - iomap->offset) >> 9);
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}

		if (dio->flags & IOMAP_DIO_WRITE) {
			bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
			task_io_account_write(bio->bi_iter.bi_size);
		} else {
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

		atomic_inc(&dio->ref);

		dio->submit.last_queue = bdev_get_queue(iomap->bdev);
		dio->submit.cookie = submit_bio(bio);
	} while (nr_pages);

	if (need_zeroout) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}

	iov_iter_advance(dio->submit.iter, length);
	return length;
}

ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, start = pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->end_io = end_io;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	if (is_sync_kiocb(iocb)) {
		dio->submit.waiter = current;
		dio->submit.cookie = BLK_QC_T_NONE;
		dio->submit.last_queue = NULL;
	}

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter->type == ITER_IOVEC)
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		dio->flags |= IOMAP_DIO_WRITE;
		flags |= IOMAP_WRITE;
	}

	ret = filemap_write_and_wait_range(mapping, start, end);
	if (ret)
		goto out_free_dio;

	ret = invalidate_inode_pages2_range(mapping,
			start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	WARN_ON_ONCE(ret);
	ret = 0;

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK)
				ret = 0;
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
			break;
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	if (ret >= 0 && iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
	    !inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			iomap_dio_set_error(dio, ret);
	}

	if (!atomic_dec_and_test(&dio->ref)) {
		if (!is_sync_kiocb(iocb))
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_mq_poll(dio->submit.last_queue,
					 dio->submit.cookie))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	ret = iomap_dio_complete(dio);

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the
	 * source of the write was an mmap'ed region of the file we're writing.
	 * Either one is a pretty crazy thing to do, so we don't support it
	 * 100%.  If this invalidation fails, tough, the write still worked...
	 */
	if (iov_iter_rw(iter) == WRITE) {
		int err = invalidate_inode_pages2_range(mapping,
				start >> PAGE_SHIFT, end >> PAGE_SHIFT);
		WARN_ON_ONCE(err);
	}

	return ret;

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
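
/*
 * Example (illustrative sketch, not part of this file): a direct I/O
 * read path built on iomap_dio_rw().  "myfs_iomap_ops" is
 * hypothetical, and the NULL argument means no end_io callback is
 * supplied.  Note that iomap_dio_rw() asserts that the caller holds
 * i_rwsem, hence the shared inode lock around the call.
 *
 *	static ssize_t
 *	myfs_file_dio_read(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = iomap_dio_rw(iocb, to, &myfs_iomap_ops, NULL);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */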