/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
		void *data, struct iomap *iomap);

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page, and, most importantly, avoids the need for filesystem
 * specific locking per page.  Instead, all the operations are amortised over
 * the entire range of pages.  It is assumed that the filesystems will lock
 * whatever resources they require in the iomap_begin call, and release them
 * in the iomap_end call (a sketch of such an ops pair follows this function).
 */
static loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from the start position for length bytes. This
	 * can span multiple pages - it is only guaranteed to return a range
	 * of a single type of pages (e.g. all in a hole, all mapped, or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, we cannot fail; otherwise we
	 * would expose transient stale data. If the reserve fails, we can
	 * safely back out, as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied. This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	ret = ops->iomap_end(inode, pos, length, written > 0 ? written : 0,
			flags, &iomap);

	return written ? written : ret;
}
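
/*
 * Example (sketch only, not part of this file): the begin/end contract
 * that iomap_apply() drives.  The myfs_* names and the block lookup
 * helper are hypothetical; a real filesystem would take its own locks
 * and reserve or allocate blocks in iomap_begin, then release them in
 * iomap_end.  iomap_begin may round iomap->offset down and may return
 * a shorter mapping than requested; iomap_apply() trims the range it
 * works on to what the filesystem actually provided.
 *
 *	static int myfs_iomap_begin(struct inode *inode, loff_t pos,
 *			loff_t length, unsigned flags, struct iomap *iomap)
 *	{
 *		// lock, reserve/allocate, then describe one extent
 *		iomap->type = IOMAP_MAPPED;
 *		iomap->offset = pos;
 *		iomap->length = length;
 *		iomap->blkno = myfs_block_for(inode, pos);	// hypothetical
 *		iomap->bdev = inode->i_sb->s_bdev;
 *		return 0;
 *	}
 *
 *	static int myfs_iomap_end(struct inode *inode, loff_t pos,
 *			loff_t length, ssize_t written, unsigned flags,
 *			struct iomap *iomap)
 *	{
 *		// undo what iomap_begin did; trim any reservation
 *		// beyond the bytes actually written
 *		return 0;
 *	}
 *
 *	struct iomap_ops myfs_iomap_ops = {
 *		.iomap_begin	= myfs_iomap_begin,
 *		.iomap_end	= myfs_iomap_end,
 *	};
 */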

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}
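
/*
 * Worked example (illustrative numbers): with i_size == 6000, a failed
 * write at pos == 4000 for len == 8192 truncates the page cache from
 * offset 6000 up to 12192 (the end of the failed write), so pages that
 * were already exposed below the old EOF are left alone.
 */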

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin_int(page, pos, len, NULL, iomap);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;

		iomap_write_failed(inode, pos, len);
	}

	*pagep = page;
	return status;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page)
{
	int ret;

	ret = generic_write_end(NULL, inode->i_mapping, pos, len,
			copied, page, NULL);
	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	/*
	 * Copies from kernel address space cannot fail (NFSD is a big user).
	 */
	if (!iter_is_iovec(i))
		flags |= AOP_FLAG_UNINTERRUPTIBLE;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
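		 *
		 * (Illustrative scenario: a write(2) whose source buffer
		 * is an mmap of the very page being written to; without
		 * this fault-in, the atomic usercopy below would deadlock
		 * against the locked, not-yet-uptodate destination page.)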
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		pagefault_enable();

		flush_dcache_page(page);
		mark_page_accessed(page);

		status = iomap_write_end(inode, pos, bytes, copied, page);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied
			 * at once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
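
/*
 * Example caller (sketch only; the myfs_* names are hypothetical): a
 * filesystem ->write_iter built on iomap_file_buffered_write(),
 * following the usual VFS locking and write-check pattern.
 *
 *	static ssize_t myfs_file_write_iter(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&myfs_iomap_ops);
 *		inode_unlock(inode);
 *
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */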

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes,
			AOP_FLAG_UNINTERRUPTIBLE | AOP_FLAG_NOFS, &page, iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = pos & (PAGE_SIZE - 1); /* Within page */
		bytes = min_t(unsigned, PAGE_SIZE - offset, count);

		status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);
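
/*
 * Example caller (sketch; myfs_iomap_ops is a hypothetical ops
 * instance): zeroing from the old to the new EOF when a truncate
 * extends a file, so stale data in a partially written tail block is
 * never exposed.
 *
 *	bool did_zero = false;
 *	int error;
 *
 *	error = iomap_zero_range(inode, oldsize, newsize - oldsize,
 *			&did_zero, &myfs_iomap_ops);
 */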

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		struct iomap_ops *ops)
{
	unsigned blocksize = (1 << inode->i_blkbits);
	unsigned off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
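
/*
 * Worked example (illustrative numbers): with a 4096-byte block size
 * and pos == 10000, off == 10000 & 4095 == 1808, so the remaining
 * 4096 - 1808 == 2288 bytes of the block, [10000, 12288), are zeroed.
 * A shrinking truncate would typically call this as
 *
 *	error = iomap_truncate_page(inode, newsize, &did_zero,
 *			&myfs_iomap_ops);
 *
 * where myfs_iomap_ops is again a hypothetical ops instance.
 */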

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	ret = __block_write_begin_int(page, pos & ~PAGE_MASK, length,
			NULL, iomap);
	if (ret)
		return ret;

	block_commit_write(page, 0, length);
	return length;
}

int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
		struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = size & ~PAGE_MASK;
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_WRITE,
				ops, page, iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	set_page_dirty(page);
	wait_for_stable_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
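
/*
 * Example caller (sketch; the myfs_* names are hypothetical): wiring
 * iomap_page_mkwrite() into a filesystem's vm_operations_struct so a
 * write fault on a shared mapping allocates blocks before the page is
 * made writable.
 *
 *	static int myfs_page_mkwrite(struct vm_area_struct *vma,
 *			struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vma->vm_file);
 *		int ret;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		file_update_time(vma->vm_file);
 *		ret = iomap_page_mkwrite(vma, vmf, &myfs_iomap_ops);
 *		sb_end_pagefault(inode->i_sb);
 *		return block_page_mkwrite_return(ret);
 *	}
 *
 *	static const struct vm_operations_struct myfs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 */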