/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		/* PageChecked is some magic around finding pages that
		 * have been modified without going through
		 * btrfs_set_page_dirty; clear it here.
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);
		BUG_ON(!split || !split2);

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a byte number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
		       u64 start, u64 end, u64 *hint_byte, int drop_cache)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == inode->i_ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > inode->i_ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		search_start = max(key.offset, start);
		if (recow) {
			btrfs_release_path(root, path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(root, path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset);
				BUG_ON(ret);
				*hint_byte = disk_bytenr;
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, end - key.offset);
				*hint_byte = disk_bytenr;
			}
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, extent_end - start);
				*hint_byte = disk_bytenr;
			}
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset);
				BUG_ON(ret);
				inode_sub_bytes(inode,
						extent_end - key.offset);
				*hint_byte = disk_bytenr;
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			BUG_ON(ret);

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(root, path);
			continue;
		}

		BUG_ON(1);
	}

	if (del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}

	btrfs_free_path(path);
	return ret;
}

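/*
 * helper for btrfs_mark_extent_written: returns 1 if the file extent
 * item at @slot is a plain (uncompressed, unencrypted) REG extent backed
 * by the same disk extent (bytenr/orig_offset) and its range agrees with
 * *start/*end, which are then filled in; returns 0 otherwise.
 */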
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	BUG_ON(!path);
again:
	recow = 0;
	split = start;
	key.objectid = inode->i_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != inode->i_ino ||
	       key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     inode->i_ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     inode->i_ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(root, path);
			goto again;
		}
		BUG_ON(ret < 0);

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   inode->i_ino, orig_offset);
		BUG_ON(ret);

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     inode->i_ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(root, path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					inode->i_ino, orig_offset);
		BUG_ON(ret);
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     inode->i_ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(root, path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					inode->i_ino, orig_offset);
		BUG_ON(ret);
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
	}
out:
	btrfs_free_path(path);
	return 0;
}

/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos)
{
	int ret = 0;

	if ((pos & (PAGE_CACHE_SIZE - 1)) && !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
	}
	return 0;
}

/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
				  struct page **pages, size_t num_pages,
				  loff_t pos, unsigned long first_index,
				  unsigned long last_index, size_t write_bytes)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	int faili = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

	if (start_pos > inode->i_size) {
		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
		if (err)
			return err;
	}

again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(pages[i], pos);
		if (i == num_pages - 1)
			err = prepare_uptodate_page(pages[i],
						    pos + write_bytes);
		if (err) {
			page_cache_release(pages[i]);
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}
	err = 0;
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state,
				 GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
				 GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		page_cache_release(pages[faili]);
		faili--;
	}
	return err;

}

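/*
 * buffered write path: reserve delalloc space, lock down a batch of pages
 * with prepare_pages(), copy from the iov_iter and then mark the pages
 * dirty/delalloc with btrfs_dirty_pages().  Short copies shrink the
 * reservation and fall back to copying one page at a time.
 */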
static noinline ssize_t __btrfs_buffered_write(struct file *file,
					       struct iov_iter *i,
					       loff_t pos)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	unsigned long first_index;
	unsigned long last_index;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;

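	/* cap the pages array at what the write actually needs, and at one
	 * page worth of page pointers */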
	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + iov_iter_count(i)) >> PAGE_CACHE_SHIFT;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		size_t dirty_pages;
		size_t copied;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			break;

		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			break;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, i);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0)
			dirty_pages = 0;
		else
			dirty_pages = (copied + offset +
				       PAGE_CACHE_SIZE - 1) >>
				       PAGE_CACHE_SHIFT;

		/*
		 * If we had a short copy we need to release the excess
		 * delalloc bytes we reserved.  We need to increment
		 * outstanding_extents because btrfs_delalloc_release_space
		 * will decrement it, but we still have an outstanding
		 * extent for the chunk we actually managed to copy.
		 */
		if (num_pages > dirty_pages) {
			if (copied > 0)
				atomic_inc(
					&BTRFS_I(inode)->outstanding_extents);
			btrfs_delalloc_release_space(inode,
					(num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT);
		}

		if (copied > 0) {
			ret = btrfs_dirty_pages(root, inode, pages,
						dirty_pages, pos, copied,
						NULL);
			if (ret) {
				btrfs_delalloc_release_space(inode,
					dirty_pages << PAGE_CACHE_SHIFT);
				btrfs_drop_pages(pages, num_pages);
				break;
			}
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited_nr(inode->i_mapping,
						   dirty_pages);
		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root, 1);
		btrfs_throttle(root);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	return num_written ? num_written : ret;
}

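/*
 * O_DIRECT writes go through the generic direct IO path first.  If that
 * write comes back short, the remainder is pushed through the buffered
 * path, then flushed with filemap_write_and_wait_range() and dropped from
 * the page cache so the range doesn't keep stale pages around.
 */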
static ssize_t __btrfs_direct_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos,
				    loff_t *ppos, size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct iov_iter i;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
					    count, ocount);

	/*
	 * the generic O_DIRECT will update in-memory i_size after the
	 * DIOs are done.  But our endio handlers that update the on
	 * disk i_size never update past the in memory i_size.  So we
	 * need one more update here to catch any additions to the
	 * file
	 */
	if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
		btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
		mark_inode_dirty(inode);
	}

	if (written < 0 || written == count)
		return written;

	pos += written;
	count -= written;
	iov_iter_init(&i, iov, nr_segs, count, written);
	written_buffered = __btrfs_buffered_write(file, &i, pos);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	endbyte = pos + written_buffered - 1;
	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	*ppos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
				 endbyte >> PAGE_CACHE_SHIFT);
out:
	return written ? written : err;
}

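/*
 * entry point for write(2)/aio writes: performs the generic write checks
 * under i_mutex, updates the file time and inode sequence number, and
 * then hands off to the direct or buffered write path above.  last_trans
 * is bumped afterwards so a following fsync notices the change.
 */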
static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	loff_t *ppos = &iocb->ki_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count, ocount;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	if (count == 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	err = file_remove_suid(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		mutex_unlock(&inode->i_mutex);
		err = -EROFS;
		goto out;
	}

	file_update_time(file);
	BTRFS_I(inode)->sequence++;

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
						   pos, ppos, count, ocount);
	} else {
		struct iov_iter i;

		iov_iter_init(&i, iov, nr_segs, count, num_written);

		num_written = __btrfs_buffered_write(file, &i, pos);
		if (num_written > 0)
			*ppos = pos + num_written;
	}

	mutex_unlock(&inode->i_mutex);

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
	if (num_written > 0 || num_written == -EIOCBQUEUED) {
		err = generic_write_sync(file, pos, num_written);
		if (err < 0 && num_written > 0)
			num_written = err;
	}
out:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (BTRFS_I(inode)->ordered_data_close) {
		BTRFS_I(inode)->ordered_data_close = 0;
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	trace_btrfs_sync_file(file, datasync);

	/* we wait first, since the writeback may change the inode */
	root->log_batch++;
	/* the VFS called filemap_fdatawrite for us */
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
	root->log_batch++;

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	/*
	 * if the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing
	 */
	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	/*
	 * ok we haven't committed the transaction yet, let's do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0)
		goto out;

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&dentry->d_inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
			if (ret == 0)
				ret = btrfs_end_transaction(trans, root);
			else
				ret = btrfs_commit_transaction(trans, root);
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
	mutex_lock(&dentry->d_inode->i_mutex);
out:
	return ret > 0 ? -EIO : ret;
}

static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

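/*
 * mmap support: faults go through the generic filemap_fault path, but
 * page_mkwrite is ours so pages dirtied through a mapping get the proper
 * btrfs delalloc accounting.
 */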
static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}

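/*
 * fallocate(): preallocate extents for the requested range.  Ordered IO
 * is flushed and the extent range locked first, then each hole (or area
 * past i_size without a prealloc extent) gets a preallocated extent via
 * btrfs_prealloc_file_range().  Only the FALLOC_FL_KEEP_SIZE mode flag
 * is supported.
 */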
static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct extent_state *cached_state = NULL;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	struct extent_map *em;
	int ret;

	alloc_start = offset & ~mask;
	alloc_end = (offset + len + mask) & ~mask;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);

	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, alloc_end);
	if (ret)
		goto out;

	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, i_size_read(inode),
					alloc_start);
		if (ret)
			goto out;
	}

	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
	if (ret)
		goto out;

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, 0, &cached_state, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state, GFP_NOFS);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		BUG_ON(IS_ERR(em) || !em);
		last_byte = min(extent_map_end(em), alloc_end);
		last_byte = (last_byte + mask) & ~mask;
		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
							last_byte - cur_offset,
							1 << inode->i_blkbits,
							offset + len,
							&alloc_hint);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		}
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state, GFP_NOFS);

	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}

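/* file operations used for regular btrfs files */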
const struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.aio_write	= btrfs_file_aio_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};