/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	/* clamp to the largest u64 if file_offset + len would overflow */
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
		    "%llu\n", (unsigned long long)offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

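/*
 * Both helpers above treat an entry as the half-open byte range
 * [file_offset, file_offset + len).  Illustrative sketch (not part of this
 * file): for an entry with file_offset == 4096 and len == 4096,
 *
 *	offset_in_entry(entry, 4096);		// 1: first byte of the entry
 *	offset_in_entry(entry, 8192);		// 0: one past the last byte
 *	range_overlaps(entry, 0, 4096);		// 0: [0, 4096) only touches
 *						//    the boundary
 *	range_overlaps(entry, 8000, 1000);	// 1: overlaps the tail
 */
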
/*
 * find the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kzalloc(sizeof(*entry), GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

	return 0;
}

int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

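/*
 * Illustrative sketch (not part of this file), assuming a caller that has
 * just reserved an extent ('ins.objectid' is its disk bytenr, 'ins.offset'
 * its disk length) for a delalloc range of 'ram_size' bytes at 'start':
 *
 *	ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
 *				       ram_size, ins.offset, 0);
 *	if (ret)
 *		return ret;
 *
 * A type of 0 sets no extra flag bit, i.e. a plain COW extent; nocow and
 * prealloc writers pass BTRFS_ORDERED_NOCOW or BTRFS_ORDERED_PREALLOC
 * instead.
 */
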
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
		       (unsigned long long)dec_start,
		       (unsigned long long)dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

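/*
 * Illustrative sketch (not part of this file): a DIO write completion path
 * can use the advancing *file_offset to account one bio against every
 * ordered extent it spans.  'start', 'bytes' and 'uptodate' describing the
 * completed IO are assumptions here:
 *
 *	struct btrfs_ordered_extent *ordered = NULL;
 *	u64 offset = start;
 *	u64 end = start + bytes;
 *
 *	do {
 *		if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
 *							 &offset, end - offset,
 *							 uptodate)) {
 *			// 'ordered' is fully written: finish it, then drop
 *			// the reference handed back through 'cached'
 *			btrfs_put_ordered_extent(ordered);
 *			ordered = NULL;
 *		}
 *	} while (offset < end);
 *
 * This relies on every byte of the range being covered by some ordered
 * extent, which the DIO setup path arranges.
 */
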
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

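/*
 * Illustrative sketch (not part of this file): buffered writeback
 * completion accounts a single page range against its ordered extent,
 * passing a 'cached' slot so repeated calls for the same extent can skip
 * the tree search ('page_start' and 'uptodate' are assumptions):
 *
 *	struct btrfs_ordered_extent *ordered = NULL;
 *
 *	if (btrfs_dec_test_ordered_pending(inode, &ordered, page_start,
 *					   PAGE_CACHE_SIZE, uptodate)) {
 *		// last outstanding bytes: insert file extents and csums,
 *		// then drop the reference returned through 'ordered'
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */
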
/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kfree(entry);
	}
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);

	trace_btrfs_ordered_extent_remove(inode, entry);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		list_del_init(&BTRFS_I(inode)->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
	wake_up(&entry->wait);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
void btrfs_wait_ordered_extents(struct btrfs_root *root,
				int nocow_only, int delay_iput)
{
	struct list_head splice;
	struct list_head *cur;
	struct btrfs_ordered_extent *ordered;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_splice_init(&root->fs_info->ordered_extents, &splice);
	while (!list_empty(&splice)) {
		cur = splice.next;
		ordered = list_entry(cur, struct btrfs_ordered_extent,
				     root_extent_list);
		if (nocow_only &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
			list_move(&ordered->root_extent_list,
				  &root->fs_info->ordered_extents);
			cond_resched_lock(&root->fs_info->ordered_extent_lock);
			continue;
		}

		list_del_init(&ordered->root_extent_list);
		atomic_inc(&ordered->refs);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(ordered->inode);

		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			if (delay_iput)
				btrfs_add_delayed_iput(inode);
			else
				iput(inode);
		} else {
			btrfs_put_ordered_extent(ordered);
		}

		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
}

/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the io.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
void btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->fs_info->ordered_extent_lock);
again:
	list_splice_init(&root->fs_info->ordered_operations, &splice);

	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);

		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);

		if (!wait && inode) {
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
				      &root->fs_info->ordered_operations);
		}
		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			if (wait)
				btrfs_wait_ordered_range(inode, 0, (u64)-1);
			else
				filemap_flush(inode->i_mapping);
			btrfs_add_delayed_iput(inode);
		}

		cond_resched();
		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	if (wait && !list_empty(&root->fs_info->ordered_operations))
		goto again;

	spin_unlock(&root->fs_info->ordered_extent_lock);
	mutex_unlock(&root->fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

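/*
 * Illustrative sketch (not part of this file): callers that need a range
 * quiet typically lock it, look for a pending ordered extent, and if one is
 * found drop the lock and wait before retrying ('io_tree', 'start' and
 * 'end' are assumptions):
 *
 *	while (1) {
 *		lock_extent(io_tree, start, end);
 *		ordered = btrfs_lookup_ordered_range(inode, start,
 *						     end - start + 1);
 *		if (!ordered)
 *			break;
 *		unlock_extent(io_tree, start, end);
 *		btrfs_start_ordered_extent(inode, ordered, 1);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */
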
/*
 * Used to wait on ordered extents across a large range of bytes.
 */
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;
	int found;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, setup an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback.  So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work.  So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there.  We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness.  Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags))
		filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	found = 0;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		found++;
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	if (ordered)
		offset = entry_end(ordered);
	else
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);

	spin_lock_irq(&tree->lock);
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size || offset <= disk_i_size) {
		goto out;
	}

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct covering this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (test->file_offset >= disk_i_size) {
			/*
			 * we don't update disk_i_size now, so record this
			 * pending i_size update.  Otherwise we will not know
			 * the real i_size.
			 */
			if (test->outstanding_isize < offset)
				test->outstanding_isize = offset;
			if (ordered &&
			    ordered->outstanding_isize >
			    test->outstanding_isize)
				test->outstanding_isize =
					ordered->outstanding_isize;
			goto out;
		}
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one,
	 * and we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}

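/*
 * Illustrative sketch (not part of this file): the IO-done path calls this
 * with the just-finished ordered extent before writing the inode back, so
 * disk_i_size only ever covers fully written data:
 *
 *	btrfs_ordered_update_i_size(inode, 0, ordered_extent);
 *	ret = btrfs_update_inode_fallback(trans, root, inode);
 *
 * The offset argument is ignored when an ordered extent is passed;
 * entry_end(ordered_extent) is used instead.
 */
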
/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_sector_sum *sector_sums;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int ret = 1;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 1;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr) {
			num_sectors = ordered_sum->len / sectorsize;
			sector_sums = ordered_sum->sums;
			for (i = 0; i < num_sectors; i++) {
				if (sector_sums[i].bytenr == disk_bytenr) {
					*sum = sector_sums[i].sum;
					ret = 0;
					goto out;
				}
			}
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return ret;
}

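/*
 * Illustrative sketch (not part of this file): a read-side csum lookup can
 * fall back to this when the checksum isn't in the btree yet; note the
 * return convention is 0 on success ('page_start' and 'disk_bytenr' are
 * assumptions):
 *
 *	u32 csum;
 *
 *	if (!btrfs_find_ordered_sum(inode, page_start, disk_bytenr, &csum)) {
 *		// csum holds the checksum for the sector at disk_bytenr
 *	}
 */
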
/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, struct inode *inode)
{
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod < root->fs_info->last_trans_committed)
		return;

	/*
	 * the transaction is already committing.  Just start the IO and
	 * don't bother with all of this list nonsense
	 */
	if (trans && root->fs_info->running_transaction->blocked) {
		btrfs_wait_ordered_range(inode, 0, (u64)-1);
		return;
	}

	spin_lock(&root->fs_info->ordered_extent_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &root->fs_info->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
}