// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"
static struct kmem_cache *btrfs_ordered_extent_cache;
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}
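/*
 * Example: with file_offset = U64_MAX - 4096 and num_bytes = 8192 the sum
 * wraps around, so entry_end() clamps to (u64)-1 rather than returning a
 * small bogus end offset that would break the rb-tree ordering.
 */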
/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
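/*
 * Note: the tree is keyed by file_offset, but each node represents the whole
 * range [file_offset, entry_end).  Inserting a range that overlaps an
 * existing entry therefore returns that entry, which callers treat as a
 * fatal inconsistency (see the btrfs_panic() in __btrfs_add_ordered_extent()).
 */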
/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}
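/*
 * Both ranges are treated as half-open: [file_offset, file_offset + len).
 * E.g. an entry covering [0, 4096) does not overlap a query starting at
 * file_offset = 4096, since 0 + 4096 <= 4096.
 */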
/*
 * Look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}
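/*
 * tree->last caches the most recently returned node, so back-to-back lookups
 * that land in the same ordered extent (a common pattern in the writeback
 * and endio paths) skip the rb-tree walk entirely.
 */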
/*
 * Allocate and add a new ordered_extent into the per-inode tree.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int type, int dio,
				      int compress_type)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	int ret;

	if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_PREALLOC) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
		if (ret < 0)
			return ret;
		ret = 0;
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
		if (ret < 0)
			return ret;
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->disk_bytenr = disk_bytenr;
	entry->num_bytes = num_bytes;
	entry->disk_num_bytes = disk_num_bytes;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = ret;
	entry->physical = (u64)-1;

	ASSERT(type == BTRFS_ORDERED_REGULAR ||
	       type == BTRFS_ORDERED_NOCOW ||
	       type == BTRFS_ORDERED_PREALLOC ||
	       type == BTRFS_ORDERED_COMPRESSED);
	set_bit(type, &entry->flags);

	percpu_counter_add_batch(&fs_info->ordered_bytes, num_bytes,
				 fs_info->delalloc_batch);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
				"inconsistency in ordered tree at offset %llu",
				file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return 0;
}
int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
			     u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
			     int type)
{
	ASSERT(type == BTRFS_ORDERED_REGULAR ||
	       type == BTRFS_ORDERED_NOCOW ||
	       type == BTRFS_ORDERED_PREALLOC);
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 0,
					  BTRFS_COMPRESS_NONE);
}
int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
				 u64 disk_bytenr, u64 num_bytes,
				 u64 disk_num_bytes, int type)
{
	ASSERT(type == BTRFS_ORDERED_REGULAR ||
	       type == BTRFS_ORDERED_NOCOW ||
	       type == BTRFS_ORDERED_PREALLOC);
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 1,
					  BTRFS_COMPRESS_NONE);
}
int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int compress_type)
{
	ASSERT(compress_type != BTRFS_COMPRESS_NONE);
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes,
					  BTRFS_ORDERED_COMPRESSED, 0,
					  compress_type);
}
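/*
 * Illustrative sketch only (values are hypothetical): a 16K compressed COW
 * write that ends up taking 4K on disk would be registered roughly as
 *
 *	btrfs_add_ordered_extent_compress(inode, file_offset, disk_bytenr,
 *					  SZ_16K, SZ_4K, BTRFS_COMPRESS_ZLIB);
 *
 * i.e. num_bytes is the uncompressed file range while disk_num_bytes is the
 * compressed on-disk size.
 */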
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(entry->inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}
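/*
 * The _irq spinlock variant is used because tree->lock is also taken from
 * bio completion context (see btrfs_mark_ordered_io_finished() below),
 * which may run with interrupts disabled.
 */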
/*
 * Mark all ordered extents io inside the specified range finished.
 *
 * @page:	 The involved page for the operation.
 *		 For uncompressed buffered IO, the page status also needs to be
 *		 updated to indicate whether the pending ordered io is finished.
 *		 Can be NULL for direct IO and compressed write.
 *		 For these cases, callers are ensured they won't execute the
 *		 endio function twice.
 * @finish_func: The function to be executed when all the IO of an ordered
 *		 extent are finished.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
				struct page *page, u64 file_offset,
				u64 num_bytes, btrfs_func_t finish_func,
				bool uptodate)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_workqueue *wq;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	u64 cur = file_offset;

	if (btrfs_is_free_space_inode(inode))
		wq = fs_info->endio_freespace_worker;
	else
		wq = fs_info->endio_write_workers;

	if (page)
		ASSERT(page->mapping && page_offset(page) <= file_offset &&
		       file_offset + num_bytes <= page_offset(page) + PAGE_SIZE);

	spin_lock_irqsave(&tree->lock, flags);
	while (cur < file_offset + num_bytes) {
		u64 entry_end;
		u64 end;
		u32 len;

		node = tree_search(tree, cur);
		/* No ordered extents at all */
		if (!node)
			break;

		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		entry_end = entry->file_offset + entry->num_bytes;
		/*
		 * |<-- OE --->|  |
		 *		  cur
		 * Go to next OE.
		 */
		if (cur >= entry_end) {
			node = rb_next(node);
			/* No more ordered extents, exit */
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_ordered_extent,
					 rb_node);

			/* Go to next ordered extent and continue */
			cur = entry->file_offset;
			continue;
		}
		/*
		 * |	|<--- OE --->|
		 * cur
		 * Go to the start of OE.
		 */
		if (cur < entry->file_offset) {
			cur = entry->file_offset;
			continue;
		}

		/*
		 * Now we are definitely inside one ordered extent.
		 *
		 * |<--- OE --->|
		 *	|
		 *	cur
		 */
		end = min(entry->file_offset + entry->num_bytes,
			  file_offset + num_bytes) - 1;
		ASSERT(end + 1 - cur < U32_MAX);
		len = end + 1 - cur;

		if (page) {
			/*
			 * Ordered (Private2) bit indicates whether we still
			 * have pending io unfinished for the ordered extent.
			 *
			 * If there's no such bit, we need to skip to next range.
			 */
			if (!btrfs_page_test_ordered(fs_info, page, cur, len)) {
				cur += len;
				continue;
			}
			btrfs_page_clear_ordered(fs_info, page, cur, len);
		}

		/* Now we're fine to update the accounting */
		if (unlikely(len > entry->bytes_left)) {
			WARN_ON(1);
			btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%u left=%llu",
				   inode->root->root_key.objectid,
				   btrfs_ino(inode),
				   entry->file_offset,
				   entry->num_bytes,
				   len, entry->bytes_left);
			entry->bytes_left = 0;
		} else {
			entry->bytes_left -= len;
		}

		if (!uptodate)
			set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

		/*
		 * All the IO of the ordered extent is finished, we need to queue
		 * the finish_func to be executed.
		 */
		if (entry->bytes_left == 0) {
			set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
			cond_wake_up(&entry->wait);
			refcount_inc(&entry->refs);
			spin_unlock_irqrestore(&tree->lock, flags);
			btrfs_init_work(&entry->work, finish_func, NULL, NULL);
			btrfs_queue_work(wq, &entry->work);
			spin_lock_irqsave(&tree->lock, flags);
		}
		cur += len;
	}
	spin_unlock_irqrestore(&tree->lock, flags);
}
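/*
 * Note the refcount_inc() before dropping tree->lock above: it keeps the
 * ordered extent alive while the queued work item runs; the finish_func is
 * then responsible for dropping that reference (typically via
 * btrfs_put_ordered_extent()).
 */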
/*
 * Finish IO for one ordered extent across a given range.  The range can only
 * contain one ordered extent.
 *
 * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
 *		 search and use the ordered extent directly.
 *		 Will be also used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:	 Length of the finish IO range
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				    struct btrfs_ordered_extent **cached,
				    u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	bool finished = false;

	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		goto out;

	if (io_size > entry->bytes_left)
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);

	entry->bytes_left -= io_size;

	if (entry->bytes_left == 0) {
		/*
		 * Ensure only one caller can set the flag and finished_ret
		 * accordingly
		 */
		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	}
out:
	if (finished && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return finished;
}
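/*
 * Sketch of a typical caller (hypothetical offsets and helper name): on IO
 * completion for a range known to sit inside a single ordered extent,
 *
 *	struct btrfs_ordered_extent *cached = NULL;
 *
 *	if (btrfs_dec_test_ordered_pending(inode, &cached, offset, len))
 *		finish_and_put(cached);	// owns the ref stored in @cached
 *
 * where finish_and_put() stands in for whatever completion work the caller
 * does before btrfs_put_ordered_extent().
 */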
/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}
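/*
 * The inode reference taken with igrab() at allocation time is dropped via
 * btrfs_add_delayed_iput() rather than a direct iput(): the final put can
 * happen in contexts where calling iput() directly is not safe.
 */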
/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;

	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root)
		btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes,
						false);

	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
				 fs_info->delalloc_batch);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans);
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered, 1);
	complete(&ordered->completion);
}
/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}
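/*
 * The returned count is the number of ordered extents actually queued for
 * flushing.  Note the range check above is against the on-disk byte range
 * (disk_bytenr), matching its use by balance; entries outside the range are
 * parked on @skipped and restored without being waited on.
 */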
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}
/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}
/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered, 1);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}
/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;

	tree = &inode->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irqrestore(&tree->lock, flags);
	return entry;
}
/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}
/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
	}
	spin_unlock_irq(&tree->lock);
}
/*
 * lookup and return any extent before 'file_offset'.  NULL is returned if
 * none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}
/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * The difference between this and btrfs_lookup_first_ordered_extent() is
 * that this one won't return any ordered extent that does not overlap the range.
 * And the difference against btrfs_lookup_ordered_extent() is, this function
 * ensures the first ordered extent gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
			struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct rb_node *cur;
	struct rb_node *prev;
	struct rb_node *next;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&tree->lock);
	node = tree->tree.rb_node;
	/*
	 * Here we don't want to use tree_search() which will use tree->last
	 * and screw up the search order.
	 * And __tree_search() can't return the adjacent ordered extents
	 * either, thus here we do our own search.
	 */
	while (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset) {
			node = node->rb_left;
		} else if (file_offset >= entry_end(entry)) {
			node = node->rb_right;
		} else {
			/*
			 * Direct hit, got an ordered extent that starts at
			 * @file_offset
			 */
			goto out;
		}
	}
	if (!entry) {
		/* Empty tree */
		goto out;
	}

	cur = &entry->rb_node;
	/* We got an entry around @file_offset, check adjacent entries */
	if (entry->file_offset < file_offset) {
		prev = cur;
		next = rb_next(cur);
	} else {
		prev = rb_prev(cur);
		next = cur;
	}
	if (prev) {
		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	if (next) {
		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	/* No ordered extent in the range */
	entry = NULL;
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}
/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * This function always returns with the given range locked, ensuring after
 * it's called no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent_bits(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent_cached(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}
}
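/*
 * The lock/lookup/unlock/retry loop above is needed because a new ordered
 * extent can be created in the window between unlocking the range and
 * flushing the one we found; only when a lookup under the lock comes back
 * empty can the range be returned locked with nothing pending.
 */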
static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
				u64 len)
{
	struct inode *inode = ordered->inode;
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	u64 file_offset = ordered->file_offset + pos;
	u64 disk_bytenr = ordered->disk_bytenr + pos;
	u64 num_bytes = len;
	u64 disk_num_bytes = len;
	int type;
	unsigned long flags_masked = ordered->flags & ~(1 << BTRFS_ORDERED_DIRECT);
	int compress_type = ordered->compress_type;
	unsigned long weight;
	int ret;

	weight = hweight_long(flags_masked);
	WARN_ON_ONCE(weight > 1);
	if (!weight)
		type = 0;
	else
		type = __ffs(flags_masked);

	/*
	 * The splitting extent is already counted and will be added again
	 * in btrfs_add_ordered_extent_*(). Subtract num_bytes to avoid
	 * double counting.
	 */
	percpu_counter_add_batch(&fs_info->ordered_bytes, -num_bytes,
				 fs_info->delalloc_batch);
	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered->flags)) {
		WARN_ON_ONCE(1);
		ret = btrfs_add_ordered_extent_compress(BTRFS_I(inode),
				file_offset, disk_bytenr, num_bytes,
				disk_num_bytes, compress_type);
	} else if (test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
		ret = btrfs_add_ordered_extent_dio(BTRFS_I(inode), file_offset,
				disk_bytenr, num_bytes, disk_num_bytes, type);
	} else {
		ret = btrfs_add_ordered_extent(BTRFS_I(inode), file_offset,
				disk_bytenr, num_bytes, disk_num_bytes, type);
	}

	return ret;
}
int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
			       u64 post)
{
	struct inode *inode = ordered->inode;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct rb_node *node;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret = 0;

	spin_lock_irq(&tree->lock);
	/* Remove from tree once */
	node = &ordered->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;

	ordered->file_offset += pre;
	ordered->disk_bytenr += pre;
	ordered->num_bytes -= (pre + post);
	ordered->disk_num_bytes -= (pre + post);
	ordered->bytes_left -= (pre + post);

	/* Re-insert the node */
	node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			"zoned: inconsistency in ordered tree at offset %llu",
			ordered->file_offset);

	spin_unlock_irq(&tree->lock);

	if (pre)
		ret = clone_ordered_extent(ordered, 0, pre);
	if (ret == 0 && post)
		ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
					   post);

	return ret;
}
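/*
 * Used by the zoned mode write path (note the "zoned:" panic string above):
 * @pre and @post are trimmed off the original ordered extent and
 * re-registered as separate ordered extents via clone_ordered_extent().
 */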
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}