/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16
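/*
 * How the three thresholds above interact (see finish_one_item() and
 * btrfs_balance_delayed_items() below): background flushing kicks in once
 * BTRFS_DELAYED_BACKGROUND items have accumulated, callers start to
 * throttle synchronously at BTRFS_DELAYED_WRITEBACK, and waiters are woken
 * in steps of BTRFS_DELAYED_BATCH completed items.
 */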
static struct kmem_cache *delayed_node_cache;
int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}
void btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}
static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	atomic_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}
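/*
 * Two delayed items are "continuous" when they are adjacent dir index keys:
 * same objectid and type, with offsets exactly one apart.  Such runs can be
 * written out as one batch into a single leaf.
 */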
static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}
static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			atomic_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		/* can be accessed and cached in the inode */
		atomic_add(2, &node->refs);
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}
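/*
 * Reference-count convention: one reference is owned by the radix tree and
 * one by the inode cache (hence the atomic_add(2, ...) above: cache +
 * caller), and every successful lookup returns with an extra reference
 * that the caller must drop via btrfs_release_delayed_node().
 */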
/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	atomic_add(2, &node->refs);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}
/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}
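/*
 * A delayed node sits on two lists: n_list (root->node_list) holds every
 * node that has pending work, while p_list (root->prepare_list) holds the
 * nodes picked up for background flushing by the async work further below.
 */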
/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}
static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}
static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		bool free = false;
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			free = true;
		}
		spin_unlock(&root->inode_lock);
		if (free)
			kmem_cache_free(delayed_node_cache, delayed_node);
	}
}
static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}
static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}
static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;

	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}
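/*
 * The item payload (e.g. a struct btrfs_dir_item followed by the name)
 * lives in the data[] area directly behind the struct, so the single
 * kmalloc of sizeof(*item) + data_len above covers both the bookkeeping
 * and the payload.
 */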
/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root:	  the rb-root of the delayed node to search
 *		  (ins_root or del_root)
 * @key:	  the key to look up
 * @prev:	  used to store the prev item if the right item isn't found
 * @next:	  used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}
static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
}
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}
static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}
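/*
 * items_seq only ever increases, so a waiter that sampled it in
 * btrfs_balance_delayed_items() can tell from could_end_wait() whether at
 * least BTRFS_DELAYED_BATCH items have completed since it went to sleep.
 */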
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}
static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}
static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}
static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}
static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}
static void btrfs_delayed_item_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv,
				item->bytes_reserved);
}
static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * If our block_rsv is the delalloc block reserve then check and see if
	 * we have our extra reservation for updating the inode.  If not fall
	 * through and try to reserve space quickly.
	 *
	 * We used to try and steal from the delalloc block rsv or the global
	 * reserve, but we'd steal a full reservation, which isn't kind.  We are
	 * here through delalloc which means we've likely just cowed down close
	 * to the leaf that contains the inode, so we would steal less just
	 * doing the fallback inode update, so if we do end up having to steal
	 * from the global block rsv we hopefully only steal one or two blocks
	 * worth which is less likely to hurt us.
	 */
	if (src_rsv && src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
		spin_lock(&inode->lock);
		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
				       &inode->runtime_flags))
			release = true;
		else
			src_rsv = NULL;
		spin_unlock(&inode->lock);
	}

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we're accounted for.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);

	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv.  This is to simplify people who don't normally have
	 * things migrated from their block rsv.  If they go to release their
	 * reservation, that will decrease the size as well, so if migrate
	 * reduced size we'd end up with a negative size.  But for the
	 * delalloc_meta_reserved stuff we will only know to drop 1
	 * reservation, but we could in fact do this reserve/migrate dance
	 * several times between the time we did the original reservation and
	 * we'd clean it up.  So to take care of this, release the space for
	 * the meta reservation here.  I think it may be time for a
	 * documentation page on how block rsvs work.
	 */
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	if (release) {
		trace_btrfs_space_reservation(fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(fs_info, src_rsv, num_bytes);
	}

	return ret;
}
static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}
/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(fs_info, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory, but it might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(fs_info, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}
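/*
 * Worked example of the size accounting above: batching three dir index
 * items of data_len 40 each needs 3 * (40 + sizeof(struct btrfs_item))
 * bytes of leaf free space -- every item costs its payload plus one
 * struct btrfs_item header in the leaf.
 */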
/*
 * This helper can just do simple insertion that needn't extend item for new
 * data, such as directory name index insertion, inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(fs_info, delayed_item);
	return 0;
}
/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(fs_info, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}
static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}
static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}
static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for the inode who has only one link,
	 * so there is only one iref. The case that several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node);
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}
static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}
static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}
/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}
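/*
 * nr semantics for the wrappers below: a negative value (as passed by
 * btrfs_run_delayed_items()) makes count false, so the loop above flushes
 * every delayed node; a positive nr flushes at most that many nodes.
 */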
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info)
{
	return __btrfs_run_delayed_items(trans, fs_info, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info, int nr)
{
	return __btrfs_run_delayed_items(trans, fs_info, nr);
}
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}
int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}
void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}
struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

again:
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
		goto free_path;

	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
	if (!delayed_node)
		goto free_path;

	path->leave_spinning = 1;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto release_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty_nodelay(root->fs_info);

release_path:
	btrfs_release_path(path);
	total_done++;

	btrfs_release_prepared_delayed_node(delayed_node);
	if ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) ||
	    total_done < async_work->nr)
		goto again;

free_path:
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}
static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND ||
	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return 0;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}
void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}
static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}
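/*
 * could_end_wait() pairs with the wait_event_interruptible() call in
 * btrfs_balance_delayed_items() below: the sleeper wakes once a batch of
 * items has been processed (items_seq advanced past seq + 
 * BTRFS_DELAYED_BATCH, or wrapped below seq) or once the backlog fell
 * under the background threshold.
 */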
void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}
/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   const char *name, int name_len,
				   struct btrfs_inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, delayed_item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(fs_info,
			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
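/*
 * Typical use (a sketch; the exact call sites live outside this file, e.g.
 * in the directory entry creation path): instead of touching the b-tree,
 * the caller packs the btrfs_dir_item plus name into a delayed item here,
 * and the item is materialized later by btrfs_insert_delayed_items().
 */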
static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(fs_info, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_inode *dir, u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(fs_info,
			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->objectid, node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}
int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we have held i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	inode_unlock_shared(inode);
	inode_lock(inode);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check it is going to be freed
	 * or not.
	 *
	 * Besides that, this function is used to read dir, we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	atomic_dec(&delayed_node->refs);

	return true;
}
void btrfs_readdir_put_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	/*
	 * The VFS is going to do up_read(), so we need to downgrade back to a
	 * read lock.
	 */
	downgrade_write(&inode->i_rwsem);
}
int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr, *next;
	int ret;

	if (list_empty(del_list))
		return 0;

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;

		list_del(&curr->readdir_list);
		ret = (curr->key.offset == index);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (ret)
			return 1;
		else
			continue;
	}
	return 0;
}
/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 *
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible. So
	 * we needn't lock them. And we have held i_mutex of the
	 * directory, nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (atomic_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
	}
	return 0;
}
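/*
 * Readdir thus merges two sources: entries already on disk, filtered
 * through btrfs_should_delete_dir_index() for pending deletions, and the
 * not-yet-committed insertions emitted by the loop above.
 */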
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);
}
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode->i_version = btrfs_stack_inode_sequence(inode_item);
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_delayed_node *delayed_node;

	/*
	 * we don't do delayed inode updates during log recovery because it
	 * leads to enospc problems.  This means we also can't do
	 * delayed inode refs
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for inode ref deletion because:
	 * - We ONLY do async inode ref deletion for the inode who has only
	 *   one link(i_nlink == 1), it means there is only one inode ref.
	 *   And in most case, the inode ref and the inode item are in the
	 *   same leaf, and we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   We also needn't worry about enospc problem, because we reserve
	 *   much more space for the inode update than it needs.
	 * - At the worst, we can steal some space from the global reservation.
	 *   It is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(fs_info, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(fs_info, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(fs_info, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}
void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			atomic_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}
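/*
 * Gang lookup in batches of 8 keeps inode_lock hold times short: refs are
 * taken under the lock, the nodes are killed outside it, and inode_id
 * restarts the scan just past the last node seen.
 */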
void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_node *curr_node, *prev_node;

	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}