// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu. All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#include "qgroup.h"
#include "locking.h"

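/*
 * Thresholds that balance the delayed-item machinery (see
 * btrfs_balance_delayed_items() below): once the number of delayed items
 * passes BTRFS_DELAYED_BACKGROUND, background flushing is kicked off, and
 * at BTRFS_DELAYED_WRITEBACK callers start waiting for it to make progress;
 * BTRFS_DELAYED_BATCH controls how often waiters are woken as items finish.
 */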
#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
        delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
                                        sizeof(struct btrfs_delayed_node),
                                        0,
                                        SLAB_MEM_SPREAD,
                                        NULL);
        if (!delayed_node_cache)
                return -ENOMEM;
        return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
        kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
                                struct btrfs_delayed_node *delayed_node,
                                struct btrfs_root *root, u64 inode_id)
{
        delayed_node->root = root;
        delayed_node->inode_id = inode_id;
        refcount_set(&delayed_node->refs, 0);
        delayed_node->ins_root = RB_ROOT_CACHED;
        delayed_node->del_root = RB_ROOT_CACHED;
        mutex_init(&delayed_node->mutex);
        INIT_LIST_HEAD(&delayed_node->n_list);
        INIT_LIST_HEAD(&delayed_node->p_list);
}

static inline int btrfs_is_continuous_delayed_item(
                                        struct btrfs_delayed_item *item1,
                                        struct btrfs_delayed_item *item2)
{
        if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
            item1->key.objectid == item2->key.objectid &&
            item1->key.type == item2->key.type &&
            item1->key.offset + 1 == item2->key.offset)
                return 1;
        return 0;
}

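/*
 * Look up the delayed node attached to @btrfs_inode and take an extra
 * reference on it. The fast path reads the cached pointer locklessly; the
 * slow path looks the node up in the root's radix tree under inode_lock and
 * re-caches it in the inode. Returns NULL if the inode has no delayed node
 * (or the node found is already being torn down).
 */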
static struct btrfs_delayed_node *btrfs_get_delayed_node(
                struct btrfs_inode *btrfs_inode)
{
        struct btrfs_root *root = btrfs_inode->root;
        u64 ino = btrfs_ino(btrfs_inode);
        struct btrfs_delayed_node *node;

        node = READ_ONCE(btrfs_inode->delayed_node);
        if (node) {
                refcount_inc(&node->refs);
                return node;
        }

        spin_lock(&root->inode_lock);
        node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

        if (node) {
                if (btrfs_inode->delayed_node) {
                        refcount_inc(&node->refs);	/* can be accessed */
                        BUG_ON(btrfs_inode->delayed_node != node);
                        spin_unlock(&root->inode_lock);
                        return node;
                }

                /*
                 * It's possible that we're racing into the middle of removing
                 * this node from the radix tree. In this case, the refcount
                 * was zero and it should never go back to one. Just return
                 * NULL like it was never in the radix at all; our release
                 * function is in the process of removing it.
                 *
                 * Some implementations of refcount_inc refuse to bump the
                 * refcount once it has hit zero. If we don't do this dance
                 * here, refcount_inc() may decide to just WARN_ONCE() instead
                 * of actually bumping the refcount.
                 *
                 * If this node is properly in the radix, we want to bump the
                 * refcount twice, once for the inode and once for this get
                 * operation.
                 */
                if (refcount_inc_not_zero(&node->refs)) {
                        refcount_inc(&node->refs);
                        btrfs_inode->delayed_node = node;
                } else {
                        node = NULL;
                }

                spin_unlock(&root->inode_lock);
                return node;
        }
        spin_unlock(&root->inode_lock);

        return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
                struct btrfs_inode *btrfs_inode)
{
        struct btrfs_delayed_node *node;
        struct btrfs_root *root = btrfs_inode->root;
        u64 ino = btrfs_ino(btrfs_inode);
        int ret;

again:
        node = btrfs_get_delayed_node(btrfs_inode);
        if (node)
                return node;

        node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
        if (!node)
                return ERR_PTR(-ENOMEM);
        btrfs_init_delayed_node(node, root, ino);

        /* cached in the btrfs inode and can be accessed */
        refcount_set(&node->refs, 2);

        ret = radix_tree_preload(GFP_NOFS);
        if (ret) {
                kmem_cache_free(delayed_node_cache, node);
                return ERR_PTR(ret);
        }

        spin_lock(&root->inode_lock);
        ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
        if (ret == -EEXIST) {
                spin_unlock(&root->inode_lock);
                kmem_cache_free(delayed_node_cache, node);
                radix_tree_preload_end();
                goto again;
        }
        btrfs_inode->delayed_node = node;
        spin_unlock(&root->inode_lock);
        radix_tree_preload_end();

        return node;
}

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
                                     struct btrfs_delayed_node *node,
                                     int mod)
{
        spin_lock(&root->lock);
        if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
                if (!list_empty(&node->p_list))
                        list_move_tail(&node->p_list, &root->prepare_list);
                else if (mod)
                        list_add_tail(&node->p_list, &root->prepare_list);
        } else {
                list_add_tail(&node->n_list, &root->node_list);
                list_add_tail(&node->p_list, &root->prepare_list);
                refcount_inc(&node->refs);	/* inserted into list */
                root->nodes++;
                set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
        }
        spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
                                       struct btrfs_delayed_node *node)
{
        spin_lock(&root->lock);
        if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
                root->nodes--;
                refcount_dec(&node->refs);	/* not in the list */
                list_del_init(&node->n_list);
                if (!list_empty(&node->p_list))
                        list_del_init(&node->p_list);
                clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
        }
        spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
                        struct btrfs_delayed_root *delayed_root)
{
        struct list_head *p;
        struct btrfs_delayed_node *node = NULL;

        spin_lock(&delayed_root->lock);
        if (list_empty(&delayed_root->node_list))
                goto out;

        p = delayed_root->node_list.next;
        node = list_entry(p, struct btrfs_delayed_node, n_list);
        refcount_inc(&node->refs);
out:
        spin_unlock(&delayed_root->lock);

        return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
                                                struct btrfs_delayed_node *node)
{
        struct btrfs_delayed_root *delayed_root;
        struct list_head *p;
        struct btrfs_delayed_node *next = NULL;

        delayed_root = node->root->fs_info->delayed_root;
        spin_lock(&delayed_root->lock);
        if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
                /* not in the list */
                if (list_empty(&delayed_root->node_list))
                        goto out;
                p = delayed_root->node_list.next;
        } else if (list_is_last(&node->n_list, &delayed_root->node_list))
                goto out;
        else
                p = node->n_list.next;

        next = list_entry(p, struct btrfs_delayed_node, n_list);
        refcount_inc(&next->refs);
out:
        spin_unlock(&delayed_root->lock);

        return next;
}

static void __btrfs_release_delayed_node(
                                struct btrfs_delayed_node *delayed_node,
                                int mod)
{
        struct btrfs_delayed_root *delayed_root;

        if (!delayed_node)
                return;

        delayed_root = delayed_node->root->fs_info->delayed_root;

        mutex_lock(&delayed_node->mutex);
        if (delayed_node->count)
                btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
        else
                btrfs_dequeue_delayed_node(delayed_root, delayed_node);
        mutex_unlock(&delayed_node->mutex);

        if (refcount_dec_and_test(&delayed_node->refs)) {
                struct btrfs_root *root = delayed_node->root;

                spin_lock(&root->inode_lock);
                /*
                 * Once our refcount goes to zero, nobody is allowed to bump it
                 * back up. We can delete it now.
                 */
                ASSERT(refcount_read(&delayed_node->refs) == 0);
                radix_tree_delete(&root->delayed_nodes_tree,
                                  delayed_node->inode_id);
                spin_unlock(&root->inode_lock);
                kmem_cache_free(delayed_node_cache, delayed_node);
        }
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
        __btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
                                        struct btrfs_delayed_root *delayed_root)
{
        struct list_head *p;
        struct btrfs_delayed_node *node = NULL;

        spin_lock(&delayed_root->lock);
        if (list_empty(&delayed_root->prepare_list))
                goto out;

        p = delayed_root->prepare_list.next;
        list_del_init(p);
        node = list_entry(p, struct btrfs_delayed_node, p_list);
        refcount_inc(&node->refs);
out:
        spin_unlock(&delayed_root->lock);

        return node;
}

static inline void btrfs_release_prepared_delayed_node(
                                        struct btrfs_delayed_node *node)
{
        __btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
        struct btrfs_delayed_item *item;
        item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
        if (item) {
                item->data_len = data_len;
                item->ins_or_del = 0;
                item->bytes_reserved = 0;
                item->delayed_node = NULL;
                refcount_set(&item->refs, 1);
        }
        return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root: the root of the rb tree to search
 * @key: the key to look up
 * @prev: used to store the prev item if the right item isn't found
 * @next: used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
                                struct rb_root *root,
                                struct btrfs_key *key,
                                struct btrfs_delayed_item **prev,
                                struct btrfs_delayed_item **next)
{
        struct rb_node *node, *prev_node = NULL;
        struct btrfs_delayed_item *delayed_item = NULL;
        int ret = 0;

        node = root->rb_node;

        while (node) {
                delayed_item = rb_entry(node, struct btrfs_delayed_item,
                                        rb_node);
                prev_node = node;
                ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
                if (ret < 0)
                        node = node->rb_right;
                else if (ret > 0)
                        node = node->rb_left;
                else
                        return delayed_item;
        }

        if (prev) {
                if (!prev_node)
                        *prev = NULL;
                else if (ret < 0)
                        *prev = delayed_item;
                else if ((node = rb_prev(prev_node)) != NULL) {
                        *prev = rb_entry(node, struct btrfs_delayed_item,
                                         rb_node);
                } else
                        *prev = NULL;
        }

        if (next) {
                if (!prev_node)
                        *next = NULL;
                else if (ret > 0)
                        *next = delayed_item;
                else if ((node = rb_next(prev_node)) != NULL) {
                        *next = rb_entry(node, struct btrfs_delayed_item,
                                         rb_node);
                } else
                        *next = NULL;
        }
        return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
                                        struct btrfs_delayed_node *delayed_node,
                                        struct btrfs_key *key)
{
        return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
                                           NULL, NULL);
}

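/*
 * Insert a delayed item into the insertion or deletion rb-tree of the
 * delayed node, keyed by the item's btrfs key. Duplicate keys are rejected
 * with -EEXIST. Callers are expected to hold delayed_node->mutex.
 */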
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
                                    struct btrfs_delayed_item *ins,
                                    int action)
{
        struct rb_node **p, *node;
        struct rb_node *parent_node = NULL;
        struct rb_root_cached *root;
        struct btrfs_delayed_item *item;
        int cmp;
        bool leftmost = true;

        if (action == BTRFS_DELAYED_INSERTION_ITEM)
                root = &delayed_node->ins_root;
        else if (action == BTRFS_DELAYED_DELETION_ITEM)
                root = &delayed_node->del_root;
        else
                BUG();
        p = &root->rb_root.rb_node;
        node = &ins->rb_node;

        while (*p) {
                parent_node = *p;
                item = rb_entry(parent_node, struct btrfs_delayed_item,
                                rb_node);

                cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
                if (cmp < 0) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else if (cmp > 0) {
                        p = &(*p)->rb_left;
                } else {
                        return -EEXIST;
                }
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color_cached(node, root, leftmost);
        ins->delayed_node = delayed_node;
        ins->ins_or_del = action;

        if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
            action == BTRFS_DELAYED_INSERTION_ITEM &&
            ins->key.offset >= delayed_node->index_cnt)
                delayed_node->index_cnt = ins->key.offset + 1;

        delayed_node->count++;
        atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
        return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
                                              struct btrfs_delayed_item *item)
{
        return __btrfs_add_delayed_item(node, item,
                                        BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
                                             struct btrfs_delayed_item *item)
{
        return __btrfs_add_delayed_item(node, item,
                                        BTRFS_DELAYED_DELETION_ITEM);
}

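/*
 * Account one completed delayed item and wake up any waiters in
 * btrfs_balance_delayed_items() once the backlog drops below
 * BTRFS_DELAYED_BACKGROUND or another BTRFS_DELAYED_BATCH items have been
 * processed.
 */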
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
        int seq = atomic_inc_return(&delayed_root->items_seq);

        /* atomic_dec_return implies a barrier */
        if ((atomic_dec_return(&delayed_root->items) <
            BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
                cond_wake_up_nomb(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
        struct rb_root_cached *root;
        struct btrfs_delayed_root *delayed_root;

        /* Not associated with any delayed_node */
        if (!delayed_item->delayed_node)
                return;
        delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

        BUG_ON(!delayed_root);
        BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
               delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

        if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
                root = &delayed_item->delayed_node->ins_root;
        else
                root = &delayed_item->delayed_node->del_root;

        rb_erase_cached(&delayed_item->rb_node, root);
        delayed_item->delayed_node->count--;

        finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
        if (item) {
                __btrfs_remove_delayed_item(item);
                if (refcount_dec_and_test(&item->refs))
                        kfree(item);
        }
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
                                        struct btrfs_delayed_node *delayed_node)
{
        struct rb_node *p;
        struct btrfs_delayed_item *item = NULL;

        p = rb_first_cached(&delayed_node->ins_root);
        if (p)
                item = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
                                        struct btrfs_delayed_node *delayed_node)
{
        struct rb_node *p;
        struct btrfs_delayed_item *item = NULL;

        p = rb_first_cached(&delayed_node->del_root);
        if (p)
                item = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
                                                struct btrfs_delayed_item *item)
{
        struct rb_node *p;
        struct btrfs_delayed_item *next = NULL;

        p = rb_next(&item->rb_node);
        if (p)
                next = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return next;
}

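/*
 * Reserve metadata space for a delayed item by migrating it from the
 * transaction's block reserve (filled when the transaction started) into
 * the global delayed_block_rsv, so the space is still held when the item
 * is actually inserted into or deleted from the tree later on.
 */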
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
                                               struct btrfs_root *root,
                                               struct btrfs_delayed_item *item)
{
        struct btrfs_block_rsv *src_rsv;
        struct btrfs_block_rsv *dst_rsv;
        struct btrfs_fs_info *fs_info = root->fs_info;
        u64 num_bytes;
        int ret;

        if (!trans->bytes_reserved)
                return 0;

        src_rsv = trans->block_rsv;
        dst_rsv = &fs_info->delayed_block_rsv;

        num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);

        /*
         * Here we migrate space rsv from the transaction rsv, since we have
         * already reserved space when starting a transaction. So there is no
         * need to reserve qgroup space here.
         */
        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
        if (!ret) {
                trace_btrfs_space_reservation(fs_info, "delayed_item",
                                              item->key.objectid,
                                              num_bytes, 1);
                item->bytes_reserved = num_bytes;
        }

        return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
                                                struct btrfs_delayed_item *item)
{
        struct btrfs_block_rsv *rsv;
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (!item->bytes_reserved)
                return;

        rsv = &fs_info->delayed_block_rsv;
        /*
         * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
         * to release/reserve qgroup space.
         */
        trace_btrfs_space_reservation(fs_info, "delayed_item",
                                      item->key.objectid, item->bytes_reserved,
                                      0);
        btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
}

static int btrfs_delayed_inode_reserve_metadata(
                                        struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_inode *inode,
                                        struct btrfs_delayed_node *node)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_rsv *src_rsv;
        struct btrfs_block_rsv *dst_rsv;
        u64 num_bytes;
        int ret;

        src_rsv = trans->block_rsv;
        dst_rsv = &fs_info->delayed_block_rsv;

        num_bytes = btrfs_calc_metadata_size(fs_info, 1);

        /*
         * btrfs_dirty_inode will update the inode under btrfs_join_transaction
         * which doesn't reserve space for speed. This is a problem since we
         * still need to reserve space for this update, so try to reserve the
         * space.
         *
         * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
         * we always reserve enough to update the inode item.
         */
        if (!src_rsv || (!trans->bytes_reserved &&
                         src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
                ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
                if (ret < 0)
                        return ret;
                ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
                                          BTRFS_RESERVE_NO_FLUSH);
                /*
                 * Since we're under a transaction reserve_metadata_bytes could
                 * try to commit the transaction which will make it return
                 * EAGAIN to make us stop the transaction we have, so return
                 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
                 */
                if (ret == -EAGAIN) {
                        ret = -ENOSPC;
                        btrfs_qgroup_free_meta_prealloc(root, num_bytes);
                }
                if (!ret) {
                        node->bytes_reserved = num_bytes;
                        trace_btrfs_space_reservation(fs_info,
                                                      "delayed_inode",
                                                      btrfs_ino(inode),
                                                      num_bytes, 1);
                } else {
                        /* Free the same amount of qgroup space as reserved above */
                        btrfs_qgroup_free_meta_prealloc(root, num_bytes);
                }
                return ret;
        }

        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
        if (!ret) {
                trace_btrfs_space_reservation(fs_info, "delayed_inode",
                                              btrfs_ino(inode), num_bytes, 1);
                node->bytes_reserved = num_bytes;
        }

        return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
                                                struct btrfs_delayed_node *node,
                                                bool qgroup_free)
{
        struct btrfs_block_rsv *rsv;

        if (!node->bytes_reserved)
                return;

        rsv = &fs_info->delayed_block_rsv;
        trace_btrfs_space_reservation(fs_info, "delayed_inode",
                                      node->inode_id, node->bytes_reserved, 0);
        btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
        if (qgroup_free)
                btrfs_qgroup_free_meta_prealloc(node->root,
                                node->bytes_reserved);
        else
                btrfs_qgroup_convert_reserved_meta(node->root,
                                node->bytes_reserved);
        node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    struct btrfs_delayed_item *item)
{
        struct btrfs_delayed_item *curr, *next;
        int free_space;
        int total_data_size = 0, total_size = 0;
        struct extent_buffer *leaf;
        char *data_ptr;
        struct btrfs_key *keys;
        u32 *data_size;
        struct list_head head;
        int slot;
        int nitems;
        int i;
        int ret = 0;

        BUG_ON(!path->nodes[0]);

        leaf = path->nodes[0];
        free_space = btrfs_leaf_free_space(leaf);
        INIT_LIST_HEAD(&head);

        next = item;
        nitems = 0;

        /*
         * Count the number of continuous items that we can insert in a batch.
         */
        while (total_size + next->data_len + sizeof(struct btrfs_item) <=
               free_space) {
                total_data_size += next->data_len;
                total_size += next->data_len + sizeof(struct btrfs_item);
                list_add_tail(&next->tree_list, &head);
                nitems++;

                curr = next;
                next = __btrfs_next_delayed_item(curr);
                if (!next)
                        break;

                if (!btrfs_is_continuous_delayed_item(curr, next))
                        break;
        }

        if (!nitems) {
                ret = 0;
                goto out;
        }

        keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
        if (!keys) {
                ret = -ENOMEM;
                goto out;
        }

        data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
        if (!data_size) {
                ret = -ENOMEM;
                goto error;
        }

        /* get keys of all the delayed items */
        i = 0;
        list_for_each_entry(next, &head, tree_list) {
                keys[i] = next->key;
                data_size[i] = next->data_len;
                i++;
        }

        /* insert the keys of the items */
        setup_items_for_insert(root, path, keys, data_size, nitems);

        /* insert the dir index items */
        slot = path->slots[0];
        list_for_each_entry_safe(curr, next, &head, tree_list) {
                data_ptr = btrfs_item_ptr(leaf, slot, char);
                write_extent_buffer(leaf, &curr->data,
                                    (unsigned long)data_ptr,
                                    curr->data_len);
                slot++;

                btrfs_delayed_item_release_metadata(root, curr);

                list_del(&curr->tree_list);
                btrfs_release_delayed_item(curr);
        }

error:
        kfree(data_size);
        kfree(keys);
out:
        return ret;
}

/*
 * This helper can just do simple insertion that needn't extend item for new
 * data, such as directory name index insertion and inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     struct btrfs_path *path,
                                     struct btrfs_delayed_item *delayed_item)
{
        struct extent_buffer *leaf;
        unsigned int nofs_flag;
        char *ptr;
        int ret;

        nofs_flag = memalloc_nofs_save();
        ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
                                      delayed_item->data_len);
        memalloc_nofs_restore(nofs_flag);
        if (ret < 0 && ret != -EEXIST)
                return ret;

        leaf = path->nodes[0];

        ptr = btrfs_item_ptr(leaf, path->slots[0], char);

        write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
                            delayed_item->data_len);
        btrfs_mark_buffer_dirty(leaf);

        btrfs_delayed_item_release_metadata(root, delayed_item);
        return 0;
}

/*
 * We insert an item first, and then, if there are continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
                                      struct btrfs_path *path,
                                      struct btrfs_root *root,
                                      struct btrfs_delayed_node *node)
{
        struct btrfs_delayed_item *curr, *prev;
        int ret = 0;

do_again:
        mutex_lock(&node->mutex);
        curr = __btrfs_first_delayed_insertion_item(node);
        if (!curr)
                goto insert_end;

        ret = btrfs_insert_delayed_item(trans, root, path, curr);
        if (ret < 0) {
                btrfs_release_path(path);
                goto insert_end;
        }

        prev = curr;
        curr = __btrfs_next_delayed_item(prev);
        if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
                /* insert the continuous items into the same leaf */
                path->slots[0]++;
                btrfs_batch_insert_items(root, path, curr);
        }
        btrfs_release_delayed_item(prev);
        btrfs_mark_buffer_dirty(path->nodes[0]);

        btrfs_release_path(path);
        mutex_unlock(&node->mutex);
        goto do_again;

insert_end:
        mutex_unlock(&node->mutex);
        return ret;
}

static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    struct btrfs_delayed_item *item)
{
        struct btrfs_delayed_item *curr, *next;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct list_head head;
        int nitems, i, last_item;
        int ret = 0;

        BUG_ON(!path->nodes[0]);

        leaf = path->nodes[0];

        i = path->slots[0];
        last_item = btrfs_header_nritems(leaf) - 1;
        if (i > last_item)
                return -ENOENT;	/* FIXME: Is errno suitable? */

        next = item;
        INIT_LIST_HEAD(&head);
        btrfs_item_key_to_cpu(leaf, &key, i);
        nitems = 0;
        /*
         * Count the number of dir index items that we can delete in a batch.
         */
        while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
                list_add_tail(&next->tree_list, &head);
                nitems++;

                curr = next;
                next = __btrfs_next_delayed_item(curr);
                if (!next)
                        break;

                if (!btrfs_is_continuous_delayed_item(curr, next))
                        break;

                i++;
                if (i > last_item)
                        break;
                btrfs_item_key_to_cpu(leaf, &key, i);
        }

        if (!nitems)
                return 0;

        ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
        if (ret)
                goto out;

        list_for_each_entry_safe(curr, next, &head, tree_list) {
                btrfs_delayed_item_release_metadata(root, curr);
                list_del(&curr->tree_list);
                btrfs_release_delayed_item(curr);
        }

out:
        return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
                                      struct btrfs_path *path,
                                      struct btrfs_root *root,
                                      struct btrfs_delayed_node *node)
{
        struct btrfs_delayed_item *curr, *prev;
        unsigned int nofs_flag;
        int ret = 0;

do_again:
        mutex_lock(&node->mutex);
        curr = __btrfs_first_delayed_deletion_item(node);
        if (!curr)
                goto delete_fail;

        nofs_flag = memalloc_nofs_save();
        ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
        memalloc_nofs_restore(nofs_flag);
        if (ret < 0)
                goto delete_fail;
        else if (ret > 0) {
                /*
                 * We can't find the item which this delayed item points to,
                 * so the delayed item is invalid; just drop it.
                 */
                prev = curr;
                curr = __btrfs_next_delayed_item(prev);
                btrfs_release_delayed_item(prev);
                ret = 0;
                btrfs_release_path(path);
                if (curr) {
                        mutex_unlock(&node->mutex);
                        goto do_again;
                } else
                        goto delete_fail;
        }

        btrfs_batch_delete_items(trans, root, path, curr);
        btrfs_release_path(path);
        mutex_unlock(&node->mutex);
        goto do_again;

delete_fail:
        btrfs_release_path(path);
        mutex_unlock(&node->mutex);
        return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
        struct btrfs_delayed_root *delayed_root;

        if (delayed_node &&
            test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
                BUG_ON(!delayed_node->root);
                clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
                delayed_node->count--;

                delayed_root = delayed_node->root->fs_info->delayed_root;
                finish_one_item(delayed_root);
        }
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
        struct btrfs_delayed_root *delayed_root;

        ASSERT(delayed_node->root);
        clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
        delayed_node->count--;

        delayed_root = delayed_node->root->fs_info->delayed_root;
        finish_one_item(delayed_root);
}

static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_path *path,
                                        struct btrfs_delayed_node *node)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_key key;
        struct btrfs_inode_item *inode_item;
        struct extent_buffer *leaf;
        unsigned int nofs_flag;
        int mod;
        int ret;

        key.objectid = node->inode_id;
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;

        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
                mod = -1;
        else
                mod = 1;

        nofs_flag = memalloc_nofs_save();
        ret = btrfs_lookup_inode(trans, root, path, &key, mod);
        memalloc_nofs_restore(nofs_flag);
        if (ret > 0) {
                btrfs_release_path(path);
                return -ENOENT;
        } else if (ret < 0) {
                return ret;
        }

        leaf = path->nodes[0];
        inode_item = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_inode_item);
        write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
                            sizeof(struct btrfs_inode_item));
        btrfs_mark_buffer_dirty(leaf);

        if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
                goto no_iref;

        path->slots[0]++;
        if (path->slots[0] >= btrfs_header_nritems(leaf))
                goto search;
again:
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (key.objectid != node->inode_id)
                goto out;

        if (key.type != BTRFS_INODE_REF_KEY &&
            key.type != BTRFS_INODE_EXTREF_KEY)
                goto out;

        /*
         * Delayed iref deletion is for an inode that has only one link, so
         * there is only one iref. The case that several irefs are in the
         * same item doesn't exist.
         */
        ret = btrfs_del_item(trans, root, path);
out:
        btrfs_release_delayed_iref(node);
no_iref:
        btrfs_release_path(path);
err_out:
        btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
        btrfs_release_delayed_inode(node);

        return ret;

search:
        btrfs_release_path(path);

        key.type = BTRFS_INODE_EXTREF_KEY;
        key.offset = -1;

        nofs_flag = memalloc_nofs_save();
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        memalloc_nofs_restore(nofs_flag);
        if (ret < 0)
                goto err_out;
        ASSERT(ret);

        ret = 0;
        leaf = path->nodes[0];
        path->slots[0]--;
        goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
                                             struct btrfs_root *root,
                                             struct btrfs_path *path,
                                             struct btrfs_delayed_node *node)
{
        int ret;

        mutex_lock(&node->mutex);
        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
                mutex_unlock(&node->mutex);
                return 0;
        }

        ret = __btrfs_update_delayed_inode(trans, root, path, node);
        mutex_unlock(&node->mutex);
        return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
                                   struct btrfs_path *path,
                                   struct btrfs_delayed_node *node)
{
        int ret;

        ret = btrfs_insert_delayed_items(trans, path, node->root, node);
        if (ret)
                return ret;

        ret = btrfs_delete_delayed_items(trans, path, node->root, node);
        if (ret)
                return ret;

        ret = btrfs_update_delayed_inode(trans, node->root, path, node);
        return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_root *delayed_root;
        struct btrfs_delayed_node *curr_node, *prev_node;
        struct btrfs_path *path;
        struct btrfs_block_rsv *block_rsv;
        int ret = 0;
        bool count = (nr > 0);

        if (TRANS_ABORTED(trans))
                return -EIO;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        block_rsv = trans->block_rsv;
        trans->block_rsv = &fs_info->delayed_block_rsv;

        delayed_root = fs_info->delayed_root;

        curr_node = btrfs_first_delayed_node(delayed_root);
        while (curr_node && (!count || (count && nr--))) {
                ret = __btrfs_commit_inode_delayed_items(trans, path,
                                                         curr_node);
                if (ret) {
                        btrfs_release_delayed_node(curr_node);
                        curr_node = NULL;
                        btrfs_abort_transaction(trans, ret);
                        break;
                }

                prev_node = curr_node;
                curr_node = btrfs_next_delayed_node(curr_node);
                btrfs_release_delayed_node(prev_node);
        }

        if (curr_node)
                btrfs_release_delayed_node(curr_node);
        btrfs_free_path(path);
        trans->block_rsv = block_rsv;

        return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
{
        return __btrfs_run_delayed_items(trans, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
{
        return __btrfs_run_delayed_items(trans, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
                                     struct btrfs_inode *inode)
{
        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
        struct btrfs_path *path;
        struct btrfs_block_rsv *block_rsv;
        int ret;

        if (!delayed_node)
                return 0;

        mutex_lock(&delayed_node->mutex);
        if (!delayed_node->count) {
                mutex_unlock(&delayed_node->mutex);
                btrfs_release_delayed_node(delayed_node);
                return 0;
        }
        mutex_unlock(&delayed_node->mutex);

        path = btrfs_alloc_path();
        if (!path) {
                btrfs_release_delayed_node(delayed_node);
                return -ENOMEM;
        }

        block_rsv = trans->block_rsv;
        trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

        ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

        btrfs_release_delayed_node(delayed_node);
        btrfs_free_path(path);
        trans->block_rsv = block_rsv;

        return ret;
}

int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_trans_handle *trans;
        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
        struct btrfs_path *path;
        struct btrfs_block_rsv *block_rsv;
        int ret;

        if (!delayed_node)
                return 0;

        mutex_lock(&delayed_node->mutex);
        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
                mutex_unlock(&delayed_node->mutex);
                btrfs_release_delayed_node(delayed_node);
                return 0;
        }
        mutex_unlock(&delayed_node->mutex);

        trans = btrfs_join_transaction(delayed_node->root);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto trans_out;
        }

        block_rsv = trans->block_rsv;
        trans->block_rsv = &fs_info->delayed_block_rsv;

        mutex_lock(&delayed_node->mutex);
        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
                ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
                                                   path, delayed_node);
        else
                ret = 0;
        mutex_unlock(&delayed_node->mutex);

        btrfs_free_path(path);
        trans->block_rsv = block_rsv;
trans_out:
        btrfs_end_transaction(trans);
        btrfs_btree_balance_dirty(fs_info);
out:
        btrfs_release_delayed_node(delayed_node);

        return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
        struct btrfs_delayed_node *delayed_node;

        delayed_node = READ_ONCE(inode->delayed_node);
        if (!delayed_node)
                return;

        inode->delayed_node = NULL;
        btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
        struct btrfs_delayed_root *delayed_root;
        int nr;
        struct btrfs_work work;
};

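/*
 * Work function for the delayed-item workqueue: repeatedly grab a prepared
 * delayed node, join a transaction and flush the node's items, until the
 * backlog drops below half of BTRFS_DELAYED_BACKGROUND or the requested
 * number of nodes has been handled.
 */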
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
        struct btrfs_async_delayed_work *async_work;
        struct btrfs_delayed_root *delayed_root;
        struct btrfs_trans_handle *trans;
        struct btrfs_path *path;
        struct btrfs_delayed_node *delayed_node = NULL;
        struct btrfs_root *root;
        struct btrfs_block_rsv *block_rsv;
        int total_done = 0;

        async_work = container_of(work, struct btrfs_async_delayed_work, work);
        delayed_root = async_work->delayed_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        do {
                if (atomic_read(&delayed_root->items) <
                    BTRFS_DELAYED_BACKGROUND / 2)
                        break;

                delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
                if (!delayed_node)
                        break;

                root = delayed_node->root;

                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        btrfs_release_path(path);
                        btrfs_release_prepared_delayed_node(delayed_node);
                        total_done++;
                        continue;
                }

                block_rsv = trans->block_rsv;
                trans->block_rsv = &root->fs_info->delayed_block_rsv;

                __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

                trans->block_rsv = block_rsv;
                btrfs_end_transaction(trans);
                btrfs_btree_balance_dirty_nodelay(root->fs_info);

                btrfs_release_path(path);
                btrfs_release_prepared_delayed_node(delayed_node);
                total_done++;

        } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
                 || total_done < async_work->nr);

        btrfs_free_path(path);
out:
        wake_up(&delayed_root->wait);
        kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
                                     struct btrfs_fs_info *fs_info, int nr)
{
        struct btrfs_async_delayed_work *async_work;

        async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
        if (!async_work)
                return -ENOMEM;

        async_work->delayed_root = delayed_root;
        btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
                        NULL);
        async_work->nr = nr;

        btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
        return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
        WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

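/*
 * Tell btrfs_balance_delayed_items() whether it may stop waiting: either the
 * background work made progress (the items_seq counter moved by at least
 * BTRFS_DELAYED_BATCH, or wrapped) or the backlog dropped below
 * BTRFS_DELAYED_BACKGROUND.
 */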
static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
        int val = atomic_read(&delayed_root->items_seq);

        if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
                return 1;

        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
                return 1;

        return 0;
}

void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
        struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

        if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
            btrfs_workqueue_normal_congested(fs_info->delayed_workers))
                return;

        if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
                int seq;
                int ret;

                seq = atomic_read(&delayed_root->items_seq);

                ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
                if (ret)
                        return;

                wait_event_interruptible(delayed_root->wait,
                                         could_end_wait(delayed_root, seq));
                return;
        }

        btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
                                   const char *name, int name_len,
                                   struct btrfs_inode *dir,
                                   struct btrfs_disk_key *disk_key, u8 type,
                                   u64 index)
{
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_delayed_item *delayed_item;
        struct btrfs_dir_item *dir_item;
        int ret;

        delayed_node = btrfs_get_or_create_delayed_node(dir);
        if (IS_ERR(delayed_node))
                return PTR_ERR(delayed_node);

        delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
        if (!delayed_item) {
                ret = -ENOMEM;
                goto release_node;
        }

        delayed_item->key.objectid = btrfs_ino(dir);
        delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
        delayed_item->key.offset = index;

        dir_item = (struct btrfs_dir_item *)delayed_item->data;
        dir_item->location = *disk_key;
        btrfs_set_stack_dir_transid(dir_item, trans->transid);
        btrfs_set_stack_dir_data_len(dir_item, 0);
        btrfs_set_stack_dir_name_len(dir_item, name_len);
        btrfs_set_stack_dir_type(dir_item, type);
        memcpy((char *)(dir_item + 1), name, name_len);

        ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
        /*
         * We reserve enough space when we start a new transaction, so a
         * metadata reservation failure here is impossible.
         */
        BUG_ON(ret);

        mutex_lock(&delayed_node->mutex);
        ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
        if (unlikely(ret)) {
                btrfs_err(trans->fs_info,
"error adding delayed dir index item (name: %.*s) into the insertion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
                          name_len, name, delayed_node->root->root_key.objectid,
                          delayed_node->inode_id, ret);
                BUG();
        }
        mutex_unlock(&delayed_node->mutex);

release_node:
        btrfs_release_delayed_node(delayed_node);
        return ret;
}

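/*
 * If a matching dir index insertion is still pending in the delayed node,
 * deleting the entry just cancels the insertion: drop the delayed item and
 * its reserved metadata and return 0. Return 1 when nothing is pending, so
 * the caller queues a real deletion item instead.
 */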
static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
                                               struct btrfs_delayed_node *node,
                                               struct btrfs_key *key)
{
        struct btrfs_delayed_item *item;

        mutex_lock(&node->mutex);
        item = __btrfs_lookup_delayed_insertion_item(node, key);
        if (!item) {
                mutex_unlock(&node->mutex);
                return 1;
        }

        btrfs_delayed_item_release_metadata(node->root, item);
        btrfs_release_delayed_item(item);
        mutex_unlock(&node->mutex);
        return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
                                   struct btrfs_inode *dir, u64 index)
{
        struct btrfs_delayed_node *node;
        struct btrfs_delayed_item *item;
        struct btrfs_key item_key;
        int ret;

        node = btrfs_get_or_create_delayed_node(dir);
        if (IS_ERR(node))
                return PTR_ERR(node);

        item_key.objectid = btrfs_ino(dir);
        item_key.type = BTRFS_DIR_INDEX_KEY;
        item_key.offset = index;

        ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
                                                  &item_key);
        if (!ret)
                goto end;

        item = btrfs_alloc_delayed_item(0);
        if (!item) {
                ret = -ENOMEM;
                goto end;
        }

        item->key = item_key;

        ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
        /*
         * We reserve enough space when we start a new transaction, so a
         * metadata reservation failure here is impossible.
         */
        if (ret < 0) {
                btrfs_err(trans->fs_info,
"metadata reservation failed for delayed dir item deletion, should have been reserved");
                btrfs_release_delayed_item(item);
                goto end;
        }

        mutex_lock(&node->mutex);
        ret = __btrfs_add_delayed_deletion_item(node, item);
        if (unlikely(ret)) {
                btrfs_err(trans->fs_info,
"error adding delayed dir index item (index: %llu) into the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
                          index, node->root->root_key.objectid,
                          node->inode_id, ret);
                btrfs_delayed_item_release_metadata(dir->root, item);
                btrfs_release_delayed_item(item);
        }
        mutex_unlock(&node->mutex);
end:
        btrfs_release_delayed_node(node);
        return ret;
}

int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
{
        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

        if (!delayed_node)
                return -ENOENT;

        /*
         * Since we have held i_mutex of this directory, it is impossible that
         * a new directory index is added into the delayed node and index_cnt
         * is updated now. So we needn't lock the delayed node.
         */
        if (!delayed_node->index_cnt) {
                btrfs_release_delayed_node(delayed_node);
                return -EINVAL;
        }

        inode->index_cnt = delayed_node->index_cnt;
        btrfs_release_delayed_node(delayed_node);
        return 0;
}

bool btrfs_readdir_get_delayed_items(struct inode *inode,
                                     struct list_head *ins_list,
                                     struct list_head *del_list)
{
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_delayed_item *item;

        delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
        if (!delayed_node)
                return false;

        /*
         * We can only do one readdir with delayed items at a time because of
         * item->readdir_list.
         */
        inode_unlock_shared(inode);
        inode_lock(inode);

        mutex_lock(&delayed_node->mutex);
        item = __btrfs_first_delayed_insertion_item(delayed_node);
        while (item) {
                refcount_inc(&item->refs);
                list_add_tail(&item->readdir_list, ins_list);
                item = __btrfs_next_delayed_item(item);
        }

        item = __btrfs_first_delayed_deletion_item(delayed_node);
        while (item) {
                refcount_inc(&item->refs);
                list_add_tail(&item->readdir_list, del_list);
                item = __btrfs_next_delayed_item(item);
        }
        mutex_unlock(&delayed_node->mutex);
        /*
         * This delayed node is still cached in the btrfs inode, so refs
         * must be > 1 now, and we needn't check it is going to be freed
         * or not.
         *
         * Besides that, this function is used to read dir, we do not
         * insert/delete delayed items in this period. So we also needn't
         * requeue or dequeue this delayed node.
         */
        refcount_dec(&delayed_node->refs);

        return true;
}

void btrfs_readdir_put_delayed_items(struct inode *inode,
                                     struct list_head *ins_list,
                                     struct list_head *del_list)
{
        struct btrfs_delayed_item *curr, *next;

        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
                list_del(&curr->readdir_list);
                if (refcount_dec_and_test(&curr->refs))
                        kfree(curr);
        }

        list_for_each_entry_safe(curr, next, del_list, readdir_list) {
                list_del(&curr->readdir_list);
                if (refcount_dec_and_test(&curr->refs))
                        kfree(curr);
        }

        /*
         * The VFS is going to do up_read(), so we need to downgrade back to a
         * read lock.
         */
        downgrade_write(&inode->i_rwsem);
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
                                  u64 index)
{
        struct btrfs_delayed_item *curr;
        int ret = 0;

        list_for_each_entry(curr, del_list, readdir_list) {
                if (curr->key.offset > index)
                        break;
                if (curr->key.offset == index) {
                        ret = 1;
                        break;
                }
        }
        return ret;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
                                    struct list_head *ins_list)
{
        struct btrfs_dir_item *di;
        struct btrfs_delayed_item *curr, *next;
        struct btrfs_key location;
        char *name;
        int name_len;
        int over = 0;
        unsigned char d_type;

        if (list_empty(ins_list))
                return 0;

        /*
         * Changing the data of the delayed item is impossible, so we don't
         * need to lock them. And we have held i_mutex of the directory, so
         * nobody can delete any directory indexes now.
         */
        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
                list_del(&curr->readdir_list);

                if (curr->key.offset < ctx->pos) {
                        if (refcount_dec_and_test(&curr->refs))
                                kfree(curr);
                        continue;
                }

                ctx->pos = curr->key.offset;

                di = (struct btrfs_dir_item *)curr->data;
                name = (char *)(di + 1);
                name_len = btrfs_stack_dir_name_len(di);

                d_type = fs_ftype_to_dtype(di->type);
                btrfs_disk_key_to_cpu(&location, &di->location);

                over = !dir_emit(ctx, name, name_len,
                                 location.objectid, d_type);

                if (refcount_dec_and_test(&curr->refs))
                        kfree(curr);

                if (over)
                        return 1;
                ctx->pos++;
        }
        return 0;
}

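/*
 * Copy the in-memory VFS/btrfs inode state into a stack btrfs_inode_item,
 * which is later written verbatim into the inode item in the fs tree by
 * __btrfs_update_delayed_inode().
 */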
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
                                  struct btrfs_inode_item *inode_item,
                                  struct inode *inode)
{
        btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
        btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
        btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
        btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
        btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
        btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
        btrfs_set_stack_inode_generation(inode_item,
                                         BTRFS_I(inode)->generation);
        btrfs_set_stack_inode_sequence(inode_item,
                                       inode_peek_iversion(inode));
        btrfs_set_stack_inode_transid(inode_item, trans->transid);
        btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
        btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
        btrfs_set_stack_inode_block_group(inode_item, 0);

        btrfs_set_stack_timespec_sec(&inode_item->atime,
                                     inode->i_atime.tv_sec);
        btrfs_set_stack_timespec_nsec(&inode_item->atime,
                                      inode->i_atime.tv_nsec);

        btrfs_set_stack_timespec_sec(&inode_item->mtime,
                                     inode->i_mtime.tv_sec);
        btrfs_set_stack_timespec_nsec(&inode_item->mtime,
                                      inode->i_mtime.tv_nsec);

        btrfs_set_stack_timespec_sec(&inode_item->ctime,
                                     inode->i_ctime.tv_sec);
        btrfs_set_stack_timespec_nsec(&inode_item->ctime,
                                      inode->i_ctime.tv_nsec);

        btrfs_set_stack_timespec_sec(&inode_item->otime,
                                     BTRFS_I(inode)->i_otime.tv_sec);
        btrfs_set_stack_timespec_nsec(&inode_item->otime,
                                      BTRFS_I(inode)->i_otime.tv_nsec);
}

int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
        struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_inode_item *inode_item;

        delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
        if (!delayed_node)
                return -ENOENT;

        mutex_lock(&delayed_node->mutex);
        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
                mutex_unlock(&delayed_node->mutex);
                btrfs_release_delayed_node(delayed_node);
                return -ENOENT;
        }

        inode_item = &delayed_node->inode_item;

        i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
        i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
        btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
        btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
                        round_up(i_size_read(inode), fs_info->sectorsize));
        inode->i_mode = btrfs_stack_inode_mode(inode_item);
        set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
        inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
        BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
        BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

        inode_set_iversion_queried(inode,
                                   btrfs_stack_inode_sequence(inode_item));
        inode->i_rdev = 0;
        *rdev = btrfs_stack_inode_rdev(inode_item);
        BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

        inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
        inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

        inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
        inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

        inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
        inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

        BTRFS_I(inode)->i_otime.tv_sec =
                btrfs_stack_timespec_sec(&inode_item->otime);
        BTRFS_I(inode)->i_otime.tv_nsec =
                btrfs_stack_timespec_nsec(&inode_item->otime);

        inode->i_generation = BTRFS_I(inode)->generation;
        BTRFS_I(inode)->index_cnt = (u64)-1;

        mutex_unlock(&delayed_node->mutex);
        btrfs_release_delayed_node(delayed_node);
        return 0;
}

int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               struct btrfs_inode *inode)
{
        struct btrfs_delayed_node *delayed_node;
        int ret = 0;

        delayed_node = btrfs_get_or_create_delayed_node(inode);
        if (IS_ERR(delayed_node))
                return PTR_ERR(delayed_node);

        mutex_lock(&delayed_node->mutex);
        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
                fill_stack_inode_item(trans, &delayed_node->inode_item,
                                      &inode->vfs_inode);
                goto release_node;
        }

        ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
                                                   delayed_node);
        if (ret)
                goto release_node;

        fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
        set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
        delayed_node->count++;
        atomic_inc(&root->fs_info->delayed_root->items);
release_node:
        mutex_unlock(&delayed_node->mutex);
        btrfs_release_delayed_node(delayed_node);
        return ret;
}

int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_delayed_node *delayed_node;

        /*
         * We don't do delayed inode updates during log recovery because it
         * leads to enospc problems. This means we also can't do
         * delayed inode refs.
         */
        if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
                return -EAGAIN;

        delayed_node = btrfs_get_or_create_delayed_node(inode);
        if (IS_ERR(delayed_node))
                return PTR_ERR(delayed_node);

        /*
         * We don't reserve space for inode ref deletion because:
         * - We ONLY do async inode ref deletion for an inode which has only
         *   one link (i_nlink == 1), which means there is only one inode ref.
         *   And in most cases, the inode ref and the inode item are in the
         *   same leaf, and we will deal with them at the same time.
         *   Since we are sure we will reserve the space for the inode item,
         *   it is unnecessary to reserve space for inode ref deletion.
         * - If the inode ref and the inode item are not in the same leaf,
         *   we also needn't worry about the enospc problem, because we reserve
         *   much more space for the inode update than it needs.
         * - At the worst, we can steal some space from the global reservation.
         *   It is very rare.
         */
        mutex_lock(&delayed_node->mutex);
        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
                goto release_node;

        set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
        delayed_node->count++;
        atomic_inc(&fs_info->delayed_root->items);
release_node:
        mutex_unlock(&delayed_node->mutex);
        btrfs_release_delayed_node(delayed_node);
        return 0;
}

static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
        struct btrfs_root *root = delayed_node->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_delayed_item *curr_item, *prev_item;

        mutex_lock(&delayed_node->mutex);
        curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
        while (curr_item) {
                btrfs_delayed_item_release_metadata(root, curr_item);
                prev_item = curr_item;
                curr_item = __btrfs_next_delayed_item(prev_item);
                btrfs_release_delayed_item(prev_item);
        }

        curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
        while (curr_item) {
                btrfs_delayed_item_release_metadata(root, curr_item);
                prev_item = curr_item;
                curr_item = __btrfs_next_delayed_item(prev_item);
                btrfs_release_delayed_item(prev_item);
        }

        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
                btrfs_release_delayed_iref(delayed_node);

        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
                btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
                btrfs_release_delayed_inode(delayed_node);
        }
        mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
        struct btrfs_delayed_node *delayed_node;

        delayed_node = btrfs_get_delayed_node(inode);
        if (!delayed_node)
                return;

        __btrfs_kill_delayed_node(delayed_node);
        btrfs_release_delayed_node(delayed_node);
}

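/*
 * Drop every delayed node of the root, walking the radix tree in batches of
 * ARRAY_SIZE(delayed_nodes) entries, typically when the root is being
 * deleted, so all pending delayed items are released rather than written out.
 */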
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
        u64 inode_id = 0;
        struct btrfs_delayed_node *delayed_nodes[8];
        int i, n;

        while (1) {
                spin_lock(&root->inode_lock);
                n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
                                           (void **)delayed_nodes, inode_id,
                                           ARRAY_SIZE(delayed_nodes));
                if (!n) {
                        spin_unlock(&root->inode_lock);
                        break;
                }

                inode_id = delayed_nodes[n - 1]->inode_id + 1;
                for (i = 0; i < n; i++) {
                        /*
                         * Don't increase refs in case the node is dead and
                         * about to be removed from the tree in the loop below
                         */
                        if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
                                delayed_nodes[i] = NULL;
                }
                spin_unlock(&root->inode_lock);

                for (i = 0; i < n; i++) {
                        if (!delayed_nodes[i])
                                continue;
                        __btrfs_kill_delayed_node(delayed_nodes[i]);
                        btrfs_release_delayed_node(delayed_nodes[i]);
                }
        }
}

void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
        struct btrfs_delayed_node *curr_node, *prev_node;

        curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
        while (curr_node) {
                __btrfs_kill_delayed_node(curr_node);

                prev_node = curr_node;
                curr_node = btrfs_next_delayed_node(curr_node);
                btrfs_release_delayed_node(prev_node);
        }
}