/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

/*
 * compare two delayed tree backrefs with the same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1, int type)
{
	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with the same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * the type of the delayed backref and the content of the backref.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1,
		      bool compare_seq)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->no_quota > ref2->no_quota)
		return 1;
	if (ref1->no_quota < ref2->no_quota)
		return -1;
	/* merging of sequenced refs is not allowed */
	if (compare_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1),
				      ref1->type);
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}

/*
 * insert a new ref into the rbtree.  This returns the existing ref that
 * compares equal to the new node, or NULL if the new node was inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins, 1);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * insert a new delayed ref head into the href rbtree.  Returns the
 * existing head for the same bytenr, or NULL if the node was inserted.
 */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->node.bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->node.bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->node.bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr.  This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->node.bytenr)
			n = n->rb_left;
		else if (bytenr > entry->node.bytenr)
			n = n->rb_right;
		else
			return entry;
	}
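	/*
	 * No exact match: entry is the last node we compared against.  If
	 * it is smaller than @bytenr, step to its in-order successor,
	 * wrapping around to the first head when we ran off the end of
	 * the tree; otherwise entry is already the next bigger head.
	 */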
	if (entry && return_bigger) {
		if (bytenr > entry->node.bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}

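/*
 * Take the mutex on a delayed ref head.  The caller must hold
 * delayed_refs->lock.  If the mutex is contended we drop the spinlock,
 * sleep on the mutex and then retake both locks.  Returns -EAGAIN if
 * the head was removed from the tree while we slept, in which case the
 * caller must start over.
 */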
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

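/*
 * Remove a delayed ref from its rbtree (the href tree for heads, the
 * head's ref tree otherwise) and drop the tree's reference on it,
 * updating the entry counters.  For non-head refs the head's spinlock
 * must be held.
 */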
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	if (btrfs_delayed_ref_is_head(ref)) {
		head = btrfs_delayed_node_to_head(ref);
		rb_erase(&head->href_node, &delayed_refs->href_root);
	} else {
		assert_spin_locked(&head->lock);
		rb_erase(&ref->rb_node, &head->ref_root);
	}
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

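/*
 * Try to merge @ref with the refs that follow it in the head's rbtree.
 * Refs with a sequence number >= @seq must not be merged and stop the
 * scan.  Returns 1 if @ref itself was dropped during merging (the
 * caller must then restart from the beginning of the tree), 0 otherwise.
 */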
static int merge_ref(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_root *delayed_refs,
		     struct btrfs_delayed_ref_head *head,
		     struct btrfs_delayed_ref_node *ref, u64 seq)
{
	struct rb_node *node;
	int mod = 0;
	int done = 0;

	node = rb_next(&ref->rb_node);
	while (!done && node) {
		struct btrfs_delayed_ref_node *next;

		next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_entry(ref, next, 0))
			continue;

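		/*
		 * Same action: fold next's count into ours.  Opposite
		 * actions cancel; keep whichever ref has the larger
		 * ref_mod (swapping if necessary) and subtract the
		 * other side's count from it.
		 */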
		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				struct btrfs_delayed_ref_node *tmp;

				tmp = ref;
				ref = next;
				next = tmp;
				done = 1;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = 1;
		} else {
			/*
			 * You can't have multiples of the same ref on a tree
			 * block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}
	return done;
}

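/*
 * Merge delayed refs on @head that duplicate or cancel each other out.
 * Refs at or beyond the oldest tree mod log sequence number still in
 * use are left untouched, since outstanding tree mod log users may
 * still need to see them individually.
 */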
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	u64 seq = 0;

	assert_spin_locked(&head->lock);
	/*
	 * There are rarely enough refs on a delayed data ref head to
	 * make merging worthwhile, so skip those.
	 */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	node = rb_first(&head->ref_root);
	while (node) {
		struct btrfs_delayed_ref_node *ref;

		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		/* We can't merge refs that are outside of our seq count */
		if (seq && ref->seq >= seq)
			break;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			node = rb_first(&head->ref_root);
		else
			node = rb_next(&ref->rb_node);
	}
}

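/*
 * Check whether a delayed ref with sequence number @seq has to be held
 * back because an older tree mod log sequence number is still in use.
 * Returns 1 if processing must wait, 0 otherwise.
 */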
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
				 (u32)(seq >> 32), (u32)seq,
				 (u32)(elem->seq >> 32), (u32)elem->seq,
				 delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

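/*
 * Select the next delayed ref head to run, scanning from
 * run_delayed_start and wrapping around to the start of the tree at
 * most once.  Heads that are already being processed are skipped; the
 * returned head is marked as processing and run_delayed_start is
 * advanced past it.  Returns NULL if every head is busy or the tree is
 * empty.  The caller is expected to hold delayed_refs->lock.
 */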
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->node.bytenr +
		head->node.num_bytes;
	return head;
}

/*
 * helper function to update an extent delayed ref in the rbtree.
 * existing and update must both have the same bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_head *head,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0)
			drop_delayed_ref(trans, delayed_refs, head, existing);
		else
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by the update's count so the
		 * backref that is eventually added/removed has the
		 * correct reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	spin_lock(&existing_ref->lock);
	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation.
	 * We only need the lock here because the head could be processed
	 * concurrently; for refs we just added we know we're a-ok.
	 */
	existing->ref_mod += update->ref_mod;
	spin_unlock(&existing_ref->lock);
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, int action, int is_data)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->ref_root = RB_ROOT;
	head_ref->processing = 0;

	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	trace_add_delayed_ref_head(ref, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		update_existing_head_ref(&existing->node, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	return head_ref;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action, int no_quota)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);
	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->no_quota = no_quota;
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(ref, full_ref, action);

	spin_lock(&head_ref->lock);
	existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
	if (existing) {
		update_existing_ref(trans, delayed_refs, head_ref, existing,
				    ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
	} else {
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	spin_unlock(&head_ref->lock);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action, int no_quota)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->no_quota = no_quota;
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(ref, full_ref, action);

	spin_lock(&head_ref->lock);
	existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
	if (existing) {
		update_existing_ref(trans, delayed_refs, head_ref, existing,
				    ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
	} else {
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	spin_unlock(&head_ref->lock);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int no_quota)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	if (!is_fstree(ref_root) || !fs_info->quota_enabled)
		no_quota = 0;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
					bytenr, num_bytes, action, 0);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action,
			     no_quota);
	spin_unlock(&delayed_refs->lock);

	return 0;
}

/*
 * add a delayed data ref.  it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int no_quota)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	if (!is_fstree(ref_root) || !fs_info->quota_enabled)
		no_quota = 0;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
					bytenr, num_bytes, action, 1);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action, no_quota);
	spin_unlock(&delayed_refs->lock);

	return 0;
}

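/*
 * Record a pending extent op (key and/or flags update) for an extent by
 * queueing a head-only delayed ref with the BTRFS_UPDATE_DELAYED_HEAD
 * action, which leaves the head's ref_mod unchanged.
 */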
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

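/* destroy the slab caches; safe to call even if some were never created */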
void btrfs_delayed_ref_exit(void)
{
	if (btrfs_delayed_ref_head_cachep)
		kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	if (btrfs_delayed_tree_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	if (btrfs_delayed_data_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	if (btrfs_delayed_extent_op_cachep)
		kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

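/*
 * create the slab caches used for delayed ref tracking; on failure any
 * caches that were already created are destroyed again.
 */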
int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}