/*
 * Copyright (C) 2009 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking. For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing. This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
                          struct btrfs_delayed_tree_ref *ref1, int type)
{
        if (type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
                          struct btrfs_delayed_data_ref *ref1)
{
        if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
                if (ref1->objectid < ref2->objectid)
                        return -1;
                if (ref1->objectid > ref2->objectid)
                        return 1;
                if (ref1->offset < ref2->offset)
                        return -1;
                if (ref1->offset > ref2->offset)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/* insert a new ref to head ref rbtree */
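/* Returns the existing entry on a bytenr collision, NULL once inserted. */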
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
                                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_head *entry;
        struct btrfs_delayed_ref_head *ins;
        u64 bytenr;

        ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
        bytenr = ins->node.bytenr;
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
                                 href_node);

                if (bytenr < entry->node.bytenr)
                        p = &(*p)->rb_left;
                else if (bytenr > entry->node.bytenr)
                        p = &(*p)->rb_right;
                else
                        return entry;
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color(node, root);
        return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
              int return_bigger)
{
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = root->rb_node;
        entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

                if (bytenr < entry->node.bytenr)
                        n = n->rb_left;
                else if (bytenr > entry->node.bytenr)
                        n = n->rb_right;
                else
                        return entry;
        }
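        /*
         * No exact match: entry is the last node visited, a neighbor of
         * bytenr in the tree. When a bigger entry was asked for, step to
         * the next node, wrapping around to the first head so callers can
         * scan the whole tree from any starting bytenr.
         */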
        if (entry && return_bigger) {
                if (bytenr > entry->node.bytenr) {
                        n = rb_next(&entry->href_node);
                        if (!n)
                                n = rb_first(root);
                        entry = rb_entry(n, struct btrfs_delayed_ref_head,
                                         href_node);
                        return entry;
                }
                return entry;
        }
        return NULL;
}

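/*
 * Try to lock the delayed ref head for processing. The caller holds
 * delayed_refs->lock; if the trylock fails, drop that spinlock, sleep on
 * the head's mutex, and re-check that the head is still queued. Returns
 * 0 with the mutex held, or -EAGAIN if the head went away and the caller
 * must start over.
 */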
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        assert_spin_locked(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return 0;

        atomic_inc(&head->node.refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (!head->node.in_tree) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref(&head->node);
                return -EAGAIN;
        }
        btrfs_put_delayed_ref(&head->node);
        return 0;
}

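/*
 * Unlink a ref: a head ref is erased from the href rbtree, any other ref
 * is removed from its head's ref_list. One reference count is dropped on
 * @ref and the bookkeeping counters are updated.
 */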
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
                                    struct btrfs_delayed_ref_root *delayed_refs,
                                    struct btrfs_delayed_ref_head *head,
                                    struct btrfs_delayed_ref_node *ref)
{
        if (btrfs_delayed_ref_is_head(ref)) {
                head = btrfs_delayed_node_to_head(ref);
                rb_erase(&head->href_node, &delayed_refs->href_root);
        } else {
                assert_spin_locked(&head->lock);
                list_del(&ref->list);
        }
        ref->in_tree = 0;
        btrfs_put_delayed_ref(ref);
        atomic_dec(&delayed_refs->num_entries);
        if (trans->delayed_ref_updates)
                trans->delayed_ref_updates--;
}

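/*
 * Scan the ref_list of @head for refs that match @ref (same type and same
 * root or parent) and are not protected by @seq, folding their ref_mod
 * into @ref and dropping them. Returns true once @ref itself has been
 * consumed, so the caller must restart from the front of the list.
 */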
static bool merge_ref(struct btrfs_trans_handle *trans,
                      struct btrfs_delayed_ref_root *delayed_refs,
                      struct btrfs_delayed_ref_head *head,
                      struct btrfs_delayed_ref_node *ref,
                      u64 seq)
{
        struct btrfs_delayed_ref_node *next;
        bool done = false;

        next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
                                list);
        while (!done && &next->list != &head->ref_list) {
                int mod;
                struct btrfs_delayed_ref_node *next2;

                next2 = list_next_entry(next, list);

                if (next == ref)
                        goto next;

                if (seq && next->seq >= seq)
                        goto next;

                if (next->type != ref->type)
                        goto next;

                if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                     ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
                    comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
                                   btrfs_delayed_node_to_tree_ref(next),
                                   ref->type))
                        goto next;
                if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
                     ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
                    comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
                                   btrfs_delayed_node_to_data_ref(next)))
                        goto next;

                if (ref->action == next->action) {
                        mod = next->ref_mod;
                } else {
                        if (ref->ref_mod < next->ref_mod) {
                                swap(ref, next);
                                done = true;
                        }
                        mod = -next->ref_mod;
                }

                drop_delayed_ref(trans, delayed_refs, head, next);
                ref->ref_mod += mod;
                if (ref->ref_mod == 0) {
                        drop_delayed_ref(trans, delayed_refs, head, ref);
                        done = true;
                } else {
                        /*
                         * Can't have multiples of the same ref on a tree block.
                         */
                        WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
                }
next:
                next = next2;
        }

        return done;
}

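/*
 * Merge all mergeable refs queued on @head. Refs at or above the lowest
 * seq on fs_info->tree_mod_seq_list are left untouched so that tree mod
 * log users still see the individual operations.
 */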
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info,
                              struct btrfs_delayed_ref_root *delayed_refs,
                              struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_node *ref;
        u64 seq = 0;

        assert_spin_locked(&head->lock);

        if (list_empty(&head->ref_list))
                return;

        /* We don't have too many refs to merge for data. */
        if (head->is_data)
                return;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                struct seq_list *elem;

                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                seq = elem->seq;
        }
        spin_unlock(&fs_info->tree_mod_seq_lock);

        ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
                               list);
        while (&ref->list != &head->ref_list) {
                if (seq && ref->seq >= seq)
                        goto next;

                if (merge_ref(trans, delayed_refs, head, ref, seq)) {
                        if (list_empty(&head->ref_list))
                                break;
                        ref = list_first_entry(&head->ref_list,
                                               struct btrfs_delayed_ref_node,
                                               list);
                        continue;
                }
next:
                ref = list_next_entry(ref, list);
        }
}

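/*
 * Returns 1 if processing refs at @seq must be held back because an
 * older tree mod log user is still active, 0 if it is safe to proceed.
 */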
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
                            struct btrfs_delayed_ref_root *delayed_refs,
                            u64 seq)
{
        struct seq_list *elem;
        int ret = 0;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                if (seq >= elem->seq) {
                        pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
                                 (u32)(seq >> 32), (u32)seq,
                                 (u32)(elem->seq >> 32), (u32)elem->seq,
                                 delayed_refs);
                        ret = 1;
                }
        }

        spin_unlock(&fs_info->tree_mod_seq_lock);
        return ret;
}

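/*
 * Pick the next delayed ref head to run. The scan starts at
 * run_delayed_start and wraps around to the beginning of the tree once,
 * so every unprocessed head gets a chance. The returned head is marked
 * as processing; NULL means no head is currently available.
 */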
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_head *head;
        u64 start;
        bool loop = false;

        delayed_refs = &trans->transaction->delayed_refs;

again:
        start = delayed_refs->run_delayed_start;
        head = find_ref_head(&delayed_refs->href_root, start, 1);
        if (!head && !loop) {
                delayed_refs->run_delayed_start = 0;
                start = 0;
                loop = true;
                head = find_ref_head(&delayed_refs->href_root, start, 1);
                if (!head)
                        return NULL;
        } else if (!head && loop) {
                return NULL;
        }

        while (head->processing) {
                struct rb_node *node;

                node = rb_next(&head->href_node);
                if (!node) {
                        if (loop)
                                return NULL;
                        delayed_refs->run_delayed_start = 0;
                        start = 0;
                        loop = true;
                        goto again;
                }
                head = rb_entry(node, struct btrfs_delayed_ref_head,
                                href_node);
        }

        head->processing = 1;
        WARN_ON(delayed_refs->num_heads_ready == 0);
        delayed_refs->num_heads_ready--;
        delayed_refs->run_delayed_start = head->node.bytenr +
                head->node.num_bytes;
        return head;
}

/*
 * Helper to insert the ref_node at the tail of the list, or merge it
 * with the tail node.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_root *root,
                           struct btrfs_delayed_ref_head *href,
                           struct btrfs_delayed_ref_node *ref)
{
        struct btrfs_delayed_ref_node *exist;
        int mod;
        int ret = 0;

        spin_lock(&href->lock);
        /* Check whether we can merge the tail node with ref */
        if (list_empty(&href->ref_list))
                goto add_tail;
        exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
                           list);
        /* No need to compare bytenr or is_head */
        if (exist->type != ref->type || exist->seq != ref->seq)
                goto add_tail;

        if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
             exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
            comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
                           btrfs_delayed_node_to_tree_ref(ref),
                           ref->type))
                goto add_tail;
        if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
             exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
            comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
                           btrfs_delayed_node_to_data_ref(ref)))
                goto add_tail;

        /* Now we are sure we can merge */
        ret = 1;
        if (exist->action == ref->action) {
                mod = ref->ref_mod;
        } else {
                /* Need to change action */
                if (exist->ref_mod < ref->ref_mod) {
                        exist->action = ref->action;
                        mod = -exist->ref_mod;
                        exist->ref_mod = ref->ref_mod;
                } else
                        mod = -ref->ref_mod;
        }
        exist->ref_mod += mod;

        /* remove existing tail if its ref_mod is zero */
        if (exist->ref_mod == 0)
                drop_delayed_ref(trans, root, href, exist);
        spin_unlock(&href->lock);
        return ret;

add_tail:
        list_add_tail(&ref->list, &href->ref_list);
        atomic_inc(&root->num_entries);
        trans->delayed_ref_updates++;
        spin_unlock(&href->lock);
        return ret;
}

/*
 * helper function to update the accounting in the head ref.
 * existing and update must have the same bytenr.
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
                         struct btrfs_delayed_ref_node *existing,
                         struct btrfs_delayed_ref_node *update)
{
        struct btrfs_delayed_ref_head *existing_ref;
        struct btrfs_delayed_ref_head *ref;
        int old_ref_mod;

        existing_ref = btrfs_delayed_node_to_head(existing);
        ref = btrfs_delayed_node_to_head(update);
        BUG_ON(existing_ref->is_data != ref->is_data);

        spin_lock(&existing_ref->lock);
        if (ref->must_insert_reserved) {
                /*
                 * If the extent was freed and then reallocated before the
                 * delayed ref entries were processed, we can end up with an
                 * existing head ref without the must_insert_reserved flag
                 * set. Set it again here.
                 */
                existing_ref->must_insert_reserved = ref->must_insert_reserved;

                /*
                 * update the num_bytes so we make sure the accounting
                 * is done correctly
                 */
                existing->num_bytes = update->num_bytes;
        }

        if (ref->extent_op) {
                if (!existing_ref->extent_op) {
                        existing_ref->extent_op = ref->extent_op;
                } else {
                        if (ref->extent_op->update_key) {
                                memcpy(&existing_ref->extent_op->key,
                                       &ref->extent_op->key,
                                       sizeof(ref->extent_op->key));
                                existing_ref->extent_op->update_key = true;
                        }
                        if (ref->extent_op->update_flags) {
                                existing_ref->extent_op->flags_to_set |=
                                        ref->extent_op->flags_to_set;
                                existing_ref->extent_op->update_flags = true;
                        }
                        btrfs_free_delayed_extent_op(ref->extent_op);
                }
        }
        /*
         * update the reference mod on the head to reflect this new operation.
         * we only need the lock here because the existing head could be
         * processed concurrently; for refs we just added we know we're ok.
         */
        old_ref_mod = existing_ref->total_ref_mod;
        existing->ref_mod += update->ref_mod;
        existing_ref->total_ref_mod += update->ref_mod;

        /*
         * If we are going from a positive ref mod to a negative or vice
         * versa we need to make sure to adjust pending_csums accordingly.
         */
        if (existing_ref->is_data) {
                if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
                        delayed_refs->pending_csums -= existing->num_bytes;
                if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
                        delayed_refs->pending_csums += existing->num_bytes;
        }
        spin_unlock(&existing_ref->lock);
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_node *ref,
                     struct btrfs_qgroup_extent_record *qrecord,
                     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
                     int action, int is_data)
{
        struct btrfs_delayed_ref_head *existing;
        struct btrfs_delayed_ref_head *head_ref = NULL;
        struct btrfs_delayed_ref_root *delayed_refs;
        int count_mod = 1;
        int must_insert_reserved = 0;

        /* If reserved is provided, it must be a data extent. */
        BUG_ON(!is_data && reserved);

        /*
         * the head node stores the sum of all the mods, so dropping a ref
         * should drop the sum in the head node by one.
         */
        if (action == BTRFS_UPDATE_DELAYED_HEAD)
                count_mod = 0;
        else if (action == BTRFS_DROP_DELAYED_REF)
                count_mod = -1;

        /*
         * BTRFS_ADD_DELAYED_EXTENT means that we need to update
         * the reserved accounting when the extent is finally added, or
         * if a later modification deletes the delayed ref without ever
         * inserting the extent into the extent allocation tree.
         * ref->must_insert_reserved is the flag used to record
         * that accounting mods are required.
         *
         * Once we record must_insert_reserved, switch the action to
         * BTRFS_ADD_DELAYED_REF because other special casing is not required.
         */
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                must_insert_reserved = 1;
        else
                must_insert_reserved = 0;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = count_mod;
        ref->type = 0;
        ref->action = 0;
        ref->is_head = 1;
        ref->in_tree = 1;
        ref->seq = 0;

        head_ref = btrfs_delayed_node_to_head(ref);
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;
        INIT_LIST_HEAD(&head_ref->ref_list);
        head_ref->processing = 0;
        head_ref->total_ref_mod = count_mod;
        head_ref->qgroup_reserved = 0;
        head_ref->qgroup_ref_root = 0;

        /* Record qgroup extent info if provided */
        if (qrecord) {
                if (ref_root && reserved) {
                        head_ref->qgroup_ref_root = ref_root;
                        head_ref->qgroup_reserved = reserved;
                }

                qrecord->bytenr = bytenr;
                qrecord->num_bytes = num_bytes;
                qrecord->old_roots = NULL;

                if (btrfs_qgroup_insert_dirty_extent_nolock(fs_info,
                                        delayed_refs, qrecord))
                        kfree(qrecord);
        }

        spin_lock_init(&head_ref->lock);
        mutex_init(&head_ref->mutex);

        trace_add_delayed_ref_head(fs_info, ref, head_ref, action);

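        /*
         * A head may already be queued for this bytenr; if so, fold this
         * update into it and free the head we just set up.
         */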
        existing = htree_insert(&delayed_refs->href_root,
                                &head_ref->href_node);
        if (existing) {
                WARN_ON(ref_root && reserved && existing->qgroup_ref_root
                        && existing->qgroup_reserved);
                update_existing_head_ref(delayed_refs, &existing->node, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                head_ref = existing;
        } else {
                if (is_data && count_mod < 0)
                        delayed_refs->pending_csums += num_bytes;
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                atomic_inc(&delayed_refs->num_entries);
                trans->delayed_ref_updates++;
        }
        return head_ref;
}

/*
 * helper to insert a delayed tree ref into the ref list of its head ref.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, u64 parent, u64 ref_root, int level,
                     int action)
{
        struct btrfs_delayed_tree_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;
        int ret;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);
        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->seq = seq;

        full_ref = btrfs_delayed_node_to_tree_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
        else
                ref->type = BTRFS_TREE_BLOCK_REF_KEY;
        full_ref->level = level;

        trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);

        ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

        /*
         * XXX: memory should be freed at the same level it was allocated,
         * but this bad practice exists elsewhere too. Follow it for now;
         * this needs a cleanup.
         */
        if (ret > 0)
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into the ref list of its head ref.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
                     u64 offset, int action)
{
        struct btrfs_delayed_data_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;
        int ret;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        delayed_refs = &trans->transaction->delayed_refs;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->seq = seq;

        full_ref = btrfs_delayed_node_to_data_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_DATA_REF_KEY;
        else
                ref->type = BTRFS_EXTENT_DATA_REF_KEY;

        full_ref->objectid = owner;
        full_ref->offset = offset;

        trace_add_delayed_data_ref(fs_info, ref, full_ref, action);

        ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

        if (ret > 0)
                kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

/*
 * add a delayed tree ref. This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root, int level, int action,
                               struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;

        BUG_ON(extent_op && extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                goto free_ref;

        if (fs_info->quota_enabled && is_fstree(ref_root)) {
                record = kmalloc(sizeof(*record), GFP_NOFS);
                if (!record)
                        goto free_head_ref;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock.
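         * The qgroup record, if any, is handed off here; add_delayed_ref_head()
         * frees it when a record for this extent is already queued.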
         */
        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                                        bytenr, num_bytes, 0, 0, action, 0);

        add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
                             num_bytes, parent, ref_root, level, action);
        spin_unlock(&delayed_refs->lock);

        return 0;

free_head_ref:
        kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
        kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

        return -ENOMEM;
}

/*
 * add a delayed data ref. It's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, u64 reserved, int action,
                               struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;

        BUG_ON(extent_op && !extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                return -ENOMEM;
        }

        if (fs_info->quota_enabled && is_fstree(ref_root)) {
                record = kmalloc(sizeof(*record), GFP_NOFS);
                if (!record) {
                        kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                        kmem_cache_free(btrfs_delayed_ref_head_cachep,
                                        head_ref);
                        return -ENOMEM;
                }
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock.
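         * The reserved byte count is stashed on the head ref for later
         * qgroup accounting of the reserved space.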
         */
        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                                        bytenr, num_bytes, ref_root, reserved,
                                        action, 1);

        add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
                             num_bytes, parent, ref_root, owner, offset,
                             action);
        spin_unlock(&delayed_refs->lock);

        return 0;
}

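/*
 * Queue a metadata-only change (key and/or flags, via @extent_op) for an
 * extent. This adds a head ref with a BTRFS_UPDATE_DELAYED_HEAD action,
 * which changes no reference counts.
 */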
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
                             num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
                             extent_op->is_data);

        spin_unlock(&delayed_refs->lock);
        return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

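/*
 * kmem_cache_destroy() is a no-op on a NULL cache, so this is safe to
 * call from the partial-failure path of btrfs_delayed_ref_init().
 */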
void btrfs_delayed_ref_exit(void)
{
        kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
        kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int btrfs_delayed_ref_init(void)
{
        btrfs_delayed_ref_head_cachep = kmem_cache_create(
                                "btrfs_delayed_ref_head",
                                sizeof(struct btrfs_delayed_ref_head), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_ref_head_cachep)
                goto fail;

        btrfs_delayed_tree_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_tree_ref",
                                sizeof(struct btrfs_delayed_tree_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_tree_ref_cachep)
                goto fail;

        btrfs_delayed_data_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_data_ref",
                                sizeof(struct btrfs_delayed_data_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_data_ref_cachep)
                goto fail;

        btrfs_delayed_extent_op_cachep = kmem_cache_create(
                                "btrfs_delayed_extent_op",
                                sizeof(struct btrfs_delayed_extent_op), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_extent_op_cachep)
                goto fail;

        return 0;
fail:
        btrfs_delayed_ref_exit();
        return -ENOMEM;
}