/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

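/*
 * A typical flow, roughly (the entry points live in extent-tree.c):
 *
 *   btrfs_inc_extent_ref() / btrfs_free_extent()
 *     -> btrfs_add_delayed_tree_ref() / btrfs_add_delayed_data_ref()
 *          -> add_delayed_ref_head()   find or create the per-bytenr head
 *          -> add_delayed_tree_ref()   queue the individual modification
 *
 * The queued refs hang off their head, and the heads off the href_root
 * rbtree, until btrfs_run_delayed_refs() applies the net result to the
 * extent allocation tree.
 */
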
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
                          struct btrfs_delayed_tree_ref *ref1, int type)
{
        if (type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
                          struct btrfs_delayed_data_ref *ref1)
{
        if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
                if (ref1->objectid < ref2->objectid)
                        return -1;
                if (ref1->objectid > ref2->objectid)
                        return 1;
                if (ref1->offset < ref2->offset)
                        return -1;
                if (ref1->offset > ref2->offset)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}
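
/*
 * Taken together, the two comparators above define when two queued refs
 * describe the same backref: keyed refs must match on (root, objectid,
 * offset), shared refs on the parent block's bytenr.  They return -1/0/1
 * in the usual comparator style, but the callers in this file only care
 * whether the result is zero (same backref) or not.
 */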

/*
 * insert a new ref head into the rbtree of heads.  Returns the existing
 * head at the same bytenr instead of inserting, if there already is one.
 */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
                                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_head *entry;
        struct btrfs_delayed_ref_head *ins;
        u64 bytenr;

        ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
        bytenr = ins->node.bytenr;
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
                                 href_node);

                if (bytenr < entry->node.bytenr)
                        p = &(*p)->rb_left;
                else if (bytenr > entry->node.bytenr)
                        p = &(*p)->rb_right;
                else
                        return entry;
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color(node, root);
        return NULL;
}

/*
 * find a head entry based on bytenr.  This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
              int return_bigger)
{
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = root->rb_node;
        entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

                if (bytenr < entry->node.bytenr)
                        n = n->rb_left;
                else if (bytenr > entry->node.bytenr)
                        n = n->rb_right;
                else
                        return entry;
        }
        if (entry && return_bigger) {
                if (bytenr > entry->node.bytenr) {
                        n = rb_next(&entry->href_node);
                        if (!n)
                                n = rb_first(root);
                        entry = rb_entry(n, struct btrfs_delayed_ref_head,
                                         href_node);
                        return entry;
                }
                return entry;
        }
        return NULL;
}

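/*
 * Acquire the mutex of a given ref head without holding up
 * delayed_refs->lock for the whole wait: if the trylock fails we pin the
 * head, drop the spinlock, and sleep on the mutex.  Returns 0 with the
 * mutex held, or -EAGAIN if the head was removed from the tree in the
 * meantime and the caller needs to look it up again.
 */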
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        assert_spin_locked(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return 0;

        refcount_inc(&head->node.refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (!head->node.in_tree) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref(&head->node);
                return -EAGAIN;
        }
        btrfs_put_delayed_ref(&head->node);
        return 0;
}

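/*
 * Unlink one ref (or a whole head) from the bookkeeping and drop the
 * reference the tree/list held on it.  Heads come off the href_root
 * rbtree; plain refs come off their head's ref_list (and ref_add_list
 * if they were queued there).
 */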
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
                                    struct btrfs_delayed_ref_root *delayed_refs,
                                    struct btrfs_delayed_ref_head *head,
                                    struct btrfs_delayed_ref_node *ref)
{
        if (btrfs_delayed_ref_is_head(ref)) {
                head = btrfs_delayed_node_to_head(ref);
                rb_erase(&head->href_node, &delayed_refs->href_root);
        } else {
                assert_spin_locked(&head->lock);
                list_del(&ref->list);
                if (!list_empty(&ref->add_list))
                        list_del(&ref->add_list);
        }
        ref->in_tree = 0;
        btrfs_put_delayed_ref(ref);
        atomic_dec(&delayed_refs->num_entries);
        if (trans->delayed_ref_updates)
                trans->delayed_ref_updates--;
}

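/*
 * Try to merge @ref with the refs that follow it on its head's ref_list.
 * Two refs can be combined (or cancel out entirely) when they modify the
 * same backref and neither is pinned by a tree mod log sequence number
 * that is still in use.  Returns true once @ref itself has been consumed
 * by the merging.
 */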
static bool merge_ref(struct btrfs_trans_handle *trans,
                      struct btrfs_delayed_ref_root *delayed_refs,
                      struct btrfs_delayed_ref_head *head,
                      struct btrfs_delayed_ref_node *ref,
                      u64 seq)
{
        struct btrfs_delayed_ref_node *next;
        bool done = false;

        next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
                                list);
        while (!done && &next->list != &head->ref_list) {
                int mod;
                struct btrfs_delayed_ref_node *next2;

                next2 = list_next_entry(next, list);

                if (next == ref)
                        goto next;

                if (seq && next->seq >= seq)
                        goto next;

                if (next->type != ref->type)
                        goto next;

                if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                     ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
                    comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
                                   btrfs_delayed_node_to_tree_ref(next),
                                   ref->type))
                        goto next;
                if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
                     ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
                    comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
                                   btrfs_delayed_node_to_data_ref(next)))
                        goto next;

                if (ref->action == next->action) {
                        mod = next->ref_mod;
                } else {
                        if (ref->ref_mod < next->ref_mod) {
                                swap(ref, next);
                                done = true;
                        }
                        mod = -next->ref_mod;
                }

                drop_delayed_ref(trans, delayed_refs, head, next);
                ref->ref_mod += mod;
                if (ref->ref_mod == 0) {
                        drop_delayed_ref(trans, delayed_refs, head, ref);
                        done = true;
                } else {
                        /*
                         * Can't have multiples of the same ref on a tree block.
                         */
                        WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
                }
next:
                next = next2;
        }

        return done;
}

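/*
 * Merge whatever can be merged on @head's ref_list.  Refs whose sequence
 * number is not older than the oldest tree mod log sequence still in use
 * are left alone, since backref walkers may still need to see them.
 */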
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info,
                              struct btrfs_delayed_ref_root *delayed_refs,
                              struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_node *ref;
        u64 seq = 0;

        assert_spin_locked(&head->lock);

        if (list_empty(&head->ref_list))
                return;

        /* We don't have too many refs to merge for data. */
        if (head->is_data)
                return;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                struct seq_list *elem;

                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                seq = elem->seq;
        }
        spin_unlock(&fs_info->tree_mod_seq_lock);

        ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
                               list);
        while (&ref->list != &head->ref_list) {
                if (seq && ref->seq >= seq)
                        goto next;

                if (merge_ref(trans, delayed_refs, head, ref, seq)) {
                        if (list_empty(&head->ref_list))
                                break;
                        ref = list_first_entry(&head->ref_list,
                                               struct btrfs_delayed_ref_node,
                                               list);
                        continue;
                }
next:
                ref = list_next_entry(ref, list);
        }
}

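/*
 * Returns 1 when @seq is not older than the oldest tree mod log sequence
 * number still in use, in which case processing of the matching delayed
 * ref has to be postponed; returns 0 when it is safe to proceed.
 */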
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
                            struct btrfs_delayed_ref_root *delayed_refs,
                            u64 seq)
{
        struct seq_list *elem;
        int ret = 0;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                if (seq >= elem->seq) {
                        btrfs_debug(fs_info,
                                "holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
                                (u32)(seq >> 32), (u32)seq,
                                (u32)(elem->seq >> 32), (u32)elem->seq,
                                delayed_refs);
                        ret = 1;
                }
        }

        spin_unlock(&fs_info->tree_mod_seq_lock);
        return ret;
}

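/*
 * Pick the next ref head to process, scanning the href_root rbtree from
 * run_delayed_start and wrapping around at most once.  Heads that are
 * already being processed are skipped; the returned head is marked as
 * processing and run_delayed_start is advanced past it.
 */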
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_head *head;
        u64 start;
        bool loop = false;

        delayed_refs = &trans->transaction->delayed_refs;

again:
        start = delayed_refs->run_delayed_start;
        head = find_ref_head(&delayed_refs->href_root, start, 1);
        if (!head && !loop) {
                delayed_refs->run_delayed_start = 0;
                start = 0;
                loop = true;
                head = find_ref_head(&delayed_refs->href_root, start, 1);
                if (!head)
                        return NULL;
        } else if (!head && loop) {
                return NULL;
        }

        while (head->processing) {
                struct rb_node *node;

                node = rb_next(&head->href_node);
                if (!node) {
                        if (loop)
                                return NULL;
                        delayed_refs->run_delayed_start = 0;
                        start = 0;
                        loop = true;
                        goto again;
                }
                head = rb_entry(node, struct btrfs_delayed_ref_head,
                                href_node);
        }

        head->processing = 1;
        WARN_ON(delayed_refs->num_heads_ready == 0);
        delayed_refs->num_heads_ready--;
        delayed_refs->run_delayed_start = head->node.bytenr +
                head->node.num_bytes;
        return head;
}

/*
 * Helper to insert the ref_node at the tail of the list, or merge it with
 * the current tail node if they refer to the same backref.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_root *root,
                           struct btrfs_delayed_ref_head *href,
                           struct btrfs_delayed_ref_node *ref)
{
        struct btrfs_delayed_ref_node *exist;
        int mod;
        int ret = 0;

        spin_lock(&href->lock);
        /* Check whether we can merge the tail node with ref */
        if (list_empty(&href->ref_list))
                goto add_tail;
        exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
                           list);
        /* No need to compare bytenr nor is_head */
        if (exist->type != ref->type || exist->seq != ref->seq)
                goto add_tail;

        if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
             exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
            comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
                           btrfs_delayed_node_to_tree_ref(ref),
                           ref->type))
                goto add_tail;
        if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
             exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
            comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
                           btrfs_delayed_node_to_data_ref(ref)))
                goto add_tail;

        /* Now we are sure we can merge */
        ret = 1;
        if (exist->action == ref->action) {
                mod = ref->ref_mod;
        } else {
                /* Need to change action */
                if (exist->ref_mod < ref->ref_mod) {
                        exist->action = ref->action;
                        mod = -exist->ref_mod;
                        exist->ref_mod = ref->ref_mod;
                        if (ref->action == BTRFS_ADD_DELAYED_REF)
                                list_add_tail(&exist->add_list,
                                              &href->ref_add_list);
                        else if (ref->action == BTRFS_DROP_DELAYED_REF) {
                                ASSERT(!list_empty(&exist->add_list));
                                list_del(&exist->add_list);
                        } else {
                                ASSERT(0);
                        }
                } else
                        mod = -ref->ref_mod;
        }
        exist->ref_mod += mod;

        /* remove existing tail if its ref_mod is zero */
        if (exist->ref_mod == 0)
                drop_delayed_ref(trans, root, href, exist);
        spin_unlock(&href->lock);
        return ret;

add_tail:
        list_add_tail(&ref->list, &href->ref_list);
        if (ref->action == BTRFS_ADD_DELAYED_REF)
                list_add_tail(&ref->add_list, &href->ref_add_list);
        atomic_inc(&root->num_entries);
        trans->delayed_ref_updates++;
        spin_unlock(&href->lock);
        return ret;
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
                         struct btrfs_delayed_ref_node *existing,
                         struct btrfs_delayed_ref_node *update,
                         int *old_ref_mod_ret)
{
        struct btrfs_delayed_ref_head *existing_ref;
        struct btrfs_delayed_ref_head *ref;
        int old_ref_mod;

        existing_ref = btrfs_delayed_node_to_head(existing);
        ref = btrfs_delayed_node_to_head(update);
        BUG_ON(existing_ref->is_data != ref->is_data);

        spin_lock(&existing_ref->lock);
        if (ref->must_insert_reserved) {
                /* if the extent was freed and then
                 * reallocated before the delayed ref
                 * entries were processed, we can end up
                 * with an existing head ref without
                 * the must_insert_reserved flag set.
                 * Set it again here
                 */
                existing_ref->must_insert_reserved = ref->must_insert_reserved;

                /*
                 * update the num_bytes so we make sure the accounting
                 * is done correctly
                 */
                existing->num_bytes = update->num_bytes;

        }

        if (ref->extent_op) {
                if (!existing_ref->extent_op) {
                        existing_ref->extent_op = ref->extent_op;
                } else {
                        if (ref->extent_op->update_key) {
                                memcpy(&existing_ref->extent_op->key,
                                       &ref->extent_op->key,
                                       sizeof(ref->extent_op->key));
                                existing_ref->extent_op->update_key = true;
                        }
                        if (ref->extent_op->update_flags) {
                                existing_ref->extent_op->flags_to_set |=
                                        ref->extent_op->flags_to_set;
                                existing_ref->extent_op->update_flags = true;
                        }
                        btrfs_free_delayed_extent_op(ref->extent_op);
                }
        }
        /*
         * update the reference mod on the head to reflect this new operation,
         * only need the lock for this case cause we could be processing it
         * currently, for refs we just added we know we're a-ok.
         */
        old_ref_mod = existing_ref->total_ref_mod;
        if (old_ref_mod_ret)
                *old_ref_mod_ret = old_ref_mod;
        existing->ref_mod += update->ref_mod;
        existing_ref->total_ref_mod += update->ref_mod;

        /*
         * If we are going from a positive ref mod to a negative or vice
         * versa we need to make sure to adjust pending_csums accordingly.
         */
        if (existing_ref->is_data) {
                if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
                        delayed_refs->pending_csums -= existing->num_bytes;
                if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
                        delayed_refs->pending_csums += existing->num_bytes;
        }
        spin_unlock(&existing_ref->lock);
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_node *ref,
                     struct btrfs_qgroup_extent_record *qrecord,
                     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
                     int action, int is_data, int *qrecord_inserted_ret,
                     int *old_ref_mod, int *new_ref_mod)
{
        struct btrfs_delayed_ref_head *existing;
        struct btrfs_delayed_ref_head *head_ref = NULL;
        struct btrfs_delayed_ref_root *delayed_refs;
        int count_mod = 1;
        int must_insert_reserved = 0;
        int qrecord_inserted = 0;

        /* If reserved is provided, it must be a data extent. */
        BUG_ON(!is_data && reserved);

        /*
         * the head node stores the sum of all the mods, so dropping a ref
         * should drop the sum in the head node by one.
         */
        if (action == BTRFS_UPDATE_DELAYED_HEAD)
                count_mod = 0;
        else if (action == BTRFS_DROP_DELAYED_REF)
                count_mod = -1;

        /*
         * BTRFS_ADD_DELAYED_EXTENT means that we need to update
         * the reserved accounting when the extent is finally added, or
         * if a later modification deletes the delayed ref without ever
         * inserting the extent into the extent allocation tree.
         * ref->must_insert_reserved is the flag used to record
         * that accounting mods are required.
         *
         * Once we record must_insert_reserved, switch the action to
         * BTRFS_ADD_DELAYED_REF because other special casing is not required.
         */
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                must_insert_reserved = 1;
        else
                must_insert_reserved = 0;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        refcount_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = count_mod;
        ref->type = 0;
        ref->action = 0;
        ref->is_head = 1;
        ref->in_tree = 1;
        ref->seq = 0;

        head_ref = btrfs_delayed_node_to_head(ref);
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;
        INIT_LIST_HEAD(&head_ref->ref_list);
        INIT_LIST_HEAD(&head_ref->ref_add_list);
        head_ref->processing = 0;
        head_ref->total_ref_mod = count_mod;
        head_ref->qgroup_reserved = 0;
        head_ref->qgroup_ref_root = 0;

        /* Record qgroup extent info if provided */
        if (qrecord) {
                if (ref_root && reserved) {
                        head_ref->qgroup_ref_root = ref_root;
                        head_ref->qgroup_reserved = reserved;
                }

                qrecord->bytenr = bytenr;
                qrecord->num_bytes = num_bytes;
                qrecord->old_roots = NULL;

                if (btrfs_qgroup_trace_extent_nolock(fs_info,
                                        delayed_refs, qrecord))
                        kfree(qrecord);
                else
                        qrecord_inserted = 1;
        }

        spin_lock_init(&head_ref->lock);
        mutex_init(&head_ref->mutex);

        trace_add_delayed_ref_head(fs_info, ref, head_ref, action);

        existing = htree_insert(&delayed_refs->href_root,
                                &head_ref->href_node);
        if (existing) {
                WARN_ON(ref_root && reserved && existing->qgroup_ref_root
                        && existing->qgroup_reserved);
                update_existing_head_ref(delayed_refs, &existing->node, ref,
                                         old_ref_mod);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                head_ref = existing;
        } else {
                if (old_ref_mod)
                        *old_ref_mod = 0;
                if (is_data && count_mod < 0)
                        delayed_refs->pending_csums += num_bytes;
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                atomic_inc(&delayed_refs->num_entries);
                trans->delayed_ref_updates++;
        }
        if (qrecord_inserted_ret)
                *qrecord_inserted_ret = qrecord_inserted;
        if (new_ref_mod)
                *new_ref_mod = head_ref->total_ref_mod;
        return head_ref;
}

/*
 * helper to insert a delayed tree ref into the head's ref list.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, u64 parent, u64 ref_root, int level,
                     int action)
{
        struct btrfs_delayed_tree_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;
        int ret;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);
        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        refcount_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->seq = seq;
        INIT_LIST_HEAD(&ref->list);
        INIT_LIST_HEAD(&ref->add_list);

        full_ref = btrfs_delayed_node_to_tree_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
        else
                ref->type = BTRFS_TREE_BLOCK_REF_KEY;
        full_ref->level = level;

        trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);

        ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

        /*
         * XXX: memory should be freed at the same level it was allocated.
         * But bad practice is everywhere... follow it now.  Needs cleanup.
         */
        if (ret > 0)
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into the head's ref list.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                     struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
                     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
                     u64 offset, int action)
{
        struct btrfs_delayed_data_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;
        int ret;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        delayed_refs = &trans->transaction->delayed_refs;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);

        /* first set the basic ref node struct up */
        refcount_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->seq = seq;
        INIT_LIST_HEAD(&ref->list);
        INIT_LIST_HEAD(&ref->add_list);

        full_ref = btrfs_delayed_node_to_data_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_DATA_REF_KEY;
        else
                ref->type = BTRFS_EXTENT_DATA_REF_KEY;

        full_ref->objectid = owner;
        full_ref->offset = offset;

        trace_add_delayed_data_ref(fs_info, ref, full_ref, action);

        ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

        if (ret > 0)
                kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root, int level, int action,
                               struct btrfs_delayed_extent_op *extent_op,
                               int *old_ref_mod, int *new_ref_mod)
{
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;
        int qrecord_inserted;

        BUG_ON(extent_op && extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                goto free_ref;

        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
            is_fstree(ref_root)) {
                record = kmalloc(sizeof(*record), GFP_NOFS);
                if (!record)
                        goto free_head_ref;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                                        bytenr, num_bytes, 0, 0, action, 0,
                                        &qrecord_inserted, old_ref_mod,
                                        new_ref_mod);

        add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
                             num_bytes, parent, ref_root, level, action);
        spin_unlock(&delayed_refs->lock);

        if (qrecord_inserted)
                return btrfs_qgroup_trace_extent_post(fs_info, record);
        return 0;

free_head_ref:
        kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
        kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

        return -ENOMEM;
}

/*
 * add a delayed data ref.  it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, u64 reserved, int action,
                               int *old_ref_mod, int *new_ref_mod)
{
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;
        int qrecord_inserted;

        ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                return -ENOMEM;
        }

        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
            is_fstree(ref_root)) {
                record = kmalloc(sizeof(*record), GFP_NOFS);
                if (!record) {
                        kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                        kmem_cache_free(btrfs_delayed_ref_head_cachep,
                                        head_ref);
                        return -ENOMEM;
                }
        }

        head_ref->extent_op = NULL;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                                        bytenr, num_bytes, ref_root, reserved,
                                        action, 1, &qrecord_inserted,
                                        old_ref_mod, new_ref_mod);

        add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
                             num_bytes, parent, ref_root, owner, offset,
                             action);
        spin_unlock(&delayed_refs->lock);

        if (qrecord_inserted)
                return btrfs_qgroup_trace_extent_post(fs_info, record);
        return 0;
}

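/*
 * Queue a head-only update (BTRFS_UPDATE_DELAYED_HEAD) carrying an
 * extent_op, which lets callers set flags or a key on an extent item
 * without changing its reference count.
 */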
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
                             num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
                             extent_op->is_data, NULL, NULL, NULL);

        spin_unlock(&delayed_refs->lock);
        return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
        return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

void btrfs_delayed_ref_exit(void)
{
        kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
        kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int btrfs_delayed_ref_init(void)
{
        btrfs_delayed_ref_head_cachep = kmem_cache_create(
                                "btrfs_delayed_ref_head",
                                sizeof(struct btrfs_delayed_ref_head), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_ref_head_cachep)
                goto fail;

        btrfs_delayed_tree_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_tree_ref",
                                sizeof(struct btrfs_delayed_tree_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_tree_ref_cachep)
                goto fail;

        btrfs_delayed_data_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_data_ref",
                                sizeof(struct btrfs_delayed_data_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_data_ref_cachep)
                goto fail;

        btrfs_delayed_extent_op_cachep = kmem_cache_create(
                                "btrfs_delayed_extent_op",
                                sizeof(struct btrfs_delayed_extent_op), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_extent_op_cachep)
                goto fail;

        return 0;
fail:
        btrfs_delayed_ref_exit();
        return -ENOMEM;
}