/*
 * Copyright (C) 2009 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking. For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing. This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1, int type)
{
	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * insert a new ref head into the head ref rbtree, or return an existing
 * entry that already covers the same bytenr
 */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->node.bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->node.bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->node.bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->node.bytenr)
			n = n->rb_left;
		else if (bytenr > entry->node.bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->node.bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}

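/*
 * Lock the mutex on a delayed ref head. The delayed_refs spinlock must be
 * held on entry; if the mutex is contended we drop the spinlock, sleep on
 * the mutex and then retake the spinlock. Returns -EAGAIN if the head was
 * run and removed from the tree while we slept, in which case the caller
 * must start over.
 */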
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

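/*
 * Remove a delayed ref node (or an unprocessed head ref) from the tracking
 * structures and drop the reference the tree held on it.
 */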
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	if (btrfs_delayed_ref_is_head(ref)) {
		head = btrfs_delayed_node_to_head(ref);
		rb_erase(&head->href_node, &delayed_refs->href_root);
	} else {
		assert_spin_locked(&head->lock);
		list_del(&ref->list);
		if (!list_empty(&ref->add_list))
			list_del(&ref->add_list);
	}
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

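/*
 * Try to merge @ref with the refs that follow it on @head's ref list.
 * Refs with a sequence number of @seq or newer are skipped so that tree
 * mod log users still see them. Returns true if @ref itself was dropped
 * or swapped away, meaning the caller must restart from the front of the
 * list.
 */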
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	bool done = false;

	next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
				list);
	while (!done && &next->list != &head->ref_list) {
		int mod;
		struct btrfs_delayed_ref_node *next2;

		next2 = list_next_entry(next, list);

		if (next == ref)
			goto next;

		if (seq && next->seq >= seq)
			goto next;

		if (next->type != ref->type)
			goto next;

		if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
		     ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
		    comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
				   btrfs_delayed_node_to_tree_ref(next),
				   ref->type))
			goto next;
		if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
		     ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
		    comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
				   btrfs_delayed_node_to_data_ref(next)))
			goto next;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
next:
		next = next2;
	}

	return done;
}

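/*
 * Merge the delayed refs queued on @head so as few operations as possible
 * are left to run. Only metadata heads are merged; refs newer than the
 * oldest tree mod log sequence number still in use are preserved.
 */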
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	u64 seq = 0;

	assert_spin_locked(&head->lock);

	if (list_empty(&head->ref_list))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
			       list);
	while (&ref->list != &head->ref_list) {
		if (seq && ref->seq >= seq)
			goto next;

		if (merge_ref(trans, delayed_refs, head, ref, seq)) {
			if (list_empty(&head->ref_list))
				break;
			ref = list_first_entry(&head->ref_list,
					       struct btrfs_delayed_ref_node,
					       list);
			continue;
		}
next:
		ref = list_next_entry(ref, list);
	}
}

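/*
 * Return 1 if @seq is newer than or equal to the oldest tree mod log
 * sequence number still in use, meaning the corresponding delayed ref must
 * be held back, and 0 if it is safe to process.
 */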
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			btrfs_debug(fs_info,
				"holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
				(u32)(seq >> 32), (u32)seq,
				(u32)(elem->seq >> 32), (u32)elem->seq,
				delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

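/*
 * Select the next delayed ref head to run, starting at run_delayed_start
 * and wrapping around to the beginning of the tree once. Heads already
 * marked as processing are skipped; the returned head is marked processing
 * and run_delayed_start is advanced past it. Returns NULL when there is
 * nothing left to do.
 */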
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->node.bytenr +
		head->node.num_bytes;
	return head;
}

/*
 * Helper to insert the ref_node at the tail of href's ref list, or merge
 * it with the tail entry.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_root *root,
			   struct btrfs_delayed_ref_head *href,
			   struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	/* Check whether we can merge the tail node with ref */
	if (list_empty(&href->ref_list))
		goto add_tail;
	exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
			   list);
	/* No need to compare bytenr nor is_head */
	if (exist->type != ref->type || exist->seq != ref->seq)
		goto add_tail;

	if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
	     exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
	    comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
			   btrfs_delayed_node_to_tree_ref(ref),
			   ref->type))
		goto add_tail;
	if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
	     exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
	    comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
			   btrfs_delayed_node_to_data_ref(ref)))
		goto add_tail;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;

add_tail:
	list_add_tail(&ref->list, &href->ref_list);
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}

/*
 * helper function to update the accounting in the head ref.
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;
	int old_ref_mod;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	spin_lock(&existing_ref->lock);
	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = true;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case cause we could be processing it
	 * currently, for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing_ref->total_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing_ref->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing_ref->is_data) {
		if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing_ref->lock);
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_node *ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
		     int action, int is_data)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	INIT_LIST_HEAD(&head_ref->ref_list);
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;

		if (btrfs_qgroup_trace_extent_nolock(fs_info,
						     delayed_refs, qrecord))
			kfree(qrecord);
	}

	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	trace_add_delayed_ref_head(fs_info, ref, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, &existing->node, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (is_data && count_mod < 0)
			delayed_refs->pending_csums += num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	return head_ref;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action)
{
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);
	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	INIT_LIST_HEAD(&ref->list);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	/*
	 * XXX: memory should be freed at the same level it was allocated,
	 * but we follow the existing bad practice here for now. Needs
	 * cleanup.
	 */
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action)
{
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	INIT_LIST_HEAD(&ref->list);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(fs_info, ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

/*
 * add a delayed tree ref. This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		goto free_ref;

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record)
			goto free_head_ref;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, 0, 0, action, 0);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action);
	spin_unlock(&delayed_refs->lock);

	return 0;

free_head_ref:
	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	return -ENOMEM;
}

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, ref_root, reserved,
					action, 1);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action);
	spin_unlock(&delayed_refs->lock);

	return 0;
}

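/*
 * Record an extent_op (key and/or flags update) against an extent by
 * queueing a delayed ref head with no reference count modification.
 */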
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

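/* Destroy the slab caches used for delayed ref tracking. */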
void btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

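/*
 * Create the slab caches used for delayed ref tracking. On failure all
 * caches created so far are destroyed and -ENOMEM is returned.
 */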
int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}