/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

/*
 * control flags for do_chunk_alloc's force field
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_FORCE = 1,
	CHUNK_ALLOC_LIMITED = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
	RESERVE_FREE = 0,
	RESERVE_ALLOC = 1,
	RESERVE_ALLOC_NO_ACCOUNT = 2,
};

static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve);

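/*
 * Editor's note (added comment): has caching of this block group's free
 * space finished?  The barrier orders this lockless read against updates
 * to ->cached made under ->lock.
 */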
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}

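/* does the block group's type include all of the given flag bits? */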
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		BUG_ON(ret);
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret);

		while (nr--) {
			cache->bytes_super += stripe_len;
			ret = add_excluded_extent(root, logical[nr],
						  stripe_len);
			BUG_ON(ret);
		}

		kfree(logical);
	}
	return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* We're loading it the fast way, so we don't have a caching_ctl. */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check pinned_extents for any extents that can't be
 * used yet, as their free space is only released once the transaction
 * commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret);
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret);
	}

	return total_added;
}

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = 0;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 1;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched() ||
			    btrfs_next_leaf(extent_root, path)) {
				caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->extent_commit_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				goto again;
			}
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
out:
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
			     struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     int load_cache_only)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	smp_mb();
	if (cache->cached != BTRFS_CACHE_NO)
		return 0;

	/*
	 * We can't do the read from on-disk cache during a commit since we
	 * need to have the normal tree locking.  Also if we are currently
	 * trying to allocate blocks for the tree root we can't do the fast
	 * caching since we likely hold important locks.
	 */
	if (trans && (!trans->transaction->in_commit) &&
	    (root && root != root->fs_info->tree_root)) {
		spin_lock(&cache->lock);
		if (cache->cached != BTRFS_CACHE_NO) {
			spin_unlock(&cache->lock);
			return 0;
		}
		cache->cached = BTRFS_CACHE_STARTED;
		spin_unlock(&cache->lock);

		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			cache->cached = BTRFS_CACHE_NO;
		}
		spin_unlock(&cache->lock);
		if (ret == 1) {
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	}

	if (load_cache_only)
		return 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	BUG_ON(!caching_ctl);

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	/* one for caching kthread, one for caching block group list */
	atomic_set(&caching_ctl->count, 2);
	caching_ctl->work.func = caching_thread;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&cache->lock);

	down_write(&fs_info->extent_commit_sem);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

	return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
		 BTRFS_BLOCK_GROUP_METADATA;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

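/*
 * Editor's note (added comment): scale num in tenths, so
 * div_factor(n, 9) returns 90% of n; div_factor_fine() below does the
 * same with percent granularity.
 */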
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

static u64 div_factor_fine(u64 num, int factor)
{
	if (factor == 100)
		return num;
	num *= factor;
	do_div(num, 100);
	return num;
}

u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to look up the reference count and flags of an extent.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head
 * node may also store the extent flags to set.  This way you can check
 * to see what the reference count and extent flags would be once all of
 * the delayed refs have been processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and
			 * try again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Full back refs are actually generic and can
 * be used in all the cases implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead.  Every time a tree
 * block gets COWed, we have to update the back ref entries for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction,
 * the only way to drop a reference to it is to COW it.  So we can detect
 * the event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */
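
/*
 * Illustrative example (an editor's sketch, not part of the original
 * comment): a data extent at bytenr 4096 referenced by inode 257 at file
 * offset 0 in subvolume 5 would carry an implicit back ref keyed as
 *
 *     (4096, BTRFS_EXTENT_DATA_REF_KEY,
 *      hash_extent_data_ref(5, 257, 0))
 *
 * while the same extent referenced through a shared tree block at
 * bytenr P would instead use
 *
 *     (4096, BTRFS_SHARED_DATA_REF_KEY, P)
 */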

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0);
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret);

	ret = btrfs_extend_item(trans, root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

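/*
 * Editor's note (added comment): hash the (root objectid, inode objectid,
 * file offset) triple that identifies an implicit data back ref; the
 * result is used as the key offset for BTRFS_EXTENT_DATA_REF_KEY items.
 */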
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

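/*
 * Editor's note (added comment): pick the back ref key type for a
 * reference.  Tree blocks get TREE_BLOCK/SHARED_BLOCK refs and file data
 * gets EXTENT_DATA/SHARED_DATA refs; a non-zero parent means the block is
 * shared and needs a full back ref.
 */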
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

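/*
 * Editor's note (added comment): walk up the path and return the key
 * immediately after the current slot; returns 1 if there is no next key.
 */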
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(ret);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add a new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add a new inline back
		 * ref if there is any kind of item for this block.
		 */
1532 | if (find_next_key(path, 0, &key) == 0 && | |
1533 | key.objectid == bytenr && | |
1534 | key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) { | |
1535 | err = -EAGAIN; | |
1536 | goto out; | |
1537 | } | |
1538 | } | |
1539 | *ref_ret = (struct btrfs_extent_inline_ref *)ptr; | |
1540 | out: | |
1541 | if (insert) { | |
1542 | path->keep_locks = 0; | |
1543 | btrfs_unlock_up_safe(path, 1); | |
1544 | } | |
1545 | return err; | |
1546 | } | |
1547 | ||
1548 | /* | |
1549 | * helper to add new inline back ref | |
1550 | */ | |
1551 | static noinline_for_stack | |
1552 | int setup_inline_extent_backref(struct btrfs_trans_handle *trans, | |
1553 | struct btrfs_root *root, | |
1554 | struct btrfs_path *path, | |
1555 | struct btrfs_extent_inline_ref *iref, | |
1556 | u64 parent, u64 root_objectid, | |
1557 | u64 owner, u64 offset, int refs_to_add, | |
1558 | struct btrfs_delayed_extent_op *extent_op) | |
1559 | { | |
1560 | struct extent_buffer *leaf; | |
1561 | struct btrfs_extent_item *ei; | |
1562 | unsigned long ptr; | |
1563 | unsigned long end; | |
1564 | unsigned long item_offset; | |
1565 | u64 refs; | |
1566 | int size; | |
1567 | int type; | |
1568 | int ret; | |
1569 | ||
1570 | leaf = path->nodes[0]; | |
1571 | ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); | |
1572 | item_offset = (unsigned long)iref - (unsigned long)ei; | |
1573 | ||
1574 | type = extent_ref_type(parent, owner); | |
1575 | size = btrfs_extent_inline_ref_size(type); | |
1576 | ||
1577 | ret = btrfs_extend_item(trans, root, path, size); | |
1578 | ||
1579 | ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); | |
1580 | refs = btrfs_extent_refs(leaf, ei); | |
1581 | refs += refs_to_add; | |
1582 | btrfs_set_extent_refs(leaf, ei, refs); | |
1583 | if (extent_op) | |
1584 | __run_delayed_extent_op(extent_op, leaf, ei); | |
1585 | ||
1586 | ptr = (unsigned long)ei + item_offset; | |
1587 | end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]); | |
1588 | if (ptr < end - size) | |
1589 | memmove_extent_buffer(leaf, ptr + size, ptr, | |
1590 | end - size - ptr); | |
1591 | ||
1592 | iref = (struct btrfs_extent_inline_ref *)ptr; | |
1593 | btrfs_set_extent_inline_ref_type(leaf, iref, type); | |
1594 | if (type == BTRFS_EXTENT_DATA_REF_KEY) { | |
1595 | struct btrfs_extent_data_ref *dref; | |
1596 | dref = (struct btrfs_extent_data_ref *)(&iref->offset); | |
1597 | btrfs_set_extent_data_ref_root(leaf, dref, root_objectid); | |
1598 | btrfs_set_extent_data_ref_objectid(leaf, dref, owner); | |
1599 | btrfs_set_extent_data_ref_offset(leaf, dref, offset); | |
1600 | btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add); | |
1601 | } else if (type == BTRFS_SHARED_DATA_REF_KEY) { | |
1602 | struct btrfs_shared_data_ref *sref; | |
1603 | sref = (struct btrfs_shared_data_ref *)(iref + 1); | |
1604 | btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add); | |
1605 | btrfs_set_extent_inline_ref_offset(leaf, iref, parent); | |
1606 | } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) { | |
1607 | btrfs_set_extent_inline_ref_offset(leaf, iref, parent); | |
1608 | } else { | |
1609 | btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid); | |
1610 | } | |
1611 | btrfs_mark_buffer_dirty(leaf); | |
1612 | return 0; | |
1613 | } | |
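/*
 * Editorial note (not in the original source): the item layout this helper
 * manipulates is a btrfs_extent_item header followed by packed inline refs.
 * btrfs_extend_item() grows the item by `size` at the end, and the
 * memmove_extent_buffer() above shifts the old tail right by `size`, opening
 * a hole at item_offset where the new ref is written. Schematically, with
 * refs laid out as [A][B] and an insert position between them:
 *
 *	before: [ei][A][B]
 *	after:  [ei][A][new ref][B]
 */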
1614 | ||
1615 | static int lookup_extent_backref(struct btrfs_trans_handle *trans, | |
1616 | struct btrfs_root *root, | |
1617 | struct btrfs_path *path, | |
1618 | struct btrfs_extent_inline_ref **ref_ret, | |
1619 | u64 bytenr, u64 num_bytes, u64 parent, | |
1620 | u64 root_objectid, u64 owner, u64 offset) | |
1621 | { | |
1622 | int ret; | |
1623 | ||
1624 | ret = lookup_inline_extent_backref(trans, root, path, ref_ret, | |
1625 | bytenr, num_bytes, parent, | |
1626 | root_objectid, owner, offset, 0); | |
1627 | if (ret != -ENOENT) | |
1628 | return ret; | |
1629 | ||
1630 | btrfs_release_path(path); | |
1631 | *ref_ret = NULL; | |
1632 | ||
1633 | if (owner < BTRFS_FIRST_FREE_OBJECTID) { | |
1634 | ret = lookup_tree_block_ref(trans, root, path, bytenr, parent, | |
1635 | root_objectid); | |
1636 | } else { | |
1637 | ret = lookup_extent_data_ref(trans, root, path, bytenr, parent, | |
1638 | root_objectid, owner, offset); | |
1639 | } | |
1640 | return ret; | |
1641 | } | |
1642 | ||
1643 | /* | |
1644 | * helper to update/remove inline back ref | |
1645 | */ | |
1646 | static noinline_for_stack | |
1647 | int update_inline_extent_backref(struct btrfs_trans_handle *trans, | |
1648 | struct btrfs_root *root, | |
1649 | struct btrfs_path *path, | |
1650 | struct btrfs_extent_inline_ref *iref, | |
1651 | int refs_to_mod, | |
1652 | struct btrfs_delayed_extent_op *extent_op) | |
1653 | { | |
1654 | struct extent_buffer *leaf; | |
1655 | struct btrfs_extent_item *ei; | |
1656 | struct btrfs_extent_data_ref *dref = NULL; | |
1657 | struct btrfs_shared_data_ref *sref = NULL; | |
1658 | unsigned long ptr; | |
1659 | unsigned long end; | |
1660 | u32 item_size; | |
1661 | int size; | |
1662 | int type; | |
1663 | int ret; | |
1664 | u64 refs; | |
1665 | ||
1666 | leaf = path->nodes[0]; | |
1667 | ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); | |
1668 | refs = btrfs_extent_refs(leaf, ei); | |
1669 | WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0); | |
1670 | refs += refs_to_mod; | |
1671 | btrfs_set_extent_refs(leaf, ei, refs); | |
1672 | if (extent_op) | |
1673 | __run_delayed_extent_op(extent_op, leaf, ei); | |
1674 | ||
1675 | type = btrfs_extent_inline_ref_type(leaf, iref); | |
1676 | ||
1677 | if (type == BTRFS_EXTENT_DATA_REF_KEY) { | |
1678 | dref = (struct btrfs_extent_data_ref *)(&iref->offset); | |
1679 | refs = btrfs_extent_data_ref_count(leaf, dref); | |
1680 | } else if (type == BTRFS_SHARED_DATA_REF_KEY) { | |
1681 | sref = (struct btrfs_shared_data_ref *)(iref + 1); | |
1682 | refs = btrfs_shared_data_ref_count(leaf, sref); | |
1683 | } else { | |
1684 | refs = 1; | |
1685 | BUG_ON(refs_to_mod != -1); | |
1686 | } | |
1687 | ||
1688 | BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod); | |
1689 | refs += refs_to_mod; | |
1690 | ||
1691 | if (refs > 0) { | |
1692 | if (type == BTRFS_EXTENT_DATA_REF_KEY) | |
1693 | btrfs_set_extent_data_ref_count(leaf, dref, refs); | |
1694 | else | |
1695 | btrfs_set_shared_data_ref_count(leaf, sref, refs); | |
1696 | } else { | |
1697 | size = btrfs_extent_inline_ref_size(type); | |
1698 | item_size = btrfs_item_size_nr(leaf, path->slots[0]); | |
1699 | ptr = (unsigned long)iref; | |
1700 | end = (unsigned long)ei + item_size; | |
1701 | if (ptr + size < end) | |
1702 | memmove_extent_buffer(leaf, ptr, ptr + size, | |
1703 | end - ptr - size); | |
1704 | item_size -= size; | |
1705 | ret = btrfs_truncate_item(trans, root, path, item_size, 1); | |
1706 | } | |
1707 | btrfs_mark_buffer_dirty(leaf); | |
1708 | return 0; | |
1709 | } | |
1710 | ||
1711 | static noinline_for_stack | |
1712 | int insert_inline_extent_backref(struct btrfs_trans_handle *trans, | |
1713 | struct btrfs_root *root, | |
1714 | struct btrfs_path *path, | |
1715 | u64 bytenr, u64 num_bytes, u64 parent, | |
1716 | u64 root_objectid, u64 owner, | |
1717 | u64 offset, int refs_to_add, | |
1718 | struct btrfs_delayed_extent_op *extent_op) | |
1719 | { | |
1720 | struct btrfs_extent_inline_ref *iref; | |
1721 | int ret; | |
1722 | ||
1723 | ret = lookup_inline_extent_backref(trans, root, path, &iref, | |
1724 | bytenr, num_bytes, parent, | |
1725 | root_objectid, owner, offset, 1); | |
1726 | if (ret == 0) { | |
1727 | BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID); | |
1728 | ret = update_inline_extent_backref(trans, root, path, iref, | |
1729 | refs_to_add, extent_op); | |
1730 | } else if (ret == -ENOENT) { | |
1731 | ret = setup_inline_extent_backref(trans, root, path, iref, | |
1732 | parent, root_objectid, | |
1733 | owner, offset, refs_to_add, | |
1734 | extent_op); | |
1735 | } | |
1736 | return ret; | |
1737 | } | |
1738 | ||
1739 | static int insert_extent_backref(struct btrfs_trans_handle *trans, | |
1740 | struct btrfs_root *root, | |
1741 | struct btrfs_path *path, | |
1742 | u64 bytenr, u64 parent, u64 root_objectid, | |
1743 | u64 owner, u64 offset, int refs_to_add) | |
1744 | { | |
1745 | int ret; | |
1746 | if (owner < BTRFS_FIRST_FREE_OBJECTID) { | |
1747 | BUG_ON(refs_to_add != 1); | |
1748 | ret = insert_tree_block_ref(trans, root, path, bytenr, | |
1749 | parent, root_objectid); | |
1750 | } else { | |
1751 | ret = insert_extent_data_ref(trans, root, path, bytenr, | |
1752 | parent, root_objectid, | |
1753 | owner, offset, refs_to_add); | |
1754 | } | |
1755 | return ret; | |
1756 | } | |
1757 | ||
1758 | static int remove_extent_backref(struct btrfs_trans_handle *trans, | |
1759 | struct btrfs_root *root, | |
1760 | struct btrfs_path *path, | |
1761 | struct btrfs_extent_inline_ref *iref, | |
1762 | int refs_to_drop, int is_data) | |
1763 | { | |
1764 | int ret; | |
1765 | ||
1766 | BUG_ON(!is_data && refs_to_drop != 1); | |
1767 | if (iref) { | |
1768 | ret = update_inline_extent_backref(trans, root, path, iref, | |
1769 | -refs_to_drop, NULL); | |
1770 | } else if (is_data) { | |
1771 | ret = remove_extent_data_ref(trans, root, path, refs_to_drop); | |
1772 | } else { | |
1773 | ret = btrfs_del_item(trans, root, path); | |
1774 | } | |
1775 | return ret; | |
1776 | } | |
1777 | ||
1778 | static int btrfs_issue_discard(struct block_device *bdev, | |
1779 | u64 start, u64 len) | |
1780 | { | |
1781 | return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0); | |
1782 | } | |
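/*
 * Editorial note (not in the original source): blkdev_issue_discard() takes
 * its start and length in 512-byte sectors, hence the >> 9 conversions above.
 * A worked example, assuming byte inputs:
 *
 *	start = 1 MiB  = 1048576 bytes  ->  1048576 >> 9 = 2048 sectors
 *	len   = 64 KiB = 65536 bytes    ->  65536 >> 9   = 128 sectors
 *
 * Any remainder below 512 bytes is truncated, which is harmless here because
 * btrfs extents are sectorsize aligned.
 */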
1783 | ||
1784 | static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, | |
1785 | u64 num_bytes, u64 *actual_bytes) | |
1786 | { | |
1787 | int ret; | |
1788 | u64 discarded_bytes = 0; | |
1789 | struct btrfs_multi_bio *multi = NULL; | |
1790 | ||
1791 | ||
1792 | /* Tell the block device(s) that the sectors can be discarded */ | |
1793 | ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD, | |
1794 | bytenr, &num_bytes, &multi, 0); | |
1795 | if (!ret) { | |
1796 | struct btrfs_bio_stripe *stripe = multi->stripes; | |
1797 | int i; | |
1798 | ||
1799 | ||
1800 | for (i = 0; i < multi->num_stripes; i++, stripe++) { | |
1801 | if (!stripe->dev->can_discard) | |
1802 | continue; | |
1803 | ||
1804 | ret = btrfs_issue_discard(stripe->dev->bdev, | |
1805 | stripe->physical, | |
1806 | stripe->length); | |
1807 | if (!ret) | |
1808 | discarded_bytes += stripe->length; | |
1809 | else if (ret != -EOPNOTSUPP) | |
1810 | break; | |
1811 | ||
1812 | /* | |
1813 | * Just in case we got EOPNOTSUPP back for some reason, | |
1814 | * ignore the return value so we don't break | |
1815 | * callers of discard_extent. | |
1816 | */ | |
1817 | ret = 0; | |
1818 | } | |
1819 | kfree(multi); | |
1820 | } | |
1821 | ||
1822 | if (actual_bytes) | |
1823 | *actual_bytes = discarded_bytes; | |
1824 | ||
1825 | ||
1826 | return ret; | |
1827 | } | |
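/*
 * Editorial usage sketch (hypothetical caller, not from this file): callers
 * that care how much was actually trimmed pass a non-NULL actual_bytes and
 * read back the accumulated discarded_bytes:
 *
 *	u64 trimmed = 0;
 *	int err = btrfs_discard_extent(root, start, len, &trimmed);
 *	if (!err)
 *		printk(KERN_INFO "trimmed %llu bytes\n",
 *		       (unsigned long long)trimmed);
 *
 * Per-stripe -EOPNOTSUPP is deliberately swallowed above, so one device
 * without discard support does not fail the whole request.
 */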
1828 | ||
1829 | int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, | |
1830 | struct btrfs_root *root, | |
1831 | u64 bytenr, u64 num_bytes, u64 parent, | |
1832 | u64 root_objectid, u64 owner, u64 offset) | |
1833 | { | |
1834 | int ret; | |
1835 | BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID && | |
1836 | root_objectid == BTRFS_TREE_LOG_OBJECTID); | |
1837 | ||
1838 | if (owner < BTRFS_FIRST_FREE_OBJECTID) { | |
1839 | ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes, | |
1840 | parent, root_objectid, (int)owner, | |
1841 | BTRFS_ADD_DELAYED_REF, NULL); | |
1842 | } else { | |
1843 | ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes, | |
1844 | parent, root_objectid, owner, offset, | |
1845 | BTRFS_ADD_DELAYED_REF, NULL); | |
1846 | } | |
1847 | return ret; | |
1848 | } | |
1849 | ||
1850 | static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, | |
1851 | struct btrfs_root *root, | |
1852 | u64 bytenr, u64 num_bytes, | |
1853 | u64 parent, u64 root_objectid, | |
1854 | u64 owner, u64 offset, int refs_to_add, | |
1855 | struct btrfs_delayed_extent_op *extent_op) | |
1856 | { | |
1857 | struct btrfs_path *path; | |
1858 | struct extent_buffer *leaf; | |
1859 | struct btrfs_extent_item *item; | |
1860 | u64 refs; | |
1861 | int ret; | |
1862 | int err = 0; | |
1863 | ||
1864 | path = btrfs_alloc_path(); | |
1865 | if (!path) | |
1866 | return -ENOMEM; | |
1867 | ||
1868 | path->reada = 1; | |
1869 | path->leave_spinning = 1; | |
1870 | /* this will set up the path even if it fails to insert the back ref */ | |
1871 | ret = insert_inline_extent_backref(trans, root->fs_info->extent_root, | |
1872 | path, bytenr, num_bytes, parent, | |
1873 | root_objectid, owner, offset, | |
1874 | refs_to_add, extent_op); | |
1875 | if (ret == 0) | |
1876 | goto out; | |
1877 | ||
1878 | if (ret != -EAGAIN) { | |
1879 | err = ret; | |
1880 | goto out; | |
1881 | } | |
1882 | ||
1883 | leaf = path->nodes[0]; | |
1884 | item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); | |
1885 | refs = btrfs_extent_refs(leaf, item); | |
1886 | btrfs_set_extent_refs(leaf, item, refs + refs_to_add); | |
1887 | if (extent_op) | |
1888 | __run_delayed_extent_op(extent_op, leaf, item); | |
1889 | ||
1890 | btrfs_mark_buffer_dirty(leaf); | |
1891 | btrfs_release_path(path); | |
1892 | ||
1893 | path->reada = 1; | |
1894 | path->leave_spinning = 1; | |
1895 | ||
1896 | /* now insert the actual backref */ | |
1897 | ret = insert_extent_backref(trans, root->fs_info->extent_root, | |
1898 | path, bytenr, parent, root_objectid, | |
1899 | owner, offset, refs_to_add); | |
1900 | BUG_ON(ret); | |
1901 | out: | |
1902 | btrfs_free_path(path); | |
1903 | return err; | |
1904 | } | |
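/*
 * Editorial note on the -EAGAIN path above: when the inline insert does not
 * fit (or a conflicting item exists), the extent item's refcount is still
 * bumped in place, and the backref is then inserted as a separate keyed item
 * by insert_extent_backref(). The end state is equivalent to the inline
 * form, just stored out of line.
 */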
1905 | ||
1906 | static int run_delayed_data_ref(struct btrfs_trans_handle *trans, | |
1907 | struct btrfs_root *root, | |
1908 | struct btrfs_delayed_ref_node *node, | |
1909 | struct btrfs_delayed_extent_op *extent_op, | |
1910 | int insert_reserved) | |
1911 | { | |
1912 | int ret = 0; | |
1913 | struct btrfs_delayed_data_ref *ref; | |
1914 | struct btrfs_key ins; | |
1915 | u64 parent = 0; | |
1916 | u64 ref_root = 0; | |
1917 | u64 flags = 0; | |
1918 | ||
1919 | ins.objectid = node->bytenr; | |
1920 | ins.offset = node->num_bytes; | |
1921 | ins.type = BTRFS_EXTENT_ITEM_KEY; | |
1922 | ||
1923 | ref = btrfs_delayed_node_to_data_ref(node); | |
1924 | if (node->type == BTRFS_SHARED_DATA_REF_KEY) | |
1925 | parent = ref->parent; | |
1926 | else | |
1927 | ref_root = ref->root; | |
1928 | ||
1929 | if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) { | |
1930 | if (extent_op) { | |
1931 | BUG_ON(extent_op->update_key); | |
1932 | flags |= extent_op->flags_to_set; | |
1933 | } | |
1934 | ret = alloc_reserved_file_extent(trans, root, | |
1935 | parent, ref_root, flags, | |
1936 | ref->objectid, ref->offset, | |
1937 | &ins, node->ref_mod); | |
1938 | } else if (node->action == BTRFS_ADD_DELAYED_REF) { | |
1939 | ret = __btrfs_inc_extent_ref(trans, root, node->bytenr, | |
1940 | node->num_bytes, parent, | |
1941 | ref_root, ref->objectid, | |
1942 | ref->offset, node->ref_mod, | |
1943 | extent_op); | |
1944 | } else if (node->action == BTRFS_DROP_DELAYED_REF) { | |
1945 | ret = __btrfs_free_extent(trans, root, node->bytenr, | |
1946 | node->num_bytes, parent, | |
1947 | ref_root, ref->objectid, | |
1948 | ref->offset, node->ref_mod, | |
1949 | extent_op); | |
1950 | } else { | |
1951 | BUG(); | |
1952 | } | |
1953 | return ret; | |
1954 | } | |
1955 | ||
1956 | static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op, | |
1957 | struct extent_buffer *leaf, | |
1958 | struct btrfs_extent_item *ei) | |
1959 | { | |
1960 | u64 flags = btrfs_extent_flags(leaf, ei); | |
1961 | if (extent_op->update_flags) { | |
1962 | flags |= extent_op->flags_to_set; | |
1963 | btrfs_set_extent_flags(leaf, ei, flags); | |
1964 | } | |
1965 | ||
1966 | if (extent_op->update_key) { | |
1967 | struct btrfs_tree_block_info *bi; | |
1968 | BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)); | |
1969 | bi = (struct btrfs_tree_block_info *)(ei + 1); | |
1970 | btrfs_set_tree_block_key(leaf, bi, &extent_op->key); | |
1971 | } | |
1972 | } | |
1973 | ||
1974 | static int run_delayed_extent_op(struct btrfs_trans_handle *trans, | |
1975 | struct btrfs_root *root, | |
1976 | struct btrfs_delayed_ref_node *node, | |
1977 | struct btrfs_delayed_extent_op *extent_op) | |
1978 | { | |
1979 | struct btrfs_key key; | |
1980 | struct btrfs_path *path; | |
1981 | struct btrfs_extent_item *ei; | |
1982 | struct extent_buffer *leaf; | |
1983 | u32 item_size; | |
1984 | int ret; | |
1985 | int err = 0; | |
1986 | ||
1987 | path = btrfs_alloc_path(); | |
1988 | if (!path) | |
1989 | return -ENOMEM; | |
1990 | ||
1991 | key.objectid = node->bytenr; | |
1992 | key.type = BTRFS_EXTENT_ITEM_KEY; | |
1993 | key.offset = node->num_bytes; | |
1994 | ||
1995 | path->reada = 1; | |
1996 | path->leave_spinning = 1; | |
1997 | ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, | |
1998 | path, 0, 1); | |
1999 | if (ret < 0) { | |
2000 | err = ret; | |
2001 | goto out; | |
2002 | } | |
2003 | if (ret > 0) { | |
2004 | err = -EIO; | |
2005 | goto out; | |
2006 | } | |
2007 | ||
2008 | leaf = path->nodes[0]; | |
2009 | item_size = btrfs_item_size_nr(leaf, path->slots[0]); | |
2010 | #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 | |
2011 | if (item_size < sizeof(*ei)) { | |
2012 | ret = convert_extent_item_v0(trans, root->fs_info->extent_root, | |
2013 | path, (u64)-1, 0); | |
2014 | if (ret < 0) { | |
2015 | err = ret; | |
2016 | goto out; | |
2017 | } | |
2018 | leaf = path->nodes[0]; | |
2019 | item_size = btrfs_item_size_nr(leaf, path->slots[0]); | |
2020 | } | |
2021 | #endif | |
2022 | BUG_ON(item_size < sizeof(*ei)); | |
2023 | ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); | |
2024 | __run_delayed_extent_op(extent_op, leaf, ei); | |
2025 | ||
2026 | btrfs_mark_buffer_dirty(leaf); | |
2027 | out: | |
2028 | btrfs_free_path(path); | |
2029 | return err; | |
2030 | } | |
2031 | ||
2032 | static int run_delayed_tree_ref(struct btrfs_trans_handle *trans, | |
2033 | struct btrfs_root *root, | |
2034 | struct btrfs_delayed_ref_node *node, | |
2035 | struct btrfs_delayed_extent_op *extent_op, | |
2036 | int insert_reserved) | |
2037 | { | |
2038 | int ret = 0; | |
2039 | struct btrfs_delayed_tree_ref *ref; | |
2040 | struct btrfs_key ins; | |
2041 | u64 parent = 0; | |
2042 | u64 ref_root = 0; | |
2043 | ||
2044 | ins.objectid = node->bytenr; | |
2045 | ins.offset = node->num_bytes; | |
2046 | ins.type = BTRFS_EXTENT_ITEM_KEY; | |
2047 | ||
2048 | ref = btrfs_delayed_node_to_tree_ref(node); | |
2049 | if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) | |
2050 | parent = ref->parent; | |
2051 | else | |
2052 | ref_root = ref->root; | |
2053 | ||
2054 | BUG_ON(node->ref_mod != 1); | |
2055 | if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) { | |
2056 | BUG_ON(!extent_op || !extent_op->update_flags || | |
2057 | !extent_op->update_key); | |
2058 | ret = alloc_reserved_tree_block(trans, root, | |
2059 | parent, ref_root, | |
2060 | extent_op->flags_to_set, | |
2061 | &extent_op->key, | |
2062 | ref->level, &ins); | |
2063 | } else if (node->action == BTRFS_ADD_DELAYED_REF) { | |
2064 | ret = __btrfs_inc_extent_ref(trans, root, node->bytenr, | |
2065 | node->num_bytes, parent, ref_root, | |
2066 | ref->level, 0, 1, extent_op); | |
2067 | } else if (node->action == BTRFS_DROP_DELAYED_REF) { | |
2068 | ret = __btrfs_free_extent(trans, root, node->bytenr, | |
2069 | node->num_bytes, parent, ref_root, | |
2070 | ref->level, 0, 1, extent_op); | |
2071 | } else { | |
2072 | BUG(); | |
2073 | } | |
2074 | return ret; | |
2075 | } | |
2076 | ||
2077 | /* helper function to actually process a single delayed ref entry */ | |
2078 | static int run_one_delayed_ref(struct btrfs_trans_handle *trans, | |
2079 | struct btrfs_root *root, | |
2080 | struct btrfs_delayed_ref_node *node, | |
2081 | struct btrfs_delayed_extent_op *extent_op, | |
2082 | int insert_reserved) | |
2083 | { | |
2084 | int ret; | |
2085 | if (btrfs_delayed_ref_is_head(node)) { | |
2086 | struct btrfs_delayed_ref_head *head; | |
2087 | /* | |
2088 | * we've hit the end of the chain and we were supposed | |
2089 | * to insert this extent into the tree. But, it got | |
2090 | * deleted before we ever needed to insert it, so all | |
2091 | * we have to do is clean up the accounting | |
2092 | */ | |
2093 | BUG_ON(extent_op); | |
2094 | head = btrfs_delayed_node_to_head(node); | |
2095 | if (insert_reserved) { | |
2096 | btrfs_pin_extent(root, node->bytenr, | |
2097 | node->num_bytes, 1); | |
2098 | if (head->is_data) { | |
2099 | ret = btrfs_del_csums(trans, root, | |
2100 | node->bytenr, | |
2101 | node->num_bytes); | |
2102 | BUG_ON(ret); | |
2103 | } | |
2104 | } | |
2105 | mutex_unlock(&head->mutex); | |
2106 | return 0; | |
2107 | } | |
2108 | ||
2109 | if (node->type == BTRFS_TREE_BLOCK_REF_KEY || | |
2110 | node->type == BTRFS_SHARED_BLOCK_REF_KEY) | |
2111 | ret = run_delayed_tree_ref(trans, root, node, extent_op, | |
2112 | insert_reserved); | |
2113 | else if (node->type == BTRFS_EXTENT_DATA_REF_KEY || | |
2114 | node->type == BTRFS_SHARED_DATA_REF_KEY) | |
2115 | ret = run_delayed_data_ref(trans, root, node, extent_op, | |
2116 | insert_reserved); | |
2117 | else | |
2118 | BUG(); | |
2119 | return ret; | |
2120 | } | |
2121 | ||
2122 | static noinline struct btrfs_delayed_ref_node * | |
2123 | select_delayed_ref(struct btrfs_delayed_ref_head *head) | |
2124 | { | |
2125 | struct rb_node *node; | |
2126 | struct btrfs_delayed_ref_node *ref; | |
2127 | int action = BTRFS_ADD_DELAYED_REF; | |
2128 | again: | |
2129 | /* | |
2130 | * select delayed refs of type BTRFS_ADD_DELAYED_REF first. | |
2131 | * this prevents the ref count from going down to zero while | |
2132 | * there are still pending delayed refs. | |
2133 | */ | |
2134 | node = rb_prev(&head->node.rb_node); | |
2135 | while (1) { | |
2136 | if (!node) | |
2137 | break; | |
2138 | ref = rb_entry(node, struct btrfs_delayed_ref_node, | |
2139 | rb_node); | |
2140 | if (ref->bytenr != head->node.bytenr) | |
2141 | break; | |
2142 | if (ref->action == action) | |
2143 | return ref; | |
2144 | node = rb_prev(node); | |
2145 | } | |
2146 | if (action == BTRFS_ADD_DELAYED_REF) { | |
2147 | action = BTRFS_DROP_DELAYED_REF; | |
2148 | goto again; | |
2149 | } | |
2150 | return NULL; | |
2151 | } | |
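/*
 * Editorial example (not in the original source) of why ADD refs are
 * selected before DROP refs: suppose an extent currently has refcount 1 and
 * its head has two queued updates, [DROP, ADD]. Running the DROP first would
 * take the count to 0 and free the extent, even though the pending ADD would
 * immediately bring it back to 1. Scanning for BTRFS_ADD_DELAYED_REF first
 * keeps the count from touching zero while updates are still queued.
 */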
2152 | ||
2153 | static noinline int run_clustered_refs(struct btrfs_trans_handle *trans, | |
2154 | struct btrfs_root *root, | |
2155 | struct list_head *cluster) | |
2156 | { | |
2157 | struct btrfs_delayed_ref_root *delayed_refs; | |
2158 | struct btrfs_delayed_ref_node *ref; | |
2159 | struct btrfs_delayed_ref_head *locked_ref = NULL; | |
2160 | struct btrfs_delayed_extent_op *extent_op; | |
2161 | int ret; | |
2162 | int count = 0; | |
2163 | int must_insert_reserved = 0; | |
2164 | ||
2165 | delayed_refs = &trans->transaction->delayed_refs; | |
2166 | while (1) { | |
2167 | if (!locked_ref) { | |
2168 | /* pick a new head ref from the cluster list */ | |
2169 | if (list_empty(cluster)) | |
2170 | break; | |
2171 | ||
2172 | locked_ref = list_entry(cluster->next, | |
2173 | struct btrfs_delayed_ref_head, cluster); | |
2174 | ||
2175 | /* grab the lock that says we are going to process | |
2176 | * all the refs for this head */ | |
2177 | ret = btrfs_delayed_ref_lock(trans, locked_ref); | |
2178 | ||
2179 | /* | |
2180 | * we may have dropped the spin lock to get the head | |
2181 | * mutex lock, and that might have given someone else | |
2182 | * time to free the head. If that's true, it has been | |
2183 | * removed from our list and we can move on. | |
2184 | */ | |
2185 | if (ret == -EAGAIN) { | |
2186 | locked_ref = NULL; | |
2187 | count++; | |
2188 | continue; | |
2189 | } | |
2190 | } | |
2191 | ||
2192 | /* | |
2193 | * record the must insert reserved flag before we | |
2194 | * drop the spin lock. | |
2195 | */ | |
2196 | must_insert_reserved = locked_ref->must_insert_reserved; | |
2197 | locked_ref->must_insert_reserved = 0; | |
2198 | ||
2199 | extent_op = locked_ref->extent_op; | |
2200 | locked_ref->extent_op = NULL; | |
2201 | ||
2202 | /* | |
2203 | * locked_ref is the head node, so we have to go one | |
2204 | * node back for any delayed ref updates | |
2205 | */ | |
2206 | ref = select_delayed_ref(locked_ref); | |
2207 | if (!ref) { | |
2208 | /* All delayed refs have been processed, go ahead | |
2209 | * and send the head node to run_one_delayed_ref, | |
2210 | * so that any accounting fixes can happen. | |
2211 | */ | |
2212 | ref = &locked_ref->node; | |
2213 | ||
2214 | if (extent_op && must_insert_reserved) { | |
2215 | kfree(extent_op); | |
2216 | extent_op = NULL; | |
2217 | } | |
2218 | ||
2219 | if (extent_op) { | |
2220 | spin_unlock(&delayed_refs->lock); | |
2221 | ||
2222 | ret = run_delayed_extent_op(trans, root, | |
2223 | ref, extent_op); | |
2224 | BUG_ON(ret); | |
2225 | kfree(extent_op); | |
2226 | ||
2227 | cond_resched(); | |
2228 | spin_lock(&delayed_refs->lock); | |
2229 | continue; | |
2230 | } | |
2231 | ||
2232 | list_del_init(&locked_ref->cluster); | |
2233 | locked_ref = NULL; | |
2234 | } | |
2235 | ||
2236 | ref->in_tree = 0; | |
2237 | rb_erase(&ref->rb_node, &delayed_refs->root); | |
2238 | delayed_refs->num_entries--; | |
2239 | ||
2240 | spin_unlock(&delayed_refs->lock); | |
2241 | ||
2242 | ret = run_one_delayed_ref(trans, root, ref, extent_op, | |
2243 | must_insert_reserved); | |
2244 | BUG_ON(ret); | |
2245 | ||
2246 | btrfs_put_delayed_ref(ref); | |
2247 | kfree(extent_op); | |
2248 | count++; | |
2249 | ||
2250 | cond_resched(); | |
2251 | spin_lock(&delayed_refs->lock); | |
2252 | } | |
2253 | return count; | |
2254 | } | |
2255 | ||
2256 | /* | |
2257 | * this starts processing the delayed reference count updates and | |
2258 | * extent insertions we have queued up so far. count can be | |
2259 | * 0, which means to process everything in the tree at the start | |
2260 | * of the run (but not newly added entries), or it can be some target | |
2261 | * number you'd like to process. | |
2262 | */ | |
2263 | int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, | |
2264 | struct btrfs_root *root, unsigned long count) | |
2265 | { | |
2266 | struct rb_node *node; | |
2267 | struct btrfs_delayed_ref_root *delayed_refs; | |
2268 | struct btrfs_delayed_ref_node *ref; | |
2269 | struct list_head cluster; | |
2270 | int ret; | |
2271 | int run_all = count == (unsigned long)-1; | |
2272 | int run_most = 0; | |
2273 | ||
2274 | if (root == root->fs_info->extent_root) | |
2275 | root = root->fs_info->tree_root; | |
2276 | ||
2277 | delayed_refs = &trans->transaction->delayed_refs; | |
2278 | INIT_LIST_HEAD(&cluster); | |
2279 | again: | |
2280 | spin_lock(&delayed_refs->lock); | |
2281 | if (count == 0) { | |
2282 | count = delayed_refs->num_entries * 2; | |
2283 | run_most = 1; | |
2284 | } | |
2285 | while (1) { | |
2286 | if (!(run_all || run_most) && | |
2287 | delayed_refs->num_heads_ready < 64) | |
2288 | break; | |
2289 | ||
2290 | /* | |
2291 | * go find something we can process in the rbtree. We start at | |
2292 | * the beginning of the tree, and then build a cluster | |
2293 | * of refs to process starting at the first one we are able to | |
2294 | * lock | |
2295 | */ | |
2296 | ret = btrfs_find_ref_cluster(trans, &cluster, | |
2297 | delayed_refs->run_delayed_start); | |
2298 | if (ret) | |
2299 | break; | |
2300 | ||
2301 | ret = run_clustered_refs(trans, root, &cluster); | |
2302 | BUG_ON(ret < 0); | |
2303 | ||
2304 | count -= min_t(unsigned long, ret, count); | |
2305 | ||
2306 | if (count == 0) | |
2307 | break; | |
2308 | } | |
2309 | ||
2310 | if (run_all) { | |
2311 | node = rb_first(&delayed_refs->root); | |
2312 | if (!node) | |
2313 | goto out; | |
2314 | count = (unsigned long)-1; | |
2315 | ||
2316 | while (node) { | |
2317 | ref = rb_entry(node, struct btrfs_delayed_ref_node, | |
2318 | rb_node); | |
2319 | if (btrfs_delayed_ref_is_head(ref)) { | |
2320 | struct btrfs_delayed_ref_head *head; | |
2321 | ||
2322 | head = btrfs_delayed_node_to_head(ref); | |
2323 | atomic_inc(&ref->refs); | |
2324 | ||
2325 | spin_unlock(&delayed_refs->lock); | |
2326 | /* | |
2327 | * Mutex was contended, block until it's | |
2328 | * released and try again | |
2329 | */ | |
2330 | mutex_lock(&head->mutex); | |
2331 | mutex_unlock(&head->mutex); | |
2332 | ||
2333 | btrfs_put_delayed_ref(ref); | |
2334 | cond_resched(); | |
2335 | goto again; | |
2336 | } | |
2337 | node = rb_next(node); | |
2338 | } | |
2339 | spin_unlock(&delayed_refs->lock); | |
2340 | schedule_timeout(1); | |
2341 | goto again; | |
2342 | } | |
2343 | out: | |
2344 | spin_unlock(&delayed_refs->lock); | |
2345 | return 0; | |
2346 | } | |
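/*
 * Editorial usage sketch (callers are illustrative, not from this file):
 * a transaction commit flushes everything with
 *
 *	btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 *
 * while throttled paths pass a finite target. count == 0 expands to twice
 * the entries present at the start of the run (the run_most case above), so
 * newly queued refs are not chased indefinitely.
 */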
2347 | ||
2348 | int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, | |
2349 | struct btrfs_root *root, | |
2350 | u64 bytenr, u64 num_bytes, u64 flags, | |
2351 | int is_data) | |
2352 | { | |
2353 | struct btrfs_delayed_extent_op *extent_op; | |
2354 | int ret; | |
2355 | ||
2356 | extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS); | |
2357 | if (!extent_op) | |
2358 | return -ENOMEM; | |
2359 | ||
2360 | extent_op->flags_to_set = flags; | |
2361 | extent_op->update_flags = 1; | |
2362 | extent_op->update_key = 0; | |
2363 | extent_op->is_data = is_data ? 1 : 0; | |
2364 | ||
2365 | ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op); | |
2366 | if (ret) | |
2367 | kfree(extent_op); | |
2368 | return ret; | |
2369 | } | |
2370 | ||
2371 | static noinline int check_delayed_ref(struct btrfs_trans_handle *trans, | |
2372 | struct btrfs_root *root, | |
2373 | struct btrfs_path *path, | |
2374 | u64 objectid, u64 offset, u64 bytenr) | |
2375 | { | |
2376 | struct btrfs_delayed_ref_head *head; | |
2377 | struct btrfs_delayed_ref_node *ref; | |
2378 | struct btrfs_delayed_data_ref *data_ref; | |
2379 | struct btrfs_delayed_ref_root *delayed_refs; | |
2380 | struct rb_node *node; | |
2381 | int ret = 0; | |
2382 | ||
2383 | ret = -ENOENT; | |
2384 | delayed_refs = &trans->transaction->delayed_refs; | |
2385 | spin_lock(&delayed_refs->lock); | |
2386 | head = btrfs_find_delayed_ref_head(trans, bytenr); | |
2387 | if (!head) | |
2388 | goto out; | |
2389 | ||
2390 | if (!mutex_trylock(&head->mutex)) { | |
2391 | atomic_inc(&head->node.refs); | |
2392 | spin_unlock(&delayed_refs->lock); | |
2393 | ||
2394 | btrfs_release_path(path); | |
2395 | ||
2396 | /* | |
2397 | * Mutex was contended, block until it's released and let | |
2398 | * caller try again | |
2399 | */ | |
2400 | mutex_lock(&head->mutex); | |
2401 | mutex_unlock(&head->mutex); | |
2402 | btrfs_put_delayed_ref(&head->node); | |
2403 | return -EAGAIN; | |
2404 | } | |
2405 | ||
2406 | node = rb_prev(&head->node.rb_node); | |
2407 | if (!node) | |
2408 | goto out_unlock; | |
2409 | ||
2410 | ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); | |
2411 | ||
2412 | if (ref->bytenr != bytenr) | |
2413 | goto out_unlock; | |
2414 | ||
2415 | ret = 1; | |
2416 | if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) | |
2417 | goto out_unlock; | |
2418 | ||
2419 | data_ref = btrfs_delayed_node_to_data_ref(ref); | |
2420 | ||
2421 | node = rb_prev(node); | |
2422 | if (node) { | |
2423 | ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); | |
2424 | if (ref->bytenr == bytenr) | |
2425 | goto out_unlock; | |
2426 | } | |
2427 | ||
2428 | if (data_ref->root != root->root_key.objectid || | |
2429 | data_ref->objectid != objectid || data_ref->offset != offset) | |
2430 | goto out_unlock; | |
2431 | ||
2432 | ret = 0; | |
2433 | out_unlock: | |
2434 | mutex_unlock(&head->mutex); | |
2435 | out: | |
2436 | spin_unlock(&delayed_refs->lock); | |
2437 | return ret; | |
2438 | } | |
2439 | ||
2440 | static noinline int check_committed_ref(struct btrfs_trans_handle *trans, | |
2441 | struct btrfs_root *root, | |
2442 | struct btrfs_path *path, | |
2443 | u64 objectid, u64 offset, u64 bytenr) | |
2444 | { | |
2445 | struct btrfs_root *extent_root = root->fs_info->extent_root; | |
2446 | struct extent_buffer *leaf; | |
2447 | struct btrfs_extent_data_ref *ref; | |
2448 | struct btrfs_extent_inline_ref *iref; | |
2449 | struct btrfs_extent_item *ei; | |
2450 | struct btrfs_key key; | |
2451 | u32 item_size; | |
2452 | int ret; | |
2453 | ||
2454 | key.objectid = bytenr; | |
2455 | key.offset = (u64)-1; | |
2456 | key.type = BTRFS_EXTENT_ITEM_KEY; | |
2457 | ||
2458 | ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); | |
2459 | if (ret < 0) | |
2460 | goto out; | |
2461 | BUG_ON(ret == 0); | |
2462 | ||
2463 | ret = -ENOENT; | |
2464 | if (path->slots[0] == 0) | |
2465 | goto out; | |
2466 | ||
2467 | path->slots[0]--; | |
2468 | leaf = path->nodes[0]; | |
2469 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); | |
2470 | ||
2471 | if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY) | |
2472 | goto out; | |
2473 | ||
2474 | ret = 1; | |
2475 | item_size = btrfs_item_size_nr(leaf, path->slots[0]); | |
2476 | #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 | |
2477 | if (item_size < sizeof(*ei)) { | |
2478 | WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0)); | |
2479 | goto out; | |
2480 | } | |
2481 | #endif | |
2482 | ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); | |
2483 | ||
2484 | if (item_size != sizeof(*ei) + | |
2485 | btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY)) | |
2486 | goto out; | |
2487 | ||
2488 | if (btrfs_extent_generation(leaf, ei) <= | |
2489 | btrfs_root_last_snapshot(&root->root_item)) | |
2490 | goto out; | |
2491 | ||
2492 | iref = (struct btrfs_extent_inline_ref *)(ei + 1); | |
2493 | if (btrfs_extent_inline_ref_type(leaf, iref) != | |
2494 | BTRFS_EXTENT_DATA_REF_KEY) | |
2495 | goto out; | |
2496 | ||
2497 | ref = (struct btrfs_extent_data_ref *)(&iref->offset); | |
2498 | if (btrfs_extent_refs(leaf, ei) != | |
2499 | btrfs_extent_data_ref_count(leaf, ref) || | |
2500 | btrfs_extent_data_ref_root(leaf, ref) != | |
2501 | root->root_key.objectid || | |
2502 | btrfs_extent_data_ref_objectid(leaf, ref) != objectid || | |
2503 | btrfs_extent_data_ref_offset(leaf, ref) != offset) | |
2504 | goto out; | |
2505 | ||
2506 | ret = 0; | |
2507 | out: | |
2508 | return ret; | |
2509 | } | |
2510 | ||
2511 | int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, | |
2512 | struct btrfs_root *root, | |
2513 | u64 objectid, u64 offset, u64 bytenr) | |
2514 | { | |
2515 | struct btrfs_path *path; | |
2516 | int ret; | |
2517 | int ret2; | |
2518 | ||
2519 | path = btrfs_alloc_path(); | |
2520 | if (!path) | |
2521 | return -ENOENT; | |
2522 | ||
2523 | do { | |
2524 | ret = check_committed_ref(trans, root, path, objectid, | |
2525 | offset, bytenr); | |
2526 | if (ret && ret != -ENOENT) | |
2527 | goto out; | |
2528 | ||
2529 | ret2 = check_delayed_ref(trans, root, path, objectid, | |
2530 | offset, bytenr); | |
2531 | } while (ret2 == -EAGAIN); | |
2532 | ||
2533 | if (ret2 && ret2 != -ENOENT) { | |
2534 | ret = ret2; | |
2535 | goto out; | |
2536 | } | |
2537 | ||
2538 | if (ret != -ENOENT || ret2 != -ENOENT) | |
2539 | ret = 0; | |
2540 | out: | |
2541 | btrfs_free_path(path); | |
2542 | if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) | |
2543 | WARN_ON(ret > 0); | |
2544 | return ret; | |
2545 | } | |
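/*
 * Editorial note on the retry contract above (a sketch, assuming the usual
 * nodatacow-style caller): check_delayed_ref() returns -EAGAIN after
 * blocking on a contended head mutex, so both checks are redone from scratch
 * on each loop iteration. A return of 1 from either check means a cross
 * reference may exist and the extent must be COW'd rather than rewritten in
 * place.
 */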
2546 | ||
2547 | static int __btrfs_mod_ref(struct btrfs_trans_handle *trans, | |
2548 | struct btrfs_root *root, | |
2549 | struct extent_buffer *buf, | |
2550 | int full_backref, int inc) | |
2551 | { | |
2552 | u64 bytenr; | |
2553 | u64 num_bytes; | |
2554 | u64 parent; | |
2555 | u64 ref_root; | |
2556 | u32 nritems; | |
2557 | struct btrfs_key key; | |
2558 | struct btrfs_file_extent_item *fi; | |
2559 | int i; | |
2560 | int level; | |
2561 | int ret = 0; | |
2562 | int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *, | |
2563 | u64, u64, u64, u64, u64, u64); | |
2564 | ||
2565 | ref_root = btrfs_header_owner(buf); | |
2566 | nritems = btrfs_header_nritems(buf); | |
2567 | level = btrfs_header_level(buf); | |
2568 | ||
2569 | if (!root->ref_cows && level == 0) | |
2570 | return 0; | |
2571 | ||
2572 | if (inc) | |
2573 | process_func = btrfs_inc_extent_ref; | |
2574 | else | |
2575 | process_func = btrfs_free_extent; | |
2576 | ||
2577 | if (full_backref) | |
2578 | parent = buf->start; | |
2579 | else | |
2580 | parent = 0; | |
2581 | ||
2582 | for (i = 0; i < nritems; i++) { | |
2583 | if (level == 0) { | |
2584 | btrfs_item_key_to_cpu(buf, &key, i); | |
2585 | if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) | |
2586 | continue; | |
2587 | fi = btrfs_item_ptr(buf, i, | |
2588 | struct btrfs_file_extent_item); | |
2589 | if (btrfs_file_extent_type(buf, fi) == | |
2590 | BTRFS_FILE_EXTENT_INLINE) | |
2591 | continue; | |
2592 | bytenr = btrfs_file_extent_disk_bytenr(buf, fi); | |
2593 | if (bytenr == 0) | |
2594 | continue; | |
2595 | ||
2596 | num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi); | |
2597 | key.offset -= btrfs_file_extent_offset(buf, fi); | |
2598 | ret = process_func(trans, root, bytenr, num_bytes, | |
2599 | parent, ref_root, key.objectid, | |
2600 | key.offset); | |
2601 | if (ret) | |
2602 | goto fail; | |
2603 | } else { | |
2604 | bytenr = btrfs_node_blockptr(buf, i); | |
2605 | num_bytes = btrfs_level_size(root, level - 1); | |
2606 | ret = process_func(trans, root, bytenr, num_bytes, | |
2607 | parent, ref_root, level - 1, 0); | |
2608 | if (ret) | |
2609 | goto fail; | |
2610 | } | |
2611 | } | |
2612 | return 0; | |
2613 | fail: | |
2614 | BUG(); | |
2615 | return ret; | |
2616 | } | |
2617 | ||
2618 | int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, | |
2619 | struct extent_buffer *buf, int full_backref) | |
2620 | { | |
2621 | return __btrfs_mod_ref(trans, root, buf, full_backref, 1); | |
2622 | } | |
2623 | ||
2624 | int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, | |
2625 | struct extent_buffer *buf, int full_backref) | |
2626 | { | |
2627 | return __btrfs_mod_ref(trans, root, buf, full_backref, 0); | |
2628 | } | |
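/*
 * Editorial usage sketch (illustrative): when a tree block becomes shared,
 * e.g. during COW of a snapshotted subtree, the owner bumps backrefs on
 * everything the buffer points to:
 *
 *	ret = btrfs_inc_ref(trans, root, buf, 1);
 *
 * full_backref == 1 records shared backrefs keyed on the buffer's own bytenr
 * (parent == buf->start); full_backref == 0 records keyed backrefs against
 * the owning root (parent == 0), matching the parent selection in
 * __btrfs_mod_ref() above.
 */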
2629 | ||
2630 | static int write_one_cache_group(struct btrfs_trans_handle *trans, | |
2631 | struct btrfs_root *root, | |
2632 | struct btrfs_path *path, | |
2633 | struct btrfs_block_group_cache *cache) | |
2634 | { | |
2635 | int ret; | |
2636 | struct btrfs_root *extent_root = root->fs_info->extent_root; | |
2637 | unsigned long bi; | |
2638 | struct extent_buffer *leaf; | |
2639 | ||
2640 | ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); | |
2641 | if (ret < 0) | |
2642 | goto fail; | |
2643 | BUG_ON(ret); | |
2644 | ||
2645 | leaf = path->nodes[0]; | |
2646 | bi = btrfs_item_ptr_offset(leaf, path->slots[0]); | |
2647 | write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item)); | |
2648 | btrfs_mark_buffer_dirty(leaf); | |
2649 | btrfs_release_path(path); | |
2650 | fail: | |
2651 | if (ret) | |
2652 | return ret; | |
2653 | return 0; | |
2654 | ||
2655 | } | |
2656 | ||
2657 | static struct btrfs_block_group_cache * | |
2658 | next_block_group(struct btrfs_root *root, | |
2659 | struct btrfs_block_group_cache *cache) | |
2660 | { | |
2661 | struct rb_node *node; | |
2662 | spin_lock(&root->fs_info->block_group_cache_lock); | |
2663 | node = rb_next(&cache->cache_node); | |
2664 | btrfs_put_block_group(cache); | |
2665 | if (node) { | |
2666 | cache = rb_entry(node, struct btrfs_block_group_cache, | |
2667 | cache_node); | |
2668 | btrfs_get_block_group(cache); | |
2669 | } else | |
2670 | cache = NULL; | |
2671 | spin_unlock(&root->fs_info->block_group_cache_lock); | |
2672 | return cache; | |
2673 | } | |
2674 | ||
2675 | static int cache_save_setup(struct btrfs_block_group_cache *block_group, | |
2676 | struct btrfs_trans_handle *trans, | |
2677 | struct btrfs_path *path) | |
2678 | { | |
2679 | struct btrfs_root *root = block_group->fs_info->tree_root; | |
2680 | struct inode *inode = NULL; | |
2681 | u64 alloc_hint = 0; | |
2682 | int dcs = BTRFS_DC_ERROR; | |
2683 | int num_pages = 0; | |
2684 | int retries = 0; | |
2685 | int ret = 0; | |
2686 | ||
2687 | /* | |
2688 | * If this block group is smaller than 100 megs, don't bother caching | |
2689 | * the block group. | |
2690 | */ | |
2691 | if (block_group->key.offset < (100 * 1024 * 1024)) { | |
2692 | spin_lock(&block_group->lock); | |
2693 | block_group->disk_cache_state = BTRFS_DC_WRITTEN; | |
2694 | spin_unlock(&block_group->lock); | |
2695 | return 0; | |
2696 | } | |
2697 | ||
2698 | again: | |
2699 | inode = lookup_free_space_inode(root, block_group, path); | |
2700 | if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { | |
2701 | ret = PTR_ERR(inode); | |
2702 | btrfs_release_path(path); | |
2703 | goto out; | |
2704 | } | |
2705 | ||
2706 | if (IS_ERR(inode)) { | |
2707 | BUG_ON(retries); | |
2708 | retries++; | |
2709 | ||
2710 | if (block_group->ro) | |
2711 | goto out_free; | |
2712 | ||
2713 | ret = create_free_space_inode(root, trans, block_group, path); | |
2714 | if (ret) | |
2715 | goto out_free; | |
2716 | goto again; | |
2717 | } | |
2718 | ||
2719 | /* | |
2720 | * We want to set the generation to 0; that way, if anything goes wrong | |
2721 | * from here on out, we know not to trust this cache when we load it up | |
2722 | * next time. | |
2723 | */ | |
2724 | BTRFS_I(inode)->generation = 0; | |
2725 | ret = btrfs_update_inode(trans, root, inode); | |
2726 | WARN_ON(ret); | |
2727 | ||
2728 | if (i_size_read(inode) > 0) { | |
2729 | ret = btrfs_truncate_free_space_cache(root, trans, path, | |
2730 | inode); | |
2731 | if (ret) | |
2732 | goto out_put; | |
2733 | } | |
2734 | ||
2735 | spin_lock(&block_group->lock); | |
2736 | if (block_group->cached != BTRFS_CACHE_FINISHED) { | |
2737 | /* We're not cached, don't bother trying to write stuff out */ | |
2738 | dcs = BTRFS_DC_WRITTEN; | |
2739 | spin_unlock(&block_group->lock); | |
2740 | goto out_put; | |
2741 | } | |
2742 | spin_unlock(&block_group->lock); | |
2743 | ||
2744 | num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024); | |
2745 | if (!num_pages) | |
2746 | num_pages = 1; | |
2747 | ||
2748 | /* | |
2749 | * Just to make absolutely sure we have enough space, we're going to | |
2750 | * preallocate 16 pages worth of space for each gigabyte of block | |
2751 | * group (matching the multiplier below). In practice we ought to use | |
2752 | * at most 8, but we need extra space so we can add our header and | |
2753 | * have a terminator between the extents and the bitmaps. | |
2754 | */ | |
2755 | num_pages *= 16; | |
2756 | num_pages *= PAGE_CACHE_SIZE; | |
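/*
 * Editorial worked example (assuming 4 KiB pages): a 50 GiB block group
 * gives num_pages = 50, then 50 * 16 = 800 pages, i.e. 800 * 4096 bytes
 * = ~3.1 MiB preallocated for its free space cache - roughly 64 KiB per
 * GiB of block group.
 */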
2757 | ||
2758 | ret = btrfs_check_data_free_space(inode, num_pages); | |
2759 | if (ret) | |
2760 | goto out_put; | |
2761 | ||
2762 | ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages, | |
2763 | num_pages, num_pages, | |
2764 | &alloc_hint); | |
2765 | if (!ret) | |
2766 | dcs = BTRFS_DC_SETUP; | |
2767 | btrfs_free_reserved_data_space(inode, num_pages); | |
2768 | out_put: | |
2769 | iput(inode); | |
2770 | out_free: | |
2771 | btrfs_release_path(path); | |
2772 | out: | |
2773 | spin_lock(&block_group->lock); | |
2774 | block_group->disk_cache_state = dcs; | |
2775 | spin_unlock(&block_group->lock); | |
2776 | ||
2777 | return ret; | |
2778 | } | |
2779 | ||
2780 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, | |
2781 | struct btrfs_root *root) | |
2782 | { | |
2783 | struct btrfs_block_group_cache *cache; | |
2784 | int err = 0; | |
2785 | struct btrfs_path *path; | |
2786 | u64 last = 0; | |
2787 | ||
2788 | path = btrfs_alloc_path(); | |
2789 | if (!path) | |
2790 | return -ENOMEM; | |
2791 | ||
2792 | again: | |
2793 | while (1) { | |
2794 | cache = btrfs_lookup_first_block_group(root->fs_info, last); | |
2795 | while (cache) { | |
2796 | if (cache->disk_cache_state == BTRFS_DC_CLEAR) | |
2797 | break; | |
2798 | cache = next_block_group(root, cache); | |
2799 | } | |
2800 | if (!cache) { | |
2801 | if (last == 0) | |
2802 | break; | |
2803 | last = 0; | |
2804 | continue; | |
2805 | } | |
2806 | err = cache_save_setup(cache, trans, path); | |
2807 | last = cache->key.objectid + cache->key.offset; | |
2808 | btrfs_put_block_group(cache); | |
2809 | } | |
2810 | ||
2811 | while (1) { | |
2812 | if (last == 0) { | |
2813 | err = btrfs_run_delayed_refs(trans, root, | |
2814 | (unsigned long)-1); | |
2815 | BUG_ON(err); | |
2816 | } | |
2817 | ||
2818 | cache = btrfs_lookup_first_block_group(root->fs_info, last); | |
2819 | while (cache) { | |
2820 | if (cache->disk_cache_state == BTRFS_DC_CLEAR) { | |
2821 | btrfs_put_block_group(cache); | |
2822 | goto again; | |
2823 | } | |
2824 | ||
2825 | if (cache->dirty) | |
2826 | break; | |
2827 | cache = next_block_group(root, cache); | |
2828 | } | |
2829 | if (!cache) { | |
2830 | if (last == 0) | |
2831 | break; | |
2832 | last = 0; | |
2833 | continue; | |
2834 | } | |
2835 | ||
2836 | if (cache->disk_cache_state == BTRFS_DC_SETUP) | |
2837 | cache->disk_cache_state = BTRFS_DC_NEED_WRITE; | |
2838 | cache->dirty = 0; | |
2839 | last = cache->key.objectid + cache->key.offset; | |
2840 | ||
2841 | err = write_one_cache_group(trans, root, path, cache); | |
2842 | BUG_ON(err); | |
2843 | btrfs_put_block_group(cache); | |
2844 | } | |
2845 | ||
2846 | while (1) { | |
2847 | /* | |
2848 | * I don't think this is needed since we're just marking our | |
2849 | * preallocated extent as written, but just in case, it | |
2850 | * can't hurt. | |
2851 | */ | |
2852 | if (last == 0) { | |
2853 | err = btrfs_run_delayed_refs(trans, root, | |
2854 | (unsigned long)-1); | |
2855 | BUG_ON(err); | |
2856 | } | |
2857 | ||
2858 | cache = btrfs_lookup_first_block_group(root->fs_info, last); | |
2859 | while (cache) { | |
2860 | /* | |
2861 | * Really this shouldn't happen, but it could if we | |
2862 | * couldn't write the entire preallocated extent and | |
2863 | * splitting the extent resulted in a new block. | |
2864 | */ | |
2865 | if (cache->dirty) { | |
2866 | btrfs_put_block_group(cache); | |
2867 | goto again; | |
2868 | } | |
2869 | if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE) | |
2870 | break; | |
2871 | cache = next_block_group(root, cache); | |
2872 | } | |
2873 | if (!cache) { | |
2874 | if (last == 0) | |
2875 | break; | |
2876 | last = 0; | |
2877 | continue; | |
2878 | } | |
2879 | ||
2880 | btrfs_write_out_cache(root, trans, cache, path); | |
2881 | ||
2882 | /* | |
2883 | * If we didn't have an error then the cache state is still | |
2884 | * NEED_WRITE, so we can set it to WRITTEN. | |
2885 | */ | |
2886 | if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE) | |
2887 | cache->disk_cache_state = BTRFS_DC_WRITTEN; | |
2888 | last = cache->key.objectid + cache->key.offset; | |
2889 | btrfs_put_block_group(cache); | |
2890 | } | |
2891 | ||
2892 | btrfs_free_path(path); | |
2893 | return 0; | |
2894 | } | |
2895 | ||
2896 | int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr) | |
2897 | { | |
2898 | struct btrfs_block_group_cache *block_group; | |
2899 | int readonly = 0; | |
2900 | ||
2901 | block_group = btrfs_lookup_block_group(root->fs_info, bytenr); | |
2902 | if (!block_group || block_group->ro) | |
2903 | readonly = 1; | |
2904 | if (block_group) | |
2905 | btrfs_put_block_group(block_group); | |
2906 | return readonly; | |
2907 | } | |
2908 | ||
2909 | static int update_space_info(struct btrfs_fs_info *info, u64 flags, | |
2910 | u64 total_bytes, u64 bytes_used, | |
2911 | struct btrfs_space_info **space_info) | |
2912 | { | |
2913 | struct btrfs_space_info *found; | |
2914 | int i; | |
2915 | int factor; | |
2916 | ||
2917 | if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 | | |
2918 | BTRFS_BLOCK_GROUP_RAID10)) | |
2919 | factor = 2; | |
2920 | else | |
2921 | factor = 1; | |
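/*
 * Editorial example: factor counts how many bytes hit the disk per logical
 * byte. With DUP, RAID1, or RAID10 (factor == 2), adding a 1 GiB chunk
 * raises total_bytes by 1 GiB but disk_total by 2 GiB, since every byte is
 * written twice.
 */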
2922 | ||
2923 | found = __find_space_info(info, flags); | |
2924 | if (found) { | |
2925 | spin_lock(&found->lock); | |
2926 | found->total_bytes += total_bytes; | |
2927 | found->disk_total += total_bytes * factor; | |
2928 | found->bytes_used += bytes_used; | |
2929 | found->disk_used += bytes_used * factor; | |
2930 | found->full = 0; | |
2931 | spin_unlock(&found->lock); | |
2932 | *space_info = found; | |
2933 | return 0; | |
2934 | } | |
2935 | found = kzalloc(sizeof(*found), GFP_NOFS); | |
2936 | if (!found) | |
2937 | return -ENOMEM; | |
2938 | ||
2939 | for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) | |
2940 | INIT_LIST_HEAD(&found->block_groups[i]); | |
2941 | init_rwsem(&found->groups_sem); | |
2942 | spin_lock_init(&found->lock); | |
2943 | found->flags = flags & (BTRFS_BLOCK_GROUP_DATA | | |
2944 | BTRFS_BLOCK_GROUP_SYSTEM | | |
2945 | BTRFS_BLOCK_GROUP_METADATA); | |
2946 | found->total_bytes = total_bytes; | |
2947 | found->disk_total = total_bytes * factor; | |
2948 | found->bytes_used = bytes_used; | |
2949 | found->disk_used = bytes_used * factor; | |
2950 | found->bytes_pinned = 0; | |
2951 | found->bytes_reserved = 0; | |
2952 | found->bytes_readonly = 0; | |
2953 | found->bytes_may_use = 0; | |
2954 | found->full = 0; | |
2955 | found->force_alloc = CHUNK_ALLOC_NO_FORCE; | |
2956 | found->chunk_alloc = 0; | |
2957 | found->flush = 0; | |
2958 | init_waitqueue_head(&found->wait); | |
2959 | *space_info = found; | |
2960 | list_add_rcu(&found->list, &info->space_info); | |
2961 | return 0; | |
2962 | } | |
2963 | ||
2964 | static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) | |
2965 | { | |
2966 | u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 | | |
2967 | BTRFS_BLOCK_GROUP_RAID1 | | |
2968 | BTRFS_BLOCK_GROUP_RAID10 | | |
2969 | BTRFS_BLOCK_GROUP_DUP); | |
2970 | if (extra_flags) { | |
2971 | if (flags & BTRFS_BLOCK_GROUP_DATA) | |
2972 | fs_info->avail_data_alloc_bits |= extra_flags; | |
2973 | if (flags & BTRFS_BLOCK_GROUP_METADATA) | |
2974 | fs_info->avail_metadata_alloc_bits |= extra_flags; | |
2975 | if (flags & BTRFS_BLOCK_GROUP_SYSTEM) | |
2976 | fs_info->avail_system_alloc_bits |= extra_flags; | |
2977 | } | |
2978 | } | |
2979 | ||
2980 | u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags) | |
2981 | { | |
2982 | /* | |
2983 | * we add in the count of missing devices because we want | |
2984 | * to make sure that any RAID levels on a degraded FS | |
2985 | * continue to be honored. | |
2986 | */ | |
2987 | u64 num_devices = root->fs_info->fs_devices->rw_devices + | |
2988 | root->fs_info->fs_devices->missing_devices; | |
2989 | ||
2990 | if (num_devices == 1) | |
2991 | flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0); | |
2992 | if (num_devices < 4) | |
2993 | flags &= ~BTRFS_BLOCK_GROUP_RAID10; | |
2994 | ||
2995 | if ((flags & BTRFS_BLOCK_GROUP_DUP) && | |
2996 | (flags & (BTRFS_BLOCK_GROUP_RAID1 | | |
2997 | BTRFS_BLOCK_GROUP_RAID10))) { | |
2998 | flags &= ~BTRFS_BLOCK_GROUP_DUP; | |
2999 | } | |
3000 | ||
3001 | if ((flags & BTRFS_BLOCK_GROUP_RAID1) && | |
3002 | (flags & BTRFS_BLOCK_GROUP_RAID10)) { | |
3003 | flags &= ~BTRFS_BLOCK_GROUP_RAID1; | |
3004 | } | |
3005 | ||
3006 | if ((flags & BTRFS_BLOCK_GROUP_RAID0) && | |
3007 | ((flags & BTRFS_BLOCK_GROUP_RAID1) | | |
3008 | (flags & BTRFS_BLOCK_GROUP_RAID10) | | |
3009 | (flags & BTRFS_BLOCK_GROUP_DUP))) | |
3010 | flags &= ~BTRFS_BLOCK_GROUP_RAID0; | |
3011 | return flags; | |
3012 | } | |
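/*
 * Editorial worked example: on a degraded FS with two usable devices asking
 * for RAID10 | RAID1, num_devices < 4 strips RAID10 first, so the later
 * RAID1/RAID10 conflict check leaves plain RAID1. With a single device, both
 * RAID1 and RAID0 are stripped, and allocation falls back to DUP or single.
 */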
3013 | ||
3014 | static u64 get_alloc_profile(struct btrfs_root *root, u64 flags) | |
3015 | { | |
3016 | if (flags & BTRFS_BLOCK_GROUP_DATA) | |
3017 | flags |= root->fs_info->avail_data_alloc_bits & | |
3018 | root->fs_info->data_alloc_profile; | |
3019 | else if (flags & BTRFS_BLOCK_GROUP_SYSTEM) | |
3020 | flags |= root->fs_info->avail_system_alloc_bits & | |
3021 | root->fs_info->system_alloc_profile; | |
3022 | else if (flags & BTRFS_BLOCK_GROUP_METADATA) | |
3023 | flags |= root->fs_info->avail_metadata_alloc_bits & | |
3024 | root->fs_info->metadata_alloc_profile; | |
3025 | return btrfs_reduce_alloc_profile(root, flags); | |
3026 | } | |
3027 | ||
3028 | u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data) | |
3029 | { | |
3030 | u64 flags; | |
3031 | ||
3032 | if (data) | |
3033 | flags = BTRFS_BLOCK_GROUP_DATA; | |
3034 | else if (root == root->fs_info->chunk_root) | |
3035 | flags = BTRFS_BLOCK_GROUP_SYSTEM; | |
3036 | else | |
3037 | flags = BTRFS_BLOCK_GROUP_METADATA; | |
3038 | ||
3039 | return get_alloc_profile(root, flags); | |
3040 | } | |
3041 | ||
3042 | void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode) | |
3043 | { | |
3044 | BTRFS_I(inode)->space_info = __find_space_info(root->fs_info, | |
3045 | BTRFS_BLOCK_GROUP_DATA); | |
3046 | } | |
3047 | ||
3048 | /* | |
3049 | * This will check the space that the inode allocates from to make sure we have | |
3050 | * enough space for bytes. | |
3051 | */ | |
3052 | int btrfs_check_data_free_space(struct inode *inode, u64 bytes) | |
3053 | { | |
3054 | struct btrfs_space_info *data_sinfo; | |
3055 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
3056 | u64 used; | |
3057 | int ret = 0, committed = 0, alloc_chunk = 1; | |
3058 | ||
3059 | /* make sure bytes are sectorsize aligned */ | |
3060 | bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); | |
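/*
 * Editorial worked example (assuming a 4096-byte sectorsize):
 * bytes = 5000 -> (5000 + 4095) & ~4095 = 8192, i.e. the request is rounded
 * up to the next sector boundary; an already aligned value such as 8192
 * passes through unchanged.
 */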
3061 | ||
3062 | if (root == root->fs_info->tree_root || | |
3063 | BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) { | |
3064 | alloc_chunk = 0; | |
3065 | committed = 1; | |
3066 | } | |
3067 | ||
3068 | data_sinfo = BTRFS_I(inode)->space_info; | |
3069 | if (!data_sinfo) | |
3070 | goto alloc; | |
3071 | ||
3072 | again: | |
3073 | /* make sure we have enough space to handle the data first */ | |
3074 | spin_lock(&data_sinfo->lock); | |
3075 | used = data_sinfo->bytes_used + data_sinfo->bytes_reserved + | |
3076 | data_sinfo->bytes_pinned + data_sinfo->bytes_readonly + | |
3077 | data_sinfo->bytes_may_use; | |
3078 | ||
3079 | if (used + bytes > data_sinfo->total_bytes) { | |
3080 | struct btrfs_trans_handle *trans; | |
3081 | ||
3082 | /* | |
3083 | * if we don't have enough free bytes in this space then we need | |
3084 | * to alloc a new chunk. | |
3085 | */ | |
3086 | if (!data_sinfo->full && alloc_chunk) { | |
3087 | u64 alloc_target; | |
3088 | ||
3089 | data_sinfo->force_alloc = CHUNK_ALLOC_FORCE; | |
3090 | spin_unlock(&data_sinfo->lock); | |
3091 | alloc: | |
3092 | alloc_target = btrfs_get_alloc_profile(root, 1); | |
3093 | trans = btrfs_join_transaction(root); | |
3094 | if (IS_ERR(trans)) | |
3095 | return PTR_ERR(trans); | |
3096 | ||
3097 | ret = do_chunk_alloc(trans, root->fs_info->extent_root, | |
3098 | bytes + 2 * 1024 * 1024, | |
3099 | alloc_target, | |
3100 | CHUNK_ALLOC_NO_FORCE); | |
3101 | btrfs_end_transaction(trans, root); | |
3102 | if (ret < 0) { | |
3103 | if (ret != -ENOSPC) | |
3104 | return ret; | |
3105 | else | |
3106 | goto commit_trans; | |
3107 | } | |
3108 | ||
3109 | if (!data_sinfo) { | |
3110 | btrfs_set_inode_space_info(root, inode); | |
3111 | data_sinfo = BTRFS_I(inode)->space_info; | |
3112 | } | |
3113 | goto again; | |
3114 | } | |
3115 | ||
3116 | /* | |
3117 | * If we have less pinned bytes than we want to allocate then | |
3118 | * don't bother committing the transaction, it won't help us. | |
3119 | */ | |
3120 | if (data_sinfo->bytes_pinned < bytes) | |
3121 | committed = 1; | |
3122 | spin_unlock(&data_sinfo->lock); | |
3123 | ||
3124 | /* commit the current transaction and try again */ | |
3125 | commit_trans: | |
3126 | if (!committed && | |
3127 | !atomic_read(&root->fs_info->open_ioctl_trans)) { | |
3128 | committed = 1; | |
3129 | trans = btrfs_join_transaction(root); | |
3130 | if (IS_ERR(trans)) | |
3131 | return PTR_ERR(trans); | |
3132 | ret = btrfs_commit_transaction(trans, root); | |
3133 | if (ret) | |
3134 | return ret; | |
3135 | goto again; | |
3136 | } | |
3137 | ||
3138 | return -ENOSPC; | |
3139 | } | |
3140 | data_sinfo->bytes_may_use += bytes; | |
3141 | spin_unlock(&data_sinfo->lock); | |
3142 | ||
3143 | return 0; | |
3144 | } | |
3145 | ||
3146 | /* | |
3147 | * Called if we need to clear a data reservation for this inode. | |
3148 | */ | |
3149 | void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes) | |
3150 | { | |
3151 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
3152 | struct btrfs_space_info *data_sinfo; | |
3153 | ||
3154 | /* make sure bytes are sectorsize aligned */ | |
3155 | bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); | |
3156 | ||
3157 | data_sinfo = BTRFS_I(inode)->space_info; | |
3158 | spin_lock(&data_sinfo->lock); | |
3159 | data_sinfo->bytes_may_use -= bytes; | |
3160 | spin_unlock(&data_sinfo->lock); | |
3161 | } | |
3162 | ||
3163 | static void force_metadata_allocation(struct btrfs_fs_info *info) | |
3164 | { | |
3165 | struct list_head *head = &info->space_info; | |
3166 | struct btrfs_space_info *found; | |
3167 | ||
3168 | rcu_read_lock(); | |
3169 | list_for_each_entry_rcu(found, head, list) { | |
3170 | if (found->flags & BTRFS_BLOCK_GROUP_METADATA) | |
3171 | found->force_alloc = CHUNK_ALLOC_FORCE; | |
3172 | } | |
3173 | rcu_read_unlock(); | |
3174 | } | |
3175 | ||
3176 | static int should_alloc_chunk(struct btrfs_root *root, | |
3177 | struct btrfs_space_info *sinfo, u64 alloc_bytes, | |
3178 | int force) | |
3179 | { | |
3180 | struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; | |
3181 | u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly; | |
3182 | u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved; | |
3183 | u64 thresh; | |
3184 | ||
3185 | if (force == CHUNK_ALLOC_FORCE) | |
3186 | return 1; | |
3187 | ||
3188 | /* | |
3189 | * We need to take into account the global rsv because for all intents | |
3190 | * and purposes it's used space. Don't worry about locking the | |
3191 | * global_rsv, it doesn't change except when the transaction commits. | |
3192 | */ | |
3193 | num_allocated += global_rsv->size; | |
3194 | ||
3195 | /* | |
3196 | * in limited mode, we want to have some free space up to | |
3197 | * about 1% of the FS size. | |
3198 | */ | |
3199 | if (force == CHUNK_ALLOC_LIMITED) { | |
3200 | thresh = btrfs_super_total_bytes(&root->fs_info->super_copy); | |
3201 | thresh = max_t(u64, 64 * 1024 * 1024, | |
3202 | div_factor_fine(thresh, 1)); | |
3203 | ||
3204 | if (num_bytes - num_allocated < thresh) | |
3205 | return 1; | |
3206 | } | |
3207 | ||
3208 | /* | |
3209 | * we have two similar checks here, one based on a percentage | |
3210 | * and one based on a hard number of 256MB. The idea | |
3211 | * is that if we have a good amount of free | |
3212 | * room, don't allocate a chunk. A good amount is | |
3213 | * less than 80% utilization of the chunks we have allocated, | |
3214 | * or more than 256MB free | |
3215 | */ | |
3216 | if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes) | |
3217 | return 0; | |
3218 | ||
3219 | if (num_allocated + alloc_bytes < div_factor(num_bytes, 8)) | |
3220 | return 0; | |
3221 | ||
3222 | thresh = btrfs_super_total_bytes(&root->fs_info->super_copy); | |
3223 | ||
3224 | /* 256MB or 5% of the FS */ | |
3225 | thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5)); | |
3226 | ||
3227 | if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3)) | |
3228 | return 0; | |
3229 | return 1; | |
3230 | } | |
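/*
 * Editorial worked example (numbers are illustrative): say this space_info
 * holds 10 GiB of chunks (num_bytes) with 9.5 GiB used or reserved and a
 * 1 GiB request arrives under CHUNK_ALLOC_NO_FORCE. The 256MB check fails
 * to veto (9.5 + 1 + 0.25 > 10), the 80% check fails to veto (10.5 > 8),
 * and unless the FS-wide 5%/256MB threshold applies, the function returns 1
 * and a new chunk is allocated.
 */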
3231 | ||
3232 | static int do_chunk_alloc(struct btrfs_trans_handle *trans, | |
3233 | struct btrfs_root *extent_root, u64 alloc_bytes, | |
3234 | u64 flags, int force) | |
3235 | { | |
3236 | struct btrfs_space_info *space_info; | |
3237 | struct btrfs_fs_info *fs_info = extent_root->fs_info; | |
3238 | int wait_for_alloc = 0; | |
3239 | int ret = 0; | |
3240 | ||
3241 | flags = btrfs_reduce_alloc_profile(extent_root, flags); | |
3242 | ||
3243 | space_info = __find_space_info(extent_root->fs_info, flags); | |
3244 | if (!space_info) { | |
3245 | ret = update_space_info(extent_root->fs_info, flags, | |
3246 | 0, 0, &space_info); | |
3247 | BUG_ON(ret); | |
3248 | } | |
3249 | BUG_ON(!space_info); | |
3250 | ||
3251 | again: | |
3252 | spin_lock(&space_info->lock); | |
3253 | if (space_info->force_alloc) | |
3254 | force = space_info->force_alloc; | |
3255 | if (space_info->full) { | |
3256 | spin_unlock(&space_info->lock); | |
3257 | return 0; | |
3258 | } | |
3259 | ||
3260 | if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) { | |
3261 | spin_unlock(&space_info->lock); | |
3262 | return 0; | |
3263 | } else if (space_info->chunk_alloc) { | |
3264 | wait_for_alloc = 1; | |
3265 | } else { | |
3266 | space_info->chunk_alloc = 1; | |
3267 | } | |
3268 | ||
3269 | spin_unlock(&space_info->lock); | |
3270 | ||
3271 | mutex_lock(&fs_info->chunk_mutex); | |
3272 | ||
3273 | /* | |
3274 | * The chunk_mutex is held throughout the entirety of a chunk | |
3275 | * allocation, so once we've acquired the chunk_mutex we know that the | |
3276 | * other guy is done and we need to recheck and see if we should | |
3277 | * allocate. | |
3278 | */ | |
3279 | if (wait_for_alloc) { | |
3280 | mutex_unlock(&fs_info->chunk_mutex); | |
3281 | wait_for_alloc = 0; | |
3282 | goto again; | |
3283 | } | |
3284 | ||
3285 | /* | |
3286 | * If we have mixed data/metadata chunks we want to make sure we keep | |
3287 | * allocating mixed chunks instead of individual chunks. | |
3288 | */ | |
3289 | if (btrfs_mixed_space_info(space_info)) | |
3290 | flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA); | |
3291 | ||
3292 | /* | |
3293 | * if we're doing a data chunk, go ahead and make sure that | |
3294 | * we keep a reasonable number of metadata chunks allocated in the | |
3295 | * FS as well. | |
3296 | */ | |
3297 | if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { | |
3298 | fs_info->data_chunk_allocations++; | |
3299 | if (!(fs_info->data_chunk_allocations % | |
3300 | fs_info->metadata_ratio)) | |
3301 | force_metadata_allocation(fs_info); | |
3302 | } | |
3303 | ||
3304 | ret = btrfs_alloc_chunk(trans, extent_root, flags); | |
3305 | if (ret < 0 && ret != -ENOSPC) | |
3306 | goto out; | |
3307 | ||
3308 | spin_lock(&space_info->lock); | |
3309 | if (ret) | |
3310 | space_info->full = 1; | |
3311 | else | |
3312 | ret = 1; | |
3313 | ||
3314 | space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; | |
3315 | space_info->chunk_alloc = 0; | |
3316 | spin_unlock(&space_info->lock); | |
3317 | out: | |
3318 | mutex_unlock(&extent_root->fs_info->chunk_mutex); | |
3319 | return ret; | |
3320 | } | |
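
/*
 * Typical usage sketch (hypothetical caller): ask for a data chunk only
 * if should_alloc_chunk() agrees we really need one, e.g.
 *
 *	do_chunk_alloc(trans, root, num_bytes,
 *		       BTRFS_BLOCK_GROUP_DATA, CHUNK_ALLOC_NO_FORCE);
 *
 * and pass CHUNK_ALLOC_FORCE instead to bypass the heuristics entirely.
 */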
3321 | ||
3322 | /* | |
3323 | * shrink metadata reservation for delalloc | |
3324 | */ | |
3325 | static int shrink_delalloc(struct btrfs_trans_handle *trans, | |
3326 | struct btrfs_root *root, u64 to_reclaim, int sync) | |
3327 | { | |
3328 | struct btrfs_block_rsv *block_rsv; | |
3329 | struct btrfs_space_info *space_info; | |
3330 | u64 reserved; | |
3331 | u64 max_reclaim; | |
3332 | u64 reclaimed = 0; | |
3333 | long time_left; | |
3334 | int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT; | |
3335 | int loops = 0; | |
3336 | unsigned long progress; | |
3337 | ||
3338 | block_rsv = &root->fs_info->delalloc_block_rsv; | |
3339 | space_info = block_rsv->space_info; | |
3340 | ||
3341 | smp_mb(); | |
3342 | reserved = space_info->bytes_may_use; | |
3343 | progress = space_info->reservation_progress; | |
3344 | ||
3345 | if (reserved == 0) | |
3346 | return 0; | |
3347 | ||
3348 | smp_mb(); | |
3349 | if (root->fs_info->delalloc_bytes == 0) { | |
3350 | if (trans) | |
3351 | return 0; | |
3352 | btrfs_wait_ordered_extents(root, 0, 0); | |
3353 | return 0; | |
3354 | } | |
3355 | ||
3356 | max_reclaim = min(reserved, to_reclaim); | |
3357 | ||
3358 | while (loops < 1024) { | |
3359 | /* have the flusher threads jump in and do some IO */ | |
3360 | smp_mb(); | |
3361 | nr_pages = min_t(unsigned long, nr_pages, | |
3362 | root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT); | |
3363 | writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages); | |
3364 | ||
3365 | spin_lock(&space_info->lock); | |
3366 | if (reserved > space_info->bytes_may_use) | |
3367 | reclaimed += reserved - space_info->bytes_may_use; | |
3368 | reserved = space_info->bytes_may_use; | |
3369 | spin_unlock(&space_info->lock); | |
3370 | ||
3371 | loops++; | |
3372 | ||
3373 | if (reserved == 0 || reclaimed >= max_reclaim) | |
3374 | break; | |
3375 | ||
3376 | if (trans && trans->transaction->blocked) | |
3377 | return -EAGAIN; | |
3378 | ||
3379 | time_left = schedule_timeout_interruptible(1); | |
3380 | ||
3381 | /* We were interrupted, exit */ | |
3382 | if (time_left) | |
3383 | break; | |
3384 | ||
3385 | /* we've kicked the IO a few times; if anything has been freed, | |
3386 | * exit. There is no sense in looping here for a long time | |
3387 | * when we really need to commit the transaction, or there are | |
3388 | * just too many writers without enough free space | |
3389 | */ | |
3390 | ||
3391 | if (loops > 3) { | |
3392 | smp_mb(); | |
3393 | if (progress != space_info->reservation_progress) | |
3394 | break; | |
3395 | } | |
3396 | ||
3397 | } | |
3398 | if (reclaimed >= to_reclaim && !trans) | |
3399 | btrfs_wait_ordered_extents(root, 0, 0); | |
3400 | return reclaimed >= to_reclaim; | |
3401 | } | |
3402 | ||
3403 | /* | |
3404 | * Retries tells us how many times we've called reserve_metadata_bytes. The | |
3405 | * idea is if this is the first call (retries == 0) then we will add to our | |
3406 | * reserved count if we can't make the allocation in order to hold our place | |
3407 | * while we go and try to free up space. That way for retries > 1 we don't try | |
3408 | * to add space, we just check to see if the amount of unused space is >= the | |
3409 | * total space, meaning that our reservation is valid. | |
3410 | * | |
3411 | * However, if we don't intend to retry this reservation, pass -1 as retries so | |
3412 | * that it short circuits this logic. | |
3413 | */ | |
3414 | static int reserve_metadata_bytes(struct btrfs_trans_handle *trans, | |
3415 | struct btrfs_root *root, | |
3416 | struct btrfs_block_rsv *block_rsv, | |
3417 | u64 orig_bytes, int flush) | |
3418 | { | |
3419 | struct btrfs_space_info *space_info = block_rsv->space_info; | |
3420 | u64 unused; | |
3421 | u64 num_bytes = orig_bytes; | |
3422 | int retries = 0; | |
3423 | int ret = 0; | |
3424 | bool committed = false; | |
3425 | bool flushing = false; | |
3426 | again: | |
3427 | ret = 0; | |
3428 | spin_lock(&space_info->lock); | |
3429 | /* | |
3430 | * We only want to wait if somebody other than us is flushing and we are | |
3431 | * actually allowed to flush. | |
3432 | */ | |
3433 | while (flush && !flushing && space_info->flush) { | |
3434 | spin_unlock(&space_info->lock); | |
3435 | /* | |
3436 | * If we have a trans handle we can't wait because the flusher | |
3437 | * may have to commit the transaction, which would mean we would | |
3438 | * deadlock since we are waiting for the flusher to finish, but | |
3439 | * hold the current transaction open. | |
3440 | */ | |
3441 | if (trans) | |
3442 | return -EAGAIN; | |
3443 | ret = wait_event_interruptible(space_info->wait, | |
3444 | !space_info->flush); | |
3445 | /* Must have been interrupted, return */ | |
3446 | if (ret) | |
3447 | return -EINTR; | |
3448 | ||
3449 | spin_lock(&space_info->lock); | |
3450 | } | |
3451 | ||
3452 | ret = -ENOSPC; | |
3453 | unused = space_info->bytes_used + space_info->bytes_reserved + | |
3454 | space_info->bytes_pinned + space_info->bytes_readonly + | |
3455 | space_info->bytes_may_use; | |
3456 | ||
3457 | /* | |
3458 | * The idea here is that if we've not already over-reserved the block group | |
3459 | * then we can go ahead and save our reservation first and then start | |
3460 | * flushing if we need to. Otherwise, if we've already overcommitted, | |
3461 | * let's start flushing stuff first and then come back and try to make | |
3462 | * our reservation. | |
3463 | */ | |
3464 | if (unused <= space_info->total_bytes) { | |
3465 | unused = space_info->total_bytes - unused; | |
3466 | if (unused >= num_bytes) { | |
3467 | space_info->bytes_may_use += orig_bytes; | |
3468 | ret = 0; | |
3469 | } else { | |
3470 | /* | |
3471 | * Ok, set num_bytes to orig_bytes since we aren't | |
3472 | * overcommitted, this way we only try to reclaim what | |
3473 | * we need. | |
3474 | */ | |
3475 | num_bytes = orig_bytes; | |
3476 | } | |
3477 | } else { | |
3478 | /* | |
3479 | * Ok, we're overcommitted, set num_bytes to the overcommitted | |
3480 | * amount plus the amount of bytes that we need for this | |
3481 | * reservation. | |
3482 | */ | |
3483 | num_bytes = unused - space_info->total_bytes + | |
3484 | (orig_bytes * (retries + 1)); | |
3485 | } | |
3486 | ||
3487 | /* | |
3488 | * Couldn't make our reservation, save our place so while we're trying | |
3489 | * to reclaim space we can actually use it instead of somebody else | |
3490 | * stealing it from us. | |
3491 | */ | |
3492 | if (ret && flush) { | |
3493 | flushing = true; | |
3494 | space_info->flush = 1; | |
3495 | } | |
3496 | ||
3497 | spin_unlock(&space_info->lock); | |
3498 | ||
3499 | if (!ret || !flush) | |
3500 | goto out; | |
3501 | ||
3502 | /* | |
3503 | * We do synchronous shrinking since we don't actually unreserve | |
3504 | * metadata until after the IO is completed. | |
3505 | */ | |
3506 | ret = shrink_delalloc(trans, root, num_bytes, 1); | |
3507 | if (ret < 0) | |
3508 | goto out; | |
3509 | ||
3510 | ret = 0; | |
3511 | ||
3512 | /* | |
3513 | * So if we were overcommitted it's possible that somebody else flushed | |
3514 | * out enough space and we simply didn't have enough space to reclaim, | |
3515 | * so go back around and try again. | |
3516 | */ | |
3517 | if (retries < 2) { | |
3518 | retries++; | |
3519 | goto again; | |
3520 | } | |
3521 | ||
3522 | /* | |
3523 | * Not enough space to be reclaimed, don't bother committing the | |
3524 | * transaction. | |
3525 | */ | |
3526 | spin_lock(&space_info->lock); | |
3527 | if (space_info->bytes_pinned < orig_bytes) | |
3528 | ret = -ENOSPC; | |
3529 | spin_unlock(&space_info->lock); | |
3530 | if (ret) | |
3531 | goto out; | |
3532 | ||
3533 | ret = -EAGAIN; | |
3534 | if (trans) | |
3535 | goto out; | |
3536 | ||
3537 | ret = -ENOSPC; | |
3538 | if (committed) | |
3539 | goto out; | |
3540 | ||
3541 | trans = btrfs_join_transaction(root); | |
3542 | if (IS_ERR(trans)) | |
3543 | goto out; | |
3544 | ret = btrfs_commit_transaction(trans, root); | |
3545 | if (!ret) { | |
3546 | trans = NULL; | |
3547 | committed = true; | |
3548 | goto again; | |
3549 | } | |
3550 | ||
3551 | out: | |
3552 | if (flushing) { | |
3553 | spin_lock(&space_info->lock); | |
3554 | space_info->flush = 0; | |
3555 | wake_up_all(&space_info->wait); | |
3556 | spin_unlock(&space_info->lock); | |
3557 | } | |
3558 | return ret; | |
3559 | } | |
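
/*
 * Minimal sketch of the reclaim-target math above (illustrative only,
 * nothing calls this): when the space_info is overcommitted, each pass
 * asks to reclaim the overcommitted amount plus a growing multiple of
 * the original request; otherwise we reclaim only what we asked for.
 */
static inline u64 example_reclaim_target(u64 used, u64 total_bytes,
					 u64 orig_bytes, int retries)
{
	if (used <= total_bytes)
		return orig_bytes;
	return used - total_bytes + (orig_bytes * (retries + 1));
}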
3560 | ||
3561 | static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans, | |
3562 | struct btrfs_root *root) | |
3563 | { | |
3564 | struct btrfs_block_rsv *block_rsv; | |
3565 | if (root->ref_cows) | |
3566 | block_rsv = trans->block_rsv; | |
3567 | else | |
3568 | block_rsv = root->block_rsv; | |
3569 | ||
3570 | if (!block_rsv) | |
3571 | block_rsv = &root->fs_info->empty_block_rsv; | |
3572 | ||
3573 | return block_rsv; | |
3574 | } | |
3575 | ||
3576 | static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, | |
3577 | u64 num_bytes) | |
3578 | { | |
3579 | int ret = -ENOSPC; | |
3580 | spin_lock(&block_rsv->lock); | |
3581 | if (block_rsv->reserved >= num_bytes) { | |
3582 | block_rsv->reserved -= num_bytes; | |
3583 | if (block_rsv->reserved < block_rsv->size) | |
3584 | block_rsv->full = 0; | |
3585 | ret = 0; | |
3586 | } | |
3587 | spin_unlock(&block_rsv->lock); | |
3588 | return ret; | |
3589 | } | |
3590 | ||
3591 | static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv, | |
3592 | u64 num_bytes, int update_size) | |
3593 | { | |
3594 | spin_lock(&block_rsv->lock); | |
3595 | block_rsv->reserved += num_bytes; | |
3596 | if (update_size) | |
3597 | block_rsv->size += num_bytes; | |
3598 | else if (block_rsv->reserved >= block_rsv->size) | |
3599 | block_rsv->full = 1; | |
3600 | spin_unlock(&block_rsv->lock); | |
3601 | } | |
3602 | ||
3603 | static void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv, | |
3604 | struct btrfs_block_rsv *dest, u64 num_bytes) | |
3605 | { | |
3606 | struct btrfs_space_info *space_info = block_rsv->space_info; | |
3607 | ||
3608 | spin_lock(&block_rsv->lock); | |
3609 | if (num_bytes == (u64)-1) | |
3610 | num_bytes = block_rsv->size; | |
3611 | block_rsv->size -= num_bytes; | |
3612 | if (block_rsv->reserved >= block_rsv->size) { | |
3613 | num_bytes = block_rsv->reserved - block_rsv->size; | |
3614 | block_rsv->reserved = block_rsv->size; | |
3615 | block_rsv->full = 1; | |
3616 | } else { | |
3617 | num_bytes = 0; | |
3618 | } | |
3619 | spin_unlock(&block_rsv->lock); | |
3620 | ||
3621 | if (num_bytes > 0) { | |
3622 | if (dest) { | |
3623 | spin_lock(&dest->lock); | |
3624 | if (!dest->full) { | |
3625 | u64 bytes_to_add; | |
3626 | ||
3627 | bytes_to_add = dest->size - dest->reserved; | |
3628 | bytes_to_add = min(num_bytes, bytes_to_add); | |
3629 | dest->reserved += bytes_to_add; | |
3630 | if (dest->reserved >= dest->size) | |
3631 | dest->full = 1; | |
3632 | num_bytes -= bytes_to_add; | |
3633 | } | |
3634 | spin_unlock(&dest->lock); | |
3635 | } | |
3636 | if (num_bytes) { | |
3637 | spin_lock(&space_info->lock); | |
3638 | space_info->bytes_may_use -= num_bytes; | |
3639 | space_info->reservation_progress++; | |
3640 | spin_unlock(&space_info->lock); | |
3641 | } | |
3642 | } | |
3643 | } | |
3644 | ||
3645 | static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src, | |
3646 | struct btrfs_block_rsv *dst, u64 num_bytes) | |
3647 | { | |
3648 | int ret; | |
3649 | ||
3650 | ret = block_rsv_use_bytes(src, num_bytes); | |
3651 | if (ret) | |
3652 | return ret; | |
3653 | ||
3654 | block_rsv_add_bytes(dst, num_bytes, 1); | |
3655 | return 0; | |
3656 | } | |
3657 | ||
3658 | void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv) | |
3659 | { | |
3660 | memset(rsv, 0, sizeof(*rsv)); | |
3661 | spin_lock_init(&rsv->lock); | |
3662 | } | |
3663 | ||
3664 | struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root) | |
3665 | { | |
3666 | struct btrfs_block_rsv *block_rsv; | |
3667 | struct btrfs_fs_info *fs_info = root->fs_info; | |
3668 | ||
3669 | block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS); | |
3670 | if (!block_rsv) | |
3671 | return NULL; | |
3672 | ||
3673 | btrfs_init_block_rsv(block_rsv); | |
3674 | block_rsv->space_info = __find_space_info(fs_info, | |
3675 | BTRFS_BLOCK_GROUP_METADATA); | |
3676 | return block_rsv; | |
3677 | } | |
3678 | ||
3679 | void btrfs_free_block_rsv(struct btrfs_root *root, | |
3680 | struct btrfs_block_rsv *rsv) | |
3681 | { | |
3682 | btrfs_block_rsv_release(root, rsv, (u64)-1); | |
3683 | kfree(rsv); | |
3684 | } | |
3685 | ||
3686 | int btrfs_block_rsv_add(struct btrfs_trans_handle *trans, | |
3687 | struct btrfs_root *root, | |
3688 | struct btrfs_block_rsv *block_rsv, | |
3689 | u64 num_bytes) | |
3690 | { | |
3691 | int ret; | |
3692 | ||
3693 | if (num_bytes == 0) | |
3694 | return 0; | |
3695 | ||
3696 | ret = reserve_metadata_bytes(trans, root, block_rsv, num_bytes, 1); | |
3697 | if (!ret) { | |
3698 | block_rsv_add_bytes(block_rsv, num_bytes, 1); | |
3699 | return 0; | |
3700 | } | |
3701 | ||
3702 | return ret; | |
3703 | } | |
3704 | ||
3705 | int btrfs_block_rsv_check(struct btrfs_trans_handle *trans, | |
3706 | struct btrfs_root *root, | |
3707 | struct btrfs_block_rsv *block_rsv, | |
3708 | u64 min_reserved, int min_factor) | |
3709 | { | |
3710 | u64 num_bytes = 0; | |
3711 | int ret = -ENOSPC; | |
3712 | ||
3713 | if (!block_rsv) | |
3714 | return 0; | |
3715 | ||
3716 | spin_lock(&block_rsv->lock); | |
3717 | if (min_factor > 0) | |
3718 | num_bytes = div_factor(block_rsv->size, min_factor); | |
3719 | if (min_reserved > num_bytes) | |
3720 | num_bytes = min_reserved; | |
3721 | ||
3722 | if (block_rsv->reserved >= num_bytes) | |
3723 | ret = 0; | |
3724 | else | |
3725 | num_bytes -= block_rsv->reserved; | |
3726 | spin_unlock(&block_rsv->lock); | |
3727 | ||
3728 | if (!ret) | |
3729 | return 0; | |
3730 | ||
3731 | ret = reserve_metadata_bytes(trans, root, block_rsv, num_bytes, 0); | |
3732 | if (!ret) { | |
3733 | block_rsv_add_bytes(block_rsv, num_bytes, 0); | |
3734 | return 0; | |
3735 | } | |
3736 | ||
3737 | return ret; | |
3738 | } | |
3739 | ||
3740 | int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, | |
3741 | struct btrfs_block_rsv *dst_rsv, | |
3742 | u64 num_bytes) | |
3743 | { | |
3744 | return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); | |
3745 | } | |
3746 | ||
3747 | void btrfs_block_rsv_release(struct btrfs_root *root, | |
3748 | struct btrfs_block_rsv *block_rsv, | |
3749 | u64 num_bytes) | |
3750 | { | |
3751 | struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; | |
3752 | if (global_rsv->full || global_rsv == block_rsv || | |
3753 | block_rsv->space_info != global_rsv->space_info) | |
3754 | global_rsv = NULL; | |
3755 | block_rsv_release_bytes(block_rsv, global_rsv, num_bytes); | |
3756 | } | |
3757 | ||
3758 | /* | |
3759 | * helper to calculate size of global block reservation. | |
3760 | * the desired value is sum of space used by extent tree, | |
3761 | * checksum tree and root tree | |
3762 | */ | |
3763 | static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info) | |
3764 | { | |
3765 | struct btrfs_space_info *sinfo; | |
3766 | u64 num_bytes; | |
3767 | u64 meta_used; | |
3768 | u64 data_used; | |
3769 | int csum_size = btrfs_super_csum_size(&fs_info->super_copy); | |
3770 | ||
3771 | sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA); | |
3772 | spin_lock(&sinfo->lock); | |
3773 | data_used = sinfo->bytes_used; | |
3774 | spin_unlock(&sinfo->lock); | |
3775 | ||
3776 | sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); | |
3777 | spin_lock(&sinfo->lock); | |
3778 | if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) | |
3779 | data_used = 0; | |
3780 | meta_used = sinfo->bytes_used; | |
3781 | spin_unlock(&sinfo->lock); | |
3782 | ||
3783 | num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) * | |
3784 | csum_size * 2; | |
3785 | num_bytes += div64_u64(data_used + meta_used, 50); | |
3786 | ||
3787 | if (num_bytes * 3 > meta_used) | |
3788 | num_bytes = div64_u64(meta_used, 3); | |
3789 | ||
3790 | return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10); | |
3791 | } | |
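
/*
 * A worked sketch of the sizing above (illustrative only): 100GB of
 * data with 4K blocks and 4-byte csums gives a csum term of
 * (100GB / 4K) * 4 * 2 = 200MB, plus 2% of (data + meta). If that
 * total exceeds a third of meta_used it is clamped to meta_used / 3
 * before being aligned up.
 */
static inline u64 example_global_rsv_size(u64 data_used, u64 meta_used,
					  unsigned int blocksize_bits,
					  u64 csum_size, u64 align)
{
	u64 num_bytes = (data_used >> blocksize_bits) * csum_size * 2;

	num_bytes += div64_u64(data_used + meta_used, 50);	/* 2% */
	if (num_bytes * 3 > meta_used)
		num_bytes = div64_u64(meta_used, 3);
	return ALIGN(num_bytes, align);
}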
3792 | ||
3793 | static void update_global_block_rsv(struct btrfs_fs_info *fs_info) | |
3794 | { | |
3795 | struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv; | |
3796 | struct btrfs_space_info *sinfo = block_rsv->space_info; | |
3797 | u64 num_bytes; | |
3798 | ||
3799 | num_bytes = calc_global_metadata_size(fs_info); | |
3800 | ||
3801 | spin_lock(&block_rsv->lock); | |
3802 | spin_lock(&sinfo->lock); | |
3803 | ||
3804 | block_rsv->size = num_bytes; | |
3805 | ||
3806 | num_bytes = sinfo->bytes_used + sinfo->bytes_pinned + | |
3807 | sinfo->bytes_reserved + sinfo->bytes_readonly + | |
3808 | sinfo->bytes_may_use; | |
3809 | ||
3810 | if (sinfo->total_bytes > num_bytes) { | |
3811 | num_bytes = sinfo->total_bytes - num_bytes; | |
3812 | block_rsv->reserved += num_bytes; | |
3813 | sinfo->bytes_may_use += num_bytes; | |
3814 | } | |
3815 | ||
3816 | if (block_rsv->reserved >= block_rsv->size) { | |
3817 | num_bytes = block_rsv->reserved - block_rsv->size; | |
3818 | sinfo->bytes_may_use -= num_bytes; | |
3819 | sinfo->reservation_progress++; | |
3820 | block_rsv->reserved = block_rsv->size; | |
3821 | block_rsv->full = 1; | |
3822 | } | |
3823 | ||
3824 | spin_unlock(&sinfo->lock); | |
3825 | spin_unlock(&block_rsv->lock); | |
3826 | } | |
3827 | ||
3828 | static void init_global_block_rsv(struct btrfs_fs_info *fs_info) | |
3829 | { | |
3830 | struct btrfs_space_info *space_info; | |
3831 | ||
3832 | space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); | |
3833 | fs_info->chunk_block_rsv.space_info = space_info; | |
3834 | ||
3835 | space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); | |
3836 | fs_info->global_block_rsv.space_info = space_info; | |
3837 | fs_info->delalloc_block_rsv.space_info = space_info; | |
3838 | fs_info->trans_block_rsv.space_info = space_info; | |
3839 | fs_info->empty_block_rsv.space_info = space_info; | |
3840 | ||
3841 | fs_info->extent_root->block_rsv = &fs_info->global_block_rsv; | |
3842 | fs_info->csum_root->block_rsv = &fs_info->global_block_rsv; | |
3843 | fs_info->dev_root->block_rsv = &fs_info->global_block_rsv; | |
3844 | fs_info->tree_root->block_rsv = &fs_info->global_block_rsv; | |
3845 | fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv; | |
3846 | ||
3847 | update_global_block_rsv(fs_info); | |
3848 | } | |
3849 | ||
3850 | static void release_global_block_rsv(struct btrfs_fs_info *fs_info) | |
3851 | { | |
3852 | block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1); | |
3853 | WARN_ON(fs_info->delalloc_block_rsv.size > 0); | |
3854 | WARN_ON(fs_info->delalloc_block_rsv.reserved > 0); | |
3855 | WARN_ON(fs_info->trans_block_rsv.size > 0); | |
3856 | WARN_ON(fs_info->trans_block_rsv.reserved > 0); | |
3857 | WARN_ON(fs_info->chunk_block_rsv.size > 0); | |
3858 | WARN_ON(fs_info->chunk_block_rsv.reserved > 0); | |
3859 | } | |
3860 | ||
3861 | void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans, | |
3862 | struct btrfs_root *root) | |
3863 | { | |
3864 | if (!trans->bytes_reserved) | |
3865 | return; | |
3866 | ||
3867 | BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv); | |
3868 | btrfs_block_rsv_release(root, trans->block_rsv, | |
3869 | trans->bytes_reserved); | |
3870 | trans->bytes_reserved = 0; | |
3871 | } | |
3872 | ||
3873 | int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, | |
3874 | struct inode *inode) | |
3875 | { | |
3876 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
3877 | struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root); | |
3878 | struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv; | |
3879 | ||
3880 | /* | |
3881 | * We need to hold space in order to delete our orphan item once we've | |
3882 | * added it, so this takes the reservation now and releases it later, | |
3883 | * when we are truly done with the orphan item. | |
3884 | */ | |
3885 | u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1); | |
3886 | return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); | |
3887 | } | |
3888 | ||
3889 | void btrfs_orphan_release_metadata(struct inode *inode) | |
3890 | { | |
3891 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
3892 | u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1); | |
3893 | btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes); | |
3894 | } | |
3895 | ||
3896 | int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans, | |
3897 | struct btrfs_pending_snapshot *pending) | |
3898 | { | |
3899 | struct btrfs_root *root = pending->root; | |
3900 | struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root); | |
3901 | struct btrfs_block_rsv *dst_rsv = &pending->block_rsv; | |
3902 | /* | |
3903 | * two for root back/forward refs, two for directory entries | |
3904 | * and one for root of the snapshot. | |
3905 | */ | |
3906 | u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5); | |
3907 | dst_rsv->space_info = src_rsv->space_info; | |
3908 | return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); | |
3909 | } | |
3910 | ||
3911 | /** | |
3912 | * drop_outstanding_extent - drop an outstanding extent | |
3913 | * @inode: the inode we're dropping the extent for | |
3914 | * | |
3915 | * This is called when we are freeing up an outstanding extent, either | |
3916 | * after an error or after an extent is written. This will return the number of | |
3917 | * reserved extents that need to be freed. This must be called with | |
3918 | * BTRFS_I(inode)->lock held. | |
3919 | */ | |
3920 | static unsigned drop_outstanding_extent(struct inode *inode) | |
3921 | { | |
3922 | unsigned dropped_extents = 0; | |
3923 | ||
3924 | BUG_ON(!BTRFS_I(inode)->outstanding_extents); | |
3925 | BTRFS_I(inode)->outstanding_extents--; | |
3926 | ||
3927 | /* | |
3928 | * If we have at least as many outstanding extents as we have | |
3929 | * reserved then we need to leave the reserved extents count alone. | |
3930 | */ | |
3931 | if (BTRFS_I(inode)->outstanding_extents >= | |
3932 | BTRFS_I(inode)->reserved_extents) | |
3933 | return 0; | |
3934 | ||
3935 | dropped_extents = BTRFS_I(inode)->reserved_extents - | |
3936 | BTRFS_I(inode)->outstanding_extents; | |
3937 | BTRFS_I(inode)->reserved_extents -= dropped_extents; | |
3938 | return dropped_extents; | |
3939 | } | |
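
/*
 * Illustrative sketch of the accounting above (not used anywhere): an
 * inode with 3 reserved extents but only 1 still outstanding can give
 * 2 reservations back to the caller for release.
 */
static inline unsigned example_extents_to_free(unsigned outstanding,
					       unsigned reserved)
{
	if (outstanding >= reserved)
		return 0;
	return reserved - outstanding;
}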
3940 | ||
3941 | /** | |
3942 | * calc_csum_metadata_size - return the amount of metadata space that must be | |
3943 | * reserved/freed for the given bytes. | |
3944 | * @inode: the inode we're manipulating | |
3945 | * @num_bytes: the number of bytes in question | |
3946 | * @reserve: 1 if we are reserving space, 0 if we are freeing space | |
3947 | * | |
3948 | * This adjusts the number of csum_bytes in the inode and then returns the | |
3949 | * correct amount of metadata that must either be reserved or freed. We | |
3950 | * calculate how many checksums we can fit into one leaf and then divide the | |
3951 | * number of bytes that will need to be checksummed by this value to figure out | |
3952 | * how many checksums will be required. If we are adding bytes then the number | |
3953 | * may go up and we will return the number of additional bytes that must be | |
3954 | * reserved. If it is going down we will return the number of bytes that must | |
3955 | * be freed. | |
3956 | * | |
3957 | * This must be called with BTRFS_I(inode)->lock held. | |
3958 | */ | |
3959 | static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes, | |
3960 | int reserve) | |
3961 | { | |
3962 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
3963 | u64 csum_size; | |
3964 | int num_csums_per_leaf; | |
3965 | int num_csums; | |
3966 | int old_csums; | |
3967 | ||
3968 | if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM && | |
3969 | BTRFS_I(inode)->csum_bytes == 0) | |
3970 | return 0; | |
3971 | ||
3972 | old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize); | |
3973 | if (reserve) | |
3974 | BTRFS_I(inode)->csum_bytes += num_bytes; | |
3975 | else | |
3976 | BTRFS_I(inode)->csum_bytes -= num_bytes; | |
3977 | csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item); | |
3978 | num_csums_per_leaf = (int)div64_u64(csum_size, | |
3979 | sizeof(struct btrfs_csum_item) + | |
3980 | sizeof(struct btrfs_disk_key)); | |
3981 | num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize); | |
3982 | num_csums = num_csums + num_csums_per_leaf - 1; | |
3983 | num_csums = num_csums / num_csums_per_leaf; | |
3984 | ||
3985 | old_csums = old_csums + num_csums_per_leaf - 1; | |
3986 | old_csums = old_csums / num_csums_per_leaf; | |
3987 | ||
3988 | /* No change, no need to reserve more */ | |
3989 | if (old_csums == num_csums) | |
3990 | return 0; | |
3991 | ||
3992 | if (reserve) | |
3993 | return btrfs_calc_trans_metadata_size(root, | |
3994 | num_csums - old_csums); | |
3995 | ||
3996 | return btrfs_calc_trans_metadata_size(root, old_csums - num_csums); | |
3997 | } | |
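
/*
 * A worked sketch of the rounding above (illustrative only): with 4K
 * sectors, checksumming 1MB covers 256 sectors; if, say, 100 csum items
 * fit in a leaf, that rounds up to 3 leaves worth of metadata. The
 * function above reserves (or frees) only the difference in leaf counts.
 */
static inline int example_num_csum_leaves(u64 csum_bytes, u32 sectorsize,
					  int num_csums_per_leaf)
{
	int num_csums = (int)div64_u64(csum_bytes, sectorsize);

	return (num_csums + num_csums_per_leaf - 1) / num_csums_per_leaf;
}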
3998 | ||
3999 | int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) | |
4000 | { | |
4001 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
4002 | struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv; | |
4003 | u64 to_reserve = 0; | |
4004 | unsigned nr_extents = 0; | |
4005 | int ret; | |
4006 | ||
4007 | if (btrfs_transaction_in_commit(root->fs_info)) | |
4008 | schedule_timeout(1); | |
4009 | ||
4010 | num_bytes = ALIGN(num_bytes, root->sectorsize); | |
4011 | ||
4012 | spin_lock(&BTRFS_I(inode)->lock); | |
4013 | BTRFS_I(inode)->outstanding_extents++; | |
4014 | ||
4015 | if (BTRFS_I(inode)->outstanding_extents > | |
4016 | BTRFS_I(inode)->reserved_extents) { | |
4017 | nr_extents = BTRFS_I(inode)->outstanding_extents - | |
4018 | BTRFS_I(inode)->reserved_extents; | |
4019 | BTRFS_I(inode)->reserved_extents += nr_extents; | |
4020 | ||
4021 | to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents); | |
4022 | } | |
4023 | to_reserve += calc_csum_metadata_size(inode, num_bytes, 1); | |
4024 | spin_unlock(&BTRFS_I(inode)->lock); | |
4025 | ||
4026 | ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1); | |
4027 | if (ret) { | |
4028 | unsigned dropped; | |
4029 | /* | |
4030 | * We don't need the return value since our reservation failed, | |
4031 | * we just need to clean up our counter. | |
4032 | */ | |
4033 | spin_lock(&BTRFS_I(inode)->lock); | |
4034 | dropped = drop_outstanding_extent(inode); | |
4035 | WARN_ON(dropped > 1); | |
4036 | BTRFS_I(inode)->csum_bytes -= num_bytes; | |
4037 | spin_unlock(&BTRFS_I(inode)->lock); | |
4038 | return ret; | |
4039 | } | |
4040 | ||
4041 | block_rsv_add_bytes(block_rsv, to_reserve, 1); | |
4042 | ||
4043 | return 0; | |
4044 | } | |
4045 | ||
4046 | /** | |
4047 | * btrfs_delalloc_release_metadata - release a metadata reservation for an inode | |
4048 | * @inode: the inode to release the reservation for | |
4049 | * @num_bytes: the number of bytes we're releasing | |
4050 | * | |
4051 | * This will release the metadata reservation for an inode. This can be called | |
4052 | * once we complete IO for a given set of bytes to release their metadata | |
4053 | * reservations. | |
4054 | */ | |
4055 | void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) | |
4056 | { | |
4057 | struct btrfs_root *root = BTRFS_I(inode)->root; | |
4058 | u64 to_free = 0; | |
4059 | unsigned dropped; | |
4060 | ||
4061 | num_bytes = ALIGN(num_bytes, root->sectorsize); | |
4062 | spin_lock(&BTRFS_I(inode)->lock); | |
4063 | dropped = drop_outstanding_extent(inode); | |
4064 | ||
4065 | to_free = calc_csum_metadata_size(inode, num_bytes, 0); | |
4066 | spin_unlock(&BTRFS_I(inode)->lock); | |
4067 | if (dropped > 0) | |
4068 | to_free += btrfs_calc_trans_metadata_size(root, dropped); | |
4069 | ||
4070 | btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv, | |
4071 | to_free); | |
4072 | } | |
4073 | ||
4074 | /** | |
4075 | * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc | |
4076 | * @inode: inode we're writing to | |
4077 | * @num_bytes: the number of bytes we want to allocate | |
4078 | * | |
4079 | * This will do the following things | |
4080 | * | |
4081 | * o reserve space in the data space info for num_bytes | |
4082 | * o reserve space in the metadata space info based on number of outstanding | |
4083 | * extents and how much csums will be needed | |
4084 | * o add to the inode's ->delalloc_bytes | |
4085 | * o add it to the fs_info's delalloc inodes list. | |
4086 | * | |
4087 | * This will return 0 for success and -ENOSPC if there is no space left. | |
4088 | */ | |
4089 | int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes) | |
4090 | { | |
4091 | int ret; | |
4092 | ||
4093 | ret = btrfs_check_data_free_space(inode, num_bytes); | |
4094 | if (ret) | |
4095 | return ret; | |
4096 | ||
4097 | ret = btrfs_delalloc_reserve_metadata(inode, num_bytes); | |
4098 | if (ret) { | |
4099 | btrfs_free_reserved_data_space(inode, num_bytes); | |
4100 | return ret; | |
4101 | } | |
4102 | ||
4103 | return 0; | |
4104 | } | |
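
/*
 * Minimal usage sketch (hypothetical caller, not taken from this file):
 * a write path reserves both spaces up front and undoes the reservation
 * if the write never happens, e.g.
 *
 *	ret = btrfs_delalloc_reserve_space(inode, num_bytes);
 *	if (ret)
 *		return ret;
 *	ret = do_the_write(inode, num_bytes);	(do_the_write is made up)
 *	if (ret)
 *		btrfs_delalloc_release_space(inode, num_bytes);
 */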
4105 | ||
4106 | /** | |
4107 | * btrfs_delalloc_release_space - release data and metadata space for delalloc | |
4108 | * @inode: inode we're releasing space for | |
4109 | * @num_bytes: the number of bytes we want to free up | |
4110 | * | |
4111 | * This must be matched with a call to btrfs_delalloc_reserve_space. This is | |
4112 | * called in the case that we don't need the metadata AND data reservations | |
4113 | * anymore, for example if there is an error or we insert an inline extent. | |
4114 | * | |
4115 | * This function will release the metadata space that was not used and will | |
4116 | * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes | |
4117 | * list if there are no delalloc bytes left. | |
4118 | */ | |
4119 | void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes) | |
4120 | { | |
4121 | btrfs_delalloc_release_metadata(inode, num_bytes); | |
4122 | btrfs_free_reserved_data_space(inode, num_bytes); | |
4123 | } | |
4124 | ||
4125 | static int update_block_group(struct btrfs_trans_handle *trans, | |
4126 | struct btrfs_root *root, | |
4127 | u64 bytenr, u64 num_bytes, int alloc) | |
4128 | { | |
4129 | struct btrfs_block_group_cache *cache = NULL; | |
4130 | struct btrfs_fs_info *info = root->fs_info; | |
4131 | u64 total = num_bytes; | |
4132 | u64 old_val; | |
4133 | u64 byte_in_group; | |
4134 | int factor; | |
4135 | ||
4136 | /* block accounting for super block */ | |
4137 | spin_lock(&info->delalloc_lock); | |
4138 | old_val = btrfs_super_bytes_used(&info->super_copy); | |
4139 | if (alloc) | |
4140 | old_val += num_bytes; | |
4141 | else | |
4142 | old_val -= num_bytes; | |
4143 | btrfs_set_super_bytes_used(&info->super_copy, old_val); | |
4144 | spin_unlock(&info->delalloc_lock); | |
4145 | ||
4146 | while (total) { | |
4147 | cache = btrfs_lookup_block_group(info, bytenr); | |
4148 | if (!cache) | |
4149 | return -1; | |
4150 | if (cache->flags & (BTRFS_BLOCK_GROUP_DUP | | |
4151 | BTRFS_BLOCK_GROUP_RAID1 | | |
4152 | BTRFS_BLOCK_GROUP_RAID10)) | |
4153 | factor = 2; | |
4154 | else | |
4155 | factor = 1; | |
4156 | /* | |
4157 | * If this block group has free space cache written out, we | |
4158 | * need to make sure to load it if we are removing space. This | |
4159 | * is because we need the unpinning stage to actually add the | |
4160 | * space back to the block group, otherwise we will leak space. | |
4161 | */ | |
4162 | if (!alloc && cache->cached == BTRFS_CACHE_NO) | |
4163 | cache_block_group(cache, trans, NULL, 1); | |
4164 | ||
4165 | byte_in_group = bytenr - cache->key.objectid; | |
4166 | WARN_ON(byte_in_group > cache->key.offset); | |
4167 | ||
4168 | spin_lock(&cache->space_info->lock); | |
4169 | spin_lock(&cache->lock); | |
4170 | ||
4171 | if (btrfs_super_cache_generation(&info->super_copy) != 0 && | |
4172 | cache->disk_cache_state < BTRFS_DC_CLEAR) | |
4173 | cache->disk_cache_state = BTRFS_DC_CLEAR; | |
4174 | ||
4175 | cache->dirty = 1; | |
4176 | old_val = btrfs_block_group_used(&cache->item); | |
4177 | num_bytes = min(total, cache->key.offset - byte_in_group); | |
4178 | if (alloc) { | |
4179 | old_val += num_bytes; | |
4180 | btrfs_set_block_group_used(&cache->item, old_val); | |
4181 | cache->reserved -= num_bytes; | |
4182 | cache->space_info->bytes_reserved -= num_bytes; | |
4183 | cache->space_info->bytes_used += num_bytes; | |
4184 | cache->space_info->disk_used += num_bytes * factor; | |
4185 | spin_unlock(&cache->lock); | |
4186 | spin_unlock(&cache->space_info->lock); | |
4187 | } else { | |
4188 | old_val -= num_bytes; | |
4189 | btrfs_set_block_group_used(&cache->item, old_val); | |
4190 | cache->pinned += num_bytes; | |
4191 | cache->space_info->bytes_pinned += num_bytes; | |
4192 | cache->space_info->bytes_used -= num_bytes; | |
4193 | cache->space_info->disk_used -= num_bytes * factor; | |
4194 | spin_unlock(&cache->lock); | |
4195 | spin_unlock(&cache->space_info->lock); | |
4196 | ||
4197 | set_extent_dirty(info->pinned_extents, | |
4198 | bytenr, bytenr + num_bytes - 1, | |
4199 | GFP_NOFS | __GFP_NOFAIL); | |
4200 | } | |
4201 | btrfs_put_block_group(cache); | |
4202 | total -= num_bytes; | |
4203 | bytenr += num_bytes; | |
4204 | } | |
4205 | return 0; | |
4206 | } | |
4207 | ||
4208 | static u64 first_logical_byte(struct btrfs_root *root, u64 search_start) | |
4209 | { | |
4210 | struct btrfs_block_group_cache *cache; | |
4211 | u64 bytenr; | |
4212 | ||
4213 | cache = btrfs_lookup_first_block_group(root->fs_info, search_start); | |
4214 | if (!cache) | |
4215 | return 0; | |
4216 | ||
4217 | bytenr = cache->key.objectid; | |
4218 | btrfs_put_block_group(cache); | |
4219 | ||
4220 | return bytenr; | |
4221 | } | |
4222 | ||
4223 | static int pin_down_extent(struct btrfs_root *root, | |
4224 | struct btrfs_block_group_cache *cache, | |
4225 | u64 bytenr, u64 num_bytes, int reserved) | |
4226 | { | |
4227 | spin_lock(&cache->space_info->lock); | |
4228 | spin_lock(&cache->lock); | |
4229 | cache->pinned += num_bytes; | |
4230 | cache->space_info->bytes_pinned += num_bytes; | |
4231 | if (reserved) { | |
4232 | cache->reserved -= num_bytes; | |
4233 | cache->space_info->bytes_reserved -= num_bytes; | |
4234 | } | |
4235 | spin_unlock(&cache->lock); | |
4236 | spin_unlock(&cache->space_info->lock); | |
4237 | ||
4238 | set_extent_dirty(root->fs_info->pinned_extents, bytenr, | |
4239 | bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); | |
4240 | return 0; | |
4241 | } | |
4242 | ||
4243 | /* | |
4244 | * this function must be called within transaction | |
4245 | */ | |
4246 | int btrfs_pin_extent(struct btrfs_root *root, | |
4247 | u64 bytenr, u64 num_bytes, int reserved) | |
4248 | { | |
4249 | struct btrfs_block_group_cache *cache; | |
4250 | ||
4251 | cache = btrfs_lookup_block_group(root->fs_info, bytenr); | |
4252 | BUG_ON(!cache); | |
4253 | ||
4254 | pin_down_extent(root, cache, bytenr, num_bytes, reserved); | |
4255 | ||
4256 | btrfs_put_block_group(cache); | |
4257 | return 0; | |
4258 | } | |
4259 | ||
4260 | /** | |
4261 | * btrfs_update_reserved_bytes - update the block_group and space info counters | |
4262 | * @cache: The cache we are manipulating | |
4263 | * @num_bytes: The number of bytes in question | |
4264 | * @reserve: One of the reservation enums | |
4265 | * | |
4266 | * This is called by the allocator when it reserves space, or by somebody who is | |
4267 | * freeing space that was never actually used on disk. For example if you | |
4268 | * reserve some space for a new leaf in transaction A and before transaction A | |
4269 | * commits you free that leaf, you call this with reserve set to RESERVE_FREE | |
4270 | * in order to clear the reservation. | |
4271 | * | |
4272 | * Metadata reservations should be made with RESERVE_ALLOC so we do the proper | |
4273 | * ENOSPC accounting. For data we handle the reservation through clearing the | |
4274 | * delalloc bits in the io_tree. We have to do this since we could end up | |
4275 | * allocating less disk space for the amount of data we have reserved in the | |
4276 | * case of compression. | |
4277 | * | |
4278 | * If this is a reservation and the block group has become read only we cannot | |
4279 | * make the reservation and return -EAGAIN, otherwise this function always | |
4280 | * succeeds. | |
4281 | */ | |
4282 | static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache, | |
4283 | u64 num_bytes, int reserve) | |
4284 | { | |
4285 | struct btrfs_space_info *space_info = cache->space_info; | |
4286 | int ret = 0; | |
4287 | spin_lock(&space_info->lock); | |
4288 | spin_lock(&cache->lock); | |
4289 | if (reserve != RESERVE_FREE) { | |
4290 | if (cache->ro) { | |
4291 | ret = -EAGAIN; | |
4292 | } else { | |
4293 | cache->reserved += num_bytes; | |
4294 | space_info->bytes_reserved += num_bytes; | |
4295 | if (reserve == RESERVE_ALLOC) { | |
4296 | BUG_ON(space_info->bytes_may_use < num_bytes); | |
4297 | space_info->bytes_may_use -= num_bytes; | |
4298 | } | |
4299 | } | |
4300 | } else { | |
4301 | if (cache->ro) | |
4302 | space_info->bytes_readonly += num_bytes; | |
4303 | cache->reserved -= num_bytes; | |
4304 | space_info->bytes_reserved -= num_bytes; | |
4305 | space_info->reservation_progress++; | |
4306 | } | |
4307 | spin_unlock(&cache->lock); | |
4308 | spin_unlock(&space_info->lock); | |
4309 | return ret; | |
4310 | } | |
4311 | ||
4312 | int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, | |
4313 | struct btrfs_root *root) | |
4314 | { | |
4315 | struct btrfs_fs_info *fs_info = root->fs_info; | |
4316 | struct btrfs_caching_control *next; | |
4317 | struct btrfs_caching_control *caching_ctl; | |
4318 | struct btrfs_block_group_cache *cache; | |
4319 | ||
4320 | down_write(&fs_info->extent_commit_sem); | |
4321 | ||
4322 | list_for_each_entry_safe(caching_ctl, next, | |
4323 | &fs_info->caching_block_groups, list) { | |
4324 | cache = caching_ctl->block_group; | |
4325 | if (block_group_cache_done(cache)) { | |
4326 | cache->last_byte_to_unpin = (u64)-1; | |
4327 | list_del_init(&caching_ctl->list); | |
4328 | put_caching_control(caching_ctl); | |
4329 | } else { | |
4330 | cache->last_byte_to_unpin = caching_ctl->progress; | |
4331 | } | |
4332 | } | |
4333 | ||
4334 | if (fs_info->pinned_extents == &fs_info->freed_extents[0]) | |
4335 | fs_info->pinned_extents = &fs_info->freed_extents[1]; | |
4336 | else | |
4337 | fs_info->pinned_extents = &fs_info->freed_extents[0]; | |
4338 | ||
4339 | up_write(&fs_info->extent_commit_sem); | |
4340 | ||
4341 | update_global_block_rsv(fs_info); | |
4342 | return 0; | |
4343 | } | |
4344 | ||
4345 | static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) | |
4346 | { | |
4347 | struct btrfs_fs_info *fs_info = root->fs_info; | |
4348 | struct btrfs_block_group_cache *cache = NULL; | |
4349 | u64 len; | |
4350 | ||
4351 | while (start <= end) { | |
4352 | if (!cache || | |
4353 | start >= cache->key.objectid + cache->key.offset) { | |
4354 | if (cache) | |
4355 | btrfs_put_block_group(cache); | |
4356 | cache = btrfs_lookup_block_group(fs_info, start); | |
4357 | BUG_ON(!cache); | |
4358 | } | |
4359 | ||
4360 | len = cache->key.objectid + cache->key.offset - start; | |
4361 | len = min(len, end + 1 - start); | |
4362 | ||
4363 | if (start < cache->last_byte_to_unpin) { | |
4364 | len = min(len, cache->last_byte_to_unpin - start); | |
4365 | btrfs_add_free_space(cache, start, len); | |
4366 | } | |
4367 | ||
4368 | start += len; | |
4369 | ||
4370 | spin_lock(&cache->space_info->lock); | |
4371 | spin_lock(&cache->lock); | |
4372 | cache->pinned -= len; | |
4373 | cache->space_info->bytes_pinned -= len; | |
4374 | if (cache->ro) | |
4375 | cache->space_info->bytes_readonly += len; | |
4376 | spin_unlock(&cache->lock); | |
4377 | spin_unlock(&cache->space_info->lock); | |
4378 | } | |
4379 | ||
4380 | if (cache) | |
4381 | btrfs_put_block_group(cache); | |
4382 | return 0; | |
4383 | } | |
4384 | ||
4385 | int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, | |
4386 | struct btrfs_root *root) | |
4387 | { | |
4388 | struct btrfs_fs_info *fs_info = root->fs_info; | |
4389 | struct extent_io_tree *unpin; | |
4390 | u64 start; | |
4391 | u64 end; | |
4392 | int ret; | |
4393 | ||
4394 | if (fs_info->pinned_extents == &fs_info->freed_extents[0]) | |
4395 | unpin = &fs_info->freed_extents[1]; | |
4396 | else | |
4397 | unpin = &fs_info->freed_extents[0]; | |
4398 | ||
4399 | while (1) { | |
4400 | ret = find_first_extent_bit(unpin, 0, &start, &end, | |
4401 | EXTENT_DIRTY); | |
4402 | if (ret) | |
4403 | break; | |
4404 | ||
4405 | if (btrfs_test_opt(root, DISCARD)) | |
4406 | ret = btrfs_discard_extent(root, start, | |
4407 | end + 1 - start, NULL); | |
4408 | ||
4409 | clear_extent_dirty(unpin, start, end, GFP_NOFS); | |
4410 | unpin_extent_range(root, start, end); | |
4411 | cond_resched(); | |
4412 | } | |
4413 | ||
4414 | return 0; | |
4415 | } | |
4416 | ||
4417 | static int __btrfs_free_extent(struct btrfs_trans_handle *trans, | |
4418 | struct btrfs_root *root, | |
4419 | u64 bytenr, u64 num_bytes, u64 parent, | |
4420 | u64 root_objectid, u64 owner_objectid, | |
4421 | u64 owner_offset, int refs_to_drop, | |
4422 | struct btrfs_delayed_extent_op *extent_op) | |
4423 | { | |
4424 | struct btrfs_key key; | |
4425 | struct btrfs_path *path; | |
4426 | struct btrfs_fs_info *info = root->fs_info; | |
4427 | struct btrfs_root *extent_root = info->extent_root; | |
4428 | struct extent_buffer *leaf; | |
4429 | struct btrfs_extent_item *ei; | |
4430 | struct btrfs_extent_inline_ref *iref; | |
4431 | int ret; | |
4432 | int is_data; | |
4433 | int extent_slot = 0; | |
4434 | int found_extent = 0; | |
4435 | int num_to_del = 1; | |
4436 | u32 item_size; | |
4437 | u64 refs; | |
4438 | ||
4439 | path = btrfs_alloc_path(); | |
4440 | if (!path) | |
4441 | return -ENOMEM; | |
4442 | ||
4443 | path->reada = 1; | |
4444 | path->leave_spinning = 1; | |
4445 | ||
4446 | is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID; | |
4447 | BUG_ON(!is_data && refs_to_drop != 1); | |
4448 | ||
4449 | ret = lookup_extent_backref(trans, extent_root, path, &iref, | |
4450 | bytenr, num_bytes, parent, | |
4451 | root_objectid, owner_objectid, | |
4452 | owner_offset); | |
4453 | if (ret == 0) { | |
4454 | extent_slot = path->slots[0]; | |
4455 | while (extent_slot >= 0) { | |
4456 | btrfs_item_key_to_cpu(path->nodes[0], &key, | |
4457 | extent_slot); | |
4458 | if (key.objectid != bytenr) | |
4459 | break; | |
4460 | if (key.type == BTRFS_EXTENT_ITEM_KEY && | |
4461 | key.offset == num_bytes) { | |
4462 | found_extent = 1; | |
4463 | break; | |
4464 | } | |
4465 | if (path->slots[0] - extent_slot > 5) | |
4466 | break; | |
4467 | extent_slot--; | |
4468 | } | |
4469 | #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 | |
4470 | item_size = btrfs_item_size_nr(path->nodes[0], extent_slot); | |
4471 | if (found_extent && item_size < sizeof(*ei)) | |
4472 | found_extent = 0; | |
4473 | #endif | |
4474 | if (!found_extent) { | |
4475 | BUG_ON(iref); | |
4476 | ret = remove_extent_backref(trans, extent_root, path, | |
4477 | NULL, refs_to_drop, | |
4478 | is_data); | |
4479 | BUG_ON(ret); | |
4480 | btrfs_release_path(path); | |
4481 | path->leave_spinning = 1; | |
4482 | ||
4483 | key.objectid = bytenr; | |
4484 | key.type = BTRFS_EXTENT_ITEM_KEY; | |
4485 | key.offset = num_bytes; | |
4486 | ||
4487 | ret = btrfs_search_slot(trans, extent_root, | |
4488 | &key, path, -1, 1); | |
4489 | if (ret) { | |
4490 | printk(KERN_ERR "umm, got %d back from search" | |
4491 | ", was looking for %llu\n", ret, | |
4492 | (unsigned long long)bytenr); | |
4493 | if (ret > 0) | |
4494 | btrfs_print_leaf(extent_root, | |
4495 | path->nodes[0]); | |
4496 | } | |
4497 | BUG_ON(ret); | |
4498 | extent_slot = path->slots[0]; | |
4499 | } | |
4500 | } else { | |
4501 | btrfs_print_leaf(extent_root, path->nodes[0]); | |
4502 | WARN_ON(1); | |
4503 | printk(KERN_ERR "btrfs unable to find ref byte nr %llu " | |
4504 | "parent %llu root %llu owner %llu offset %llu\n", | |
4505 | (unsigned long long)bytenr, | |
4506 | (unsigned long long)parent, | |
4507 | (unsigned long long)root_objectid, | |
4508 | (unsigned long long)owner_objectid, | |
4509 | (unsigned long long)owner_offset); | |
4510 | } | |
4511 | ||
4512 | leaf = path->nodes[0]; | |
4513 | item_size = btrfs_item_size_nr(leaf, extent_slot); | |
4514 | #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 | |
4515 | if (item_size < sizeof(*ei)) { | |
4516 | BUG_ON(found_extent || extent_slot != path->slots[0]); | |
4517 | ret = convert_extent_item_v0(trans, extent_root, path, | |
4518 | owner_objectid, 0); | |
4519 | BUG_ON(ret < 0); | |
4520 | ||
4521 | btrfs_release_path(path); | |
4522 | path->leave_spinning = 1; | |
4523 | ||
4524 | key.objectid = bytenr; | |
4525 | key.type = BTRFS_EXTENT_ITEM_KEY; | |
4526 | key.offset = num_bytes; | |
4527 | ||
4528 | ret = btrfs_search_slot(trans, extent_root, &key, path, | |
4529 | -1, 1); | |
4530 | if (ret) { | |
4531 | printk(KERN_ERR "umm, got %d back from search" | |
4532 | ", was looking for %llu\n", ret, | |
4533 | (unsigned long long)bytenr); | |
4534 | btrfs_print_leaf(extent_root, path->nodes[0]); | |
4535 | } | |
4536 | BUG_ON(ret); | |
4537 | extent_slot = path->slots[0]; | |
4538 | leaf = path->nodes[0]; | |
4539 | item_size = btrfs_item_size_nr(leaf, extent_slot); | |
4540 | } | |
4541 | #endif | |
4542 | BUG_ON(item_size < sizeof(*ei)); | |
4543 | ei = btrfs_item_ptr(leaf, extent_slot, | |
4544 | struct btrfs_extent_item); | |
4545 | if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) { | |
4546 | struct btrfs_tree_block_info *bi; | |
4547 | BUG_ON(item_size < sizeof(*ei) + sizeof(*bi)); | |
4548 | bi = (struct btrfs_tree_block_info *)(ei + 1); | |
4549 | WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi)); | |
4550 | } | |
4551 | ||
4552 | refs = btrfs_extent_refs(leaf, ei); | |
4553 | BUG_ON(refs < refs_to_drop); | |
4554 | refs -= refs_to_drop; | |
4555 | ||
4556 | if (refs > 0) { | |
4557 | if (extent_op) | |
4558 | __run_delayed_extent_op(extent_op, leaf, ei); | |
4559 | /* | |
4560 | * In the case of an inline back ref, the reference count will | |
4561 | * be updated by remove_extent_backref | |
4562 | */ | |
4563 | if (iref) { | |
4564 | BUG_ON(!found_extent); | |
4565 | } else { | |
4566 | btrfs_set_extent_refs(leaf, ei, refs); | |
4567 | btrfs_mark_buffer_dirty(leaf); | |
4568 | } | |
4569 | if (found_extent) { | |
4570 | ret = remove_extent_backref(trans, extent_root, path, | |
4571 | iref, refs_to_drop, | |
4572 | is_data); | |
4573 | BUG_ON(ret); | |
4574 | } | |
4575 | } else { | |
4576 | if (found_extent) { | |
4577 | BUG_ON(is_data && refs_to_drop != | |
4578 | extent_data_ref_count(root, path, iref)); | |
4579 | if (iref) { | |
4580 | BUG_ON(path->slots[0] != extent_slot); | |
4581 | } else { | |
4582 | BUG_ON(path->slots[0] != extent_slot + 1); | |
4583 | path->slots[0] = extent_slot; | |
4584 | num_to_del = 2; | |
4585 | } | |
4586 | } | |
4587 | ||
4588 | ret = btrfs_del_items(trans, extent_root, path, path->slots[0], | |
4589 | num_to_del); | |
4590 | BUG_ON(ret); | |
4591 | btrfs_release_path(path); | |
4592 | ||
4593 | if (is_data) { | |
4594 | ret = btrfs_del_csums(trans, root, bytenr, num_bytes); | |
4595 | BUG_ON(ret); | |
4596 | } else { | |
4597 | invalidate_mapping_pages(info->btree_inode->i_mapping, | |
4598 | bytenr >> PAGE_CACHE_SHIFT, | |
4599 | (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT); | |
4600 | } | |
4601 | ||
4602 | ret = update_block_group(trans, root, bytenr, num_bytes, 0); | |
4603 | BUG_ON(ret); | |
4604 | } | |
4605 | btrfs_free_path(path); | |
4606 | return ret; | |
4607 | } | |
4608 | ||
4609 | /* | |
4610 | * when we free a block, it is possible (and likely) that we free the last | |
4611 | * delayed ref for that extent as well. This searches the delayed ref tree for | |
4612 | * a given extent, and if there are no other delayed refs to be processed, it | |
4613 | * removes it from the tree. | |
4614 | */ | |
4615 | static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, | |
4616 | struct btrfs_root *root, u64 bytenr) | |
4617 | { | |
4618 | struct btrfs_delayed_ref_head *head; | |
4619 | struct btrfs_delayed_ref_root *delayed_refs; | |
4620 | struct btrfs_delayed_ref_node *ref; | |
4621 | struct rb_node *node; | |
4622 | int ret = 0; | |
4623 | ||
4624 | delayed_refs = &trans->transaction->delayed_refs; | |
4625 | spin_lock(&delayed_refs->lock); | |
4626 | head = btrfs_find_delayed_ref_head(trans, bytenr); | |
4627 | if (!head) | |
4628 | goto out; | |
4629 | ||
4630 | node = rb_prev(&head->node.rb_node); | |
4631 | if (!node) | |
4632 | goto out; | |
4633 | ||
4634 | ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); | |
4635 | ||
4636 | /* there are still entries for this ref, we can't drop it */ | |
4637 | if (ref->bytenr == bytenr) | |
4638 | goto out; | |
4639 | ||
4640 | if (head->extent_op) { | |
4641 | if (!head->must_insert_reserved) | |
4642 | goto out; | |
4643 | kfree(head->extent_op); | |
4644 | head->extent_op = NULL; | |
4645 | } | |
4646 | ||
4647 | /* | |
4648 | * waiting for the lock here would deadlock. If someone else has it | |
4649 | * locked, they are already in the process of dropping it anyway. | |
4650 | */ | |
4651 | if (!mutex_trylock(&head->mutex)) | |
4652 | goto out; | |
4653 | ||
4654 | /* | |
4655 | * at this point we have a head with no other entries. Go | |
4656 | * ahead and process it. | |
4657 | */ | |
4658 | head->node.in_tree = 0; | |
4659 | rb_erase(&head->node.rb_node, &delayed_refs->root); | |
4660 | ||
4661 | delayed_refs->num_entries--; | |
4662 | ||
4663 | /* | |
4664 | * we don't take a ref on the node because we're removing it from the | |
4665 | * tree, so we just steal the ref the tree was holding. | |
4666 | */ | |
4667 | delayed_refs->num_heads--; | |
4668 | if (list_empty(&head->cluster)) | |
4669 | delayed_refs->num_heads_ready--; | |
4670 | ||
4671 | list_del_init(&head->cluster); | |
4672 | spin_unlock(&delayed_refs->lock); | |
4673 | ||
4674 | BUG_ON(head->extent_op); | |
4675 | if (head->must_insert_reserved) | |
4676 | ret = 1; | |
4677 | ||
4678 | mutex_unlock(&head->mutex); | |
4679 | btrfs_put_delayed_ref(&head->node); | |
4680 | return ret; | |
4681 | out: | |
4682 | spin_unlock(&delayed_refs->lock); | |
4683 | return 0; | |
4684 | } | |
4685 | ||
4686 | void btrfs_free_tree_block(struct btrfs_trans_handle *trans, | |
4687 | struct btrfs_root *root, | |
4688 | struct extent_buffer *buf, | |
4689 | u64 parent, int last_ref) | |
4690 | { | |
4691 | struct btrfs_block_rsv *block_rsv; | |
4692 | struct btrfs_block_group_cache *cache = NULL; | |
4693 | int ret; | |
4694 | ||
4695 | if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { | |
4696 | ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len, | |
4697 | parent, root->root_key.objectid, | |
4698 | btrfs_header_level(buf), | |
4699 | BTRFS_DROP_DELAYED_REF, NULL); | |
4700 | BUG_ON(ret); | |
4701 | } | |
4702 | ||
4703 | if (!last_ref) | |
4704 | return; | |
4705 | ||
4706 | block_rsv = get_block_rsv(trans, root); | |
4707 | cache = btrfs_lookup_block_group(root->fs_info, buf->start); | |
4708 | if (block_rsv->space_info != cache->space_info) | |
4709 | goto out; | |
4710 | ||
4711 | if (btrfs_header_generation(buf) == trans->transid) { | |
4712 | if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { | |
4713 | ret = check_ref_cleanup(trans, root, buf->start); | |
4714 | if (!ret) | |
4715 | goto out; | |
4716 | } | |
4717 | ||
4718 | if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { | |
4719 | pin_down_extent(root, cache, buf->start, buf->len, 1); | |
4720 | goto out; | |
4721 | } | |
4722 | ||
4723 | WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); | |
4724 | ||
4725 | btrfs_add_free_space(cache, buf->start, buf->len); | |
4726 | btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE); | |
4727 | } | |
4728 | out: | |
4729 | /* | |
4730 | * Deleting the buffer, clear the corrupt flag since it doesn't matter | |
4731 | * anymore. | |
4732 | */ | |
4733 | clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags); | |
4734 | btrfs_put_block_group(cache); | |
4735 | } | |
4736 | ||
4737 | int btrfs_free_extent(struct btrfs_trans_handle *trans, | |
4738 | struct btrfs_root *root, | |
4739 | u64 bytenr, u64 num_bytes, u64 parent, | |
4740 | u64 root_objectid, u64 owner, u64 offset) | |
4741 | { | |
4742 | int ret; | |
4743 | ||
4744 | /* | |
4745 | * tree log blocks never actually go into the extent allocation | |
4746 | * tree, just update pinning info and exit early. | |
4747 | */ | |
4748 | if (root_objectid == BTRFS_TREE_LOG_OBJECTID) { | |
4749 | WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID); | |
4750 | /* unlocks the pinned mutex */ | |
4751 | btrfs_pin_extent(root, bytenr, num_bytes, 1); | |
4752 | ret = 0; | |
4753 | } else if (owner < BTRFS_FIRST_FREE_OBJECTID) { | |
4754 | ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes, | |
4755 | parent, root_objectid, (int)owner, | |
4756 | BTRFS_DROP_DELAYED_REF, NULL); | |
4757 | BUG_ON(ret); | |
4758 | } else { | |
4759 | ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes, | |
4760 | parent, root_objectid, owner, | |
4761 | offset, BTRFS_DROP_DELAYED_REF, NULL); | |
4762 | BUG_ON(ret); | |
4763 | } | |
4764 | return ret; | |
4765 | } | |
4766 | ||
4767 | static u64 stripe_align(struct btrfs_root *root, u64 val) | |
4768 | { | |
4769 | u64 mask = ((u64)root->stripesize - 1); | |
4770 | u64 ret = (val + mask) & ~mask; | |
4771 | return ret; | |
4772 | } | |
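
/*
 * Worked example of the alignment above (illustrative only): with a 64K
 * stripesize the mask is 0xffff, so val = 0x10400 (65KB) rounds up to
 * (0x10400 + 0xffff) & ~0xffff = 0x20000 (128KB).
 */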
4773 | ||
4774 | /* | |
4775 | * when we wait for progress in the block group caching, it's because | |
4776 | * our allocation attempt failed at least once. So, we must sleep | |
4777 | * and let some progress happen before we try again. | |
4778 | * | |
4779 | * This function will sleep at least once waiting for new free space to | |
4780 | * show up, and then it will check the block group free space numbers | |
4781 | * for our min num_bytes. Another option is to have it go ahead | |
4782 | * and look in the rbtree for a free extent of a given size, but this | |
4783 | * is a good start. | |
4784 | */ | |
4785 | static noinline int | |
4786 | wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, | |
4787 | u64 num_bytes) | |
4788 | { | |
4789 | struct btrfs_caching_control *caching_ctl; | |
4790 | DEFINE_WAIT(wait); | |
4791 | ||
4792 | caching_ctl = get_caching_control(cache); | |
4793 | if (!caching_ctl) | |
4794 | return 0; | |
4795 | ||
4796 | wait_event(caching_ctl->wait, block_group_cache_done(cache) || | |
4797 | (cache->free_space_ctl->free_space >= num_bytes)); | |
4798 | ||
4799 | put_caching_control(caching_ctl); | |
4800 | return 0; | |
4801 | } | |
4802 | ||
4803 | static noinline int | |
4804 | wait_block_group_cache_done(struct btrfs_block_group_cache *cache) | |
4805 | { | |
4806 | struct btrfs_caching_control *caching_ctl; | |
4807 | DEFINE_WAIT(wait); | |
4808 | ||
4809 | caching_ctl = get_caching_control(cache); | |
4810 | if (!caching_ctl) | |
4811 | return 0; | |
4812 | ||
4813 | wait_event(caching_ctl->wait, block_group_cache_done(cache)); | |
4814 | ||
4815 | put_caching_control(caching_ctl); | |
4816 | return 0; | |
4817 | } | |
4818 | ||
4819 | static int get_block_group_index(struct btrfs_block_group_cache *cache) | |
4820 | { | |
4821 | int index; | |
4822 | if (cache->flags & BTRFS_BLOCK_GROUP_RAID10) | |
4823 | index = 0; | |
4824 | else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1) | |
4825 | index = 1; | |
4826 | else if (cache->flags & BTRFS_BLOCK_GROUP_DUP) | |
4827 | index = 2; | |
4828 | else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0) | |
4829 | index = 3; | |
4830 | else | |
4831 | index = 4; | |
4832 | return index; | |
4833 | } | |
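/*
 * Note the checks above are ordered, so a block group whose flags carry
 * several profile bits maps to the first match: RAID10 (0), RAID1 (1),
 * DUP (2), RAID0 (3), single/unmirrored (4). find_free_extent() below
 * walks space_info->block_groups[index] starting from this index and
 * only moves to the next list when a search comes up empty.
 */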
4834 | ||
4835 | enum btrfs_loop_type { | |
4836 | LOOP_FIND_IDEAL = 0, | |
4837 | LOOP_CACHING_NOWAIT = 1, | |
4838 | LOOP_CACHING_WAIT = 2, | |
4839 | LOOP_ALLOC_CHUNK = 3, | |
4840 | LOOP_NO_EMPTY_SIZE = 4, | |
4841 | }; | |
4842 | ||
4843 | /* | |
4844 | * walks the btree of allocated extents and finds a hole of a given size. | |
4845 | * The key ins is changed to record the hole: | |
4846 | * ins->objectid == block start | |
4847 | * ins->flags = BTRFS_EXTENT_ITEM_KEY | |
4848 | * ins->offset == number of blocks | |
4849 | * Any available blocks before search_start are skipped. | |
4850 | */ | |
4851 | static noinline int find_free_extent(struct btrfs_trans_handle *trans, | |
4852 | struct btrfs_root *orig_root, | |
4853 | u64 num_bytes, u64 empty_size, | |
4854 | u64 search_start, u64 search_end, | |
4855 | u64 hint_byte, struct btrfs_key *ins, | |
4856 | u64 data) | |
4857 | { | |
4858 | int ret = 0; | |
4859 | struct btrfs_root *root = orig_root->fs_info->extent_root; | |
4860 | struct btrfs_free_cluster *last_ptr = NULL; | |
4861 | struct btrfs_block_group_cache *block_group = NULL; | |
4862 | int empty_cluster = 2 * 1024 * 1024; | |
4863 | int allowed_chunk_alloc = 0; | |
4864 | int done_chunk_alloc = 0; | |
4865 | struct btrfs_space_info *space_info; | |
4866 | int last_ptr_loop = 0; | |
4867 | int loop = 0; | |
4868 | int index = 0; | |
4869 | int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ? | |
4870 | RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC; | |
4871 | bool found_uncached_bg = false; | |
4872 | bool failed_cluster_refill = false; | |
4873 | bool failed_alloc = false; | |
4874 | bool use_cluster = true; | |
4875 | u64 ideal_cache_percent = 0; | |
4876 | u64 ideal_cache_offset = 0; | |
4877 | ||
4878 | WARN_ON(num_bytes < root->sectorsize); | |
4879 | btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY); | |
4880 | ins->objectid = 0; | |
4881 | ins->offset = 0; | |
4882 | ||
4883 | space_info = __find_space_info(root->fs_info, data); | |
4884 | if (!space_info) { | |
4885 | printk(KERN_ERR "No space info for %llu\n", data); | |
4886 | return -ENOSPC; | |
4887 | } | |
4888 | ||
4889 | /* | |
4890 | * If the space info is for both data and metadata it means we have a | |
4891 | * small filesystem and we can't use the clustering stuff. | |
4892 | */ | |
4893 | if (btrfs_mixed_space_info(space_info)) | |
4894 | use_cluster = false; | |
4895 | ||
4896 | if (orig_root->ref_cows || empty_size) | |
4897 | allowed_chunk_alloc = 1; | |
4898 | ||
4899 | if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) { | |
4900 | last_ptr = &root->fs_info->meta_alloc_cluster; | |
4901 | if (!btrfs_test_opt(root, SSD)) | |
4902 | empty_cluster = 64 * 1024; | |
4903 | } | |
4904 | ||
4905 | if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster && | |
4906 | btrfs_test_opt(root, SSD)) { | |
4907 | last_ptr = &root->fs_info->data_alloc_cluster; | |
4908 | } | |
4909 | ||
4910 | if (last_ptr) { | |
4911 | spin_lock(&last_ptr->lock); | |
4912 | if (last_ptr->block_group) | |
4913 | hint_byte = last_ptr->window_start; | |
4914 | spin_unlock(&last_ptr->lock); | |
4915 | } | |
4916 | ||
4917 | search_start = max(search_start, first_logical_byte(root, 0)); | |
4918 | search_start = max(search_start, hint_byte); | |
4919 | ||
4920 | if (!last_ptr) | |
4921 | empty_cluster = 0; | |
4922 | ||
4923 | if (search_start == hint_byte) { | |
4924 | ideal_cache: | |
4925 | block_group = btrfs_lookup_block_group(root->fs_info, | |
4926 | search_start); | |
4927 | /* | |
4928 | * we don't want to use the block group if it doesn't match our | |
4929 | * allocation bits, or if it's not cached. | |
4930 | * | |
4931 | * However if we are re-searching with an ideal block group | |
4932 | * picked out then we don't care that the block group is cached. | |
4933 | */ | |
4934 | if (block_group && block_group_bits(block_group, data) && | |
4935 | (block_group->cached != BTRFS_CACHE_NO || | |
4936 | search_start == ideal_cache_offset)) { | |
4937 | down_read(&space_info->groups_sem); | |
4938 | if (list_empty(&block_group->list) || | |
4939 | block_group->ro) { | |
4940 | /* | |
4941 | * someone is removing this block group, | |
4942 | * we can't jump into the have_block_group | |
4943 | * target because our list pointers are not | |
4944 | * valid | |
4945 | */ | |
4946 | btrfs_put_block_group(block_group); | |
4947 | up_read(&space_info->groups_sem); | |
4948 | } else { | |
4949 | index = get_block_group_index(block_group); | |
4950 | goto have_block_group; | |
4951 | } | |
4952 | } else if (block_group) { | |
4953 | btrfs_put_block_group(block_group); | |
4954 | } | |
4955 | } | |
4956 | search: | |
4957 | down_read(&space_info->groups_sem); | |
4958 | list_for_each_entry(block_group, &space_info->block_groups[index], | |
4959 | list) { | |
4960 | u64 offset; | |
4961 | int cached; | |
4962 | ||
4963 | btrfs_get_block_group(block_group); | |
4964 | search_start = block_group->key.objectid; | |
4965 | ||
4966 | /* | |
4967 | * this can happen if we end up cycling through all the | |
4968 | * raid types, but we want to make sure we only allocate | |
4969 | * for the proper type. | |
4970 | */ | |
4971 | if (!block_group_bits(block_group, data)) { | |
4972 | u64 extra = BTRFS_BLOCK_GROUP_DUP | | |
4973 | BTRFS_BLOCK_GROUP_RAID1 | | |
4974 | BTRFS_BLOCK_GROUP_RAID10; | |
4975 | ||
4976 | /* | |
4977 | * if they asked for extra copies and this block group | |
4978 | * doesn't provide them, bail. This does allow us to | |
4979 | * fill raid0 from raid1. | |
4980 | */ | |
4981 | if ((data & extra) && !(block_group->flags & extra)) | |
4982 | goto loop; | |
4983 | } | |
4984 | ||
4985 | have_block_group: | |
4986 | if (unlikely(block_group->cached == BTRFS_CACHE_NO)) { | |
4987 | u64 free_percent; | |
4988 | ||
4989 | ret = cache_block_group(block_group, trans, | |
4990 | orig_root, 1); | |
4991 | if (block_group->cached == BTRFS_CACHE_FINISHED) | |
4992 | goto have_block_group; | |
4993 | ||
4994 | free_percent = btrfs_block_group_used(&block_group->item); | |
4995 | free_percent *= 100; | |
4996 | free_percent = div64_u64(free_percent, | |
4997 | block_group->key.offset); | |
4998 | free_percent = 100 - free_percent; | |
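			/*
			 * e.g. a 1GiB block group (key.offset) with 256MiB
			 * recorded as used: 268435456 * 100 / 1073741824 = 25,
			 * so free_percent ends up 75 and this group becomes a
			 * strong candidate for caching first.
			 */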
4999 | if (free_percent > ideal_cache_percent && | |
5000 | likely(!block_group->ro)) { | |
5001 | ideal_cache_offset = block_group->key.objectid; | |
5002 | ideal_cache_percent = free_percent; | |
5003 | } | |
5004 | ||
5005 | /* | |
5006 | * The caching workers are limited to 2 threads, so we | |
5007 | * can queue as much work as we care to. | |
5008 | */ | |
5009 | if (loop > LOOP_FIND_IDEAL) { | |
5010 | ret = cache_block_group(block_group, trans, | |
5011 | orig_root, 0); | |
5012 | BUG_ON(ret); | |
5013 | } | |
5014 | found_uncached_bg = true; | |
5015 | ||
5016 | /* | |
5017 | * If loop is set for cached only, try the next block | |
5018 | * group. | |
5019 | */ | |
5020 | if (loop == LOOP_FIND_IDEAL) | |
5021 | goto loop; | |
5022 | } | |
5023 | ||
5024 | cached = block_group_cache_done(block_group); | |
5025 | if (unlikely(!cached)) | |
5026 | found_uncached_bg = true; | |
5027 | ||
5028 | if (unlikely(block_group->ro)) | |
5029 | goto loop; | |
5030 | ||
5031 | spin_lock(&block_group->free_space_ctl->tree_lock); | |
5032 | if (cached && | |
5033 | block_group->free_space_ctl->free_space < | |
5034 | num_bytes + empty_size) { | |
5035 | spin_unlock(&block_group->free_space_ctl->tree_lock); | |
5036 | goto loop; | |
5037 | } | |
5038 | spin_unlock(&block_group->free_space_ctl->tree_lock); | |
5039 | ||
5040 | /* | |
5041 | * Ok, we want to try and use the cluster allocator, so let's look | |
5042 | * there, unless we are on LOOP_NO_EMPTY_SIZE. By the time we hit | |
5043 | * that loop we will have tried the cluster allocator plenty of | |
5044 | * times already without finding anything, so we are likely way | |
5045 | * too fragmented for the clustering code to find anything; just | |
5046 | * skip it and let the allocator find whatever block it can | |
5047 | * find. | |
5048 | */ | |
5049 | if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) { | |
5050 | /* | |
5051 | * the refill lock keeps out other | |
5052 | * people trying to start a new cluster | |
5053 | */ | |
5054 | spin_lock(&last_ptr->refill_lock); | |
5055 | if (last_ptr->block_group && | |
5056 | (last_ptr->block_group->ro || | |
5057 | !block_group_bits(last_ptr->block_group, data))) { | |
5058 | offset = 0; | |
5059 | goto refill_cluster; | |
5060 | } | |
5061 | ||
5062 | offset = btrfs_alloc_from_cluster(block_group, last_ptr, | |
5063 | num_bytes, search_start); | |
5064 | if (offset) { | |
5065 | /* we have a block, we're done */ | |
5066 | spin_unlock(&last_ptr->refill_lock); | |
5067 | goto checks; | |
5068 | } | |
5069 | ||
5070 | spin_lock(&last_ptr->lock); | |
5071 | /* | |
5072 | * whoops, this cluster doesn't actually point to | |
5073 | * this block group. Get a ref on the block | |
5074 | * group it does point to and try again | |
5075 | */ | |
5076 | if (!last_ptr_loop && last_ptr->block_group && | |
5077 | last_ptr->block_group != block_group && | |
5078 | index <= | |
5079 | get_block_group_index(last_ptr->block_group)) { | |
5080 | ||
5081 | btrfs_put_block_group(block_group); | |
5082 | block_group = last_ptr->block_group; | |
5083 | btrfs_get_block_group(block_group); | |
5084 | spin_unlock(&last_ptr->lock); | |
5085 | spin_unlock(&last_ptr->refill_lock); | |
5086 | ||
5087 | last_ptr_loop = 1; | |
5088 | search_start = block_group->key.objectid; | |
5089 | /* | |
5090 | * we know this block group is properly | |
5091 | * in the list because | |
5092 | * btrfs_remove_block_group drops the | |
5093 | * cluster before it removes the block | |
5094 | * group from the list | |
5095 | */ | |
5096 | goto have_block_group; | |
5097 | } | |
5098 | spin_unlock(&last_ptr->lock); | |
5099 | refill_cluster: | |
5100 | /* | |
5101 | * this cluster didn't work out, free it and | |
5102 | * start over | |
5103 | */ | |
5104 | btrfs_return_cluster_to_free_space(NULL, last_ptr); | |
5105 | ||
5106 | last_ptr_loop = 0; | |
5107 | ||
5108 | /* allocate a cluster in this block group */ | |
5109 | ret = btrfs_find_space_cluster(trans, root, | |
5110 | block_group, last_ptr, | |
5111 | offset, num_bytes, | |
5112 | empty_cluster + empty_size); | |
5113 | if (ret == 0) { | |
5114 | /* | |
5115 | * now pull our allocation out of this | |
5116 | * cluster | |
5117 | */ | |
5118 | offset = btrfs_alloc_from_cluster(block_group, | |
5119 | last_ptr, num_bytes, | |
5120 | search_start); | |
5121 | if (offset) { | |
5122 | /* we found one, proceed */ | |
5123 | spin_unlock(&last_ptr->refill_lock); | |
5124 | goto checks; | |
5125 | } | |
5126 | } else if (!cached && loop > LOOP_CACHING_NOWAIT | |
5127 | && !failed_cluster_refill) { | |
5128 | spin_unlock(&last_ptr->refill_lock); | |
5129 | ||
5130 | failed_cluster_refill = true; | |
5131 | wait_block_group_cache_progress(block_group, | |
5132 | num_bytes + empty_cluster + empty_size); | |
5133 | goto have_block_group; | |
5134 | } | |
5135 | ||
5136 | /* | |
5137 | * at this point we either didn't find a cluster | |
5138 | * or we weren't able to allocate a block from our | |
5139 | * cluster. Free the cluster we've been trying | |
5140 | * to use, and go to the next block group | |
5141 | */ | |
5142 | btrfs_return_cluster_to_free_space(NULL, last_ptr); | |
5143 | spin_unlock(&last_ptr->refill_lock); | |
5144 | goto loop; | |
5145 | } | |
5146 | ||
5147 | offset = btrfs_find_space_for_alloc(block_group, search_start, | |
5148 | num_bytes, empty_size); | |
5149 | /* | |
5150 | * If we didn't find a chunk, and we haven't failed on this | |
5151 | * block group before, and this block group is in the middle of | |
5152 | * caching and we are ok with waiting, then go ahead and wait | |
5153 | * for progress to be made, and set failed_alloc to true. | |
5154 | * | |
5155 | * If failed_alloc is true then we've already waited on this | |
5156 | * block group once and should move on to the next block group. | |
5157 | */ | |
5158 | if (!offset && !failed_alloc && !cached && | |
5159 | loop > LOOP_CACHING_NOWAIT) { | |
5160 | wait_block_group_cache_progress(block_group, | |
5161 | num_bytes + empty_size); | |
5162 | failed_alloc = true; | |
5163 | goto have_block_group; | |
5164 | } else if (!offset) { | |
5165 | goto loop; | |
5166 | } | |
5167 | checks: | |
5168 | search_start = stripe_align(root, offset); | |
5169 | /* move on to the next group */ | |
5170 | if (search_start + num_bytes >= search_end) { | |
5171 | btrfs_add_free_space(block_group, offset, num_bytes); | |
5172 | goto loop; | |
5173 | } | |
5174 | ||
5175 | /* move on to the next group */ | |
5176 | if (search_start + num_bytes > | |
5177 | block_group->key.objectid + block_group->key.offset) { | |
5178 | btrfs_add_free_space(block_group, offset, num_bytes); | |
5179 | goto loop; | |
5180 | } | |
5181 | ||
5182 | ins->objectid = search_start; | |
5183 | ins->offset = num_bytes; | |
5184 | ||
5185 | if (offset < search_start) | |
5186 | btrfs_add_free_space(block_group, offset, | |
5187 | search_start - offset); | |
5188 | BUG_ON(offset > search_start); | |
5189 | ||
5190 | ret = btrfs_update_reserved_bytes(block_group, num_bytes, | |
5191 | alloc_type); | |
5192 | if (ret == -EAGAIN) { | |
5193 | btrfs_add_free_space(block_group, offset, num_bytes); | |
5194 | goto loop; | |
5195 | } | |
5196 | ||
5197 | /* we are all good, let's return */ | |
5198 | ins->objectid = search_start; | |
5199 | ins->offset = num_bytes; | |
5200 | ||
5201 | if (offset < search_start) | |
5202 | btrfs_add_free_space(block_group, offset, | |
5203 | search_start - offset); | |
5204 | BUG_ON(offset > search_start); | |
5205 | btrfs_put_block_group(block_group); | |
5206 | break; | |
5207 | loop: | |
5208 | failed_cluster_refill = false; | |
5209 | failed_alloc = false; | |
5210 | BUG_ON(index != get_block_group_index(block_group)); | |
5211 | btrfs_put_block_group(block_group); | |
5212 | } | |
5213 | up_read(&space_info->groups_sem); | |
5214 | ||
5215 | if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES) | |
5216 | goto search; | |
5217 | ||
5218 | /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait | |
5219 | * for them to make caching progress. Also | |
5220 | * determine the best possible bg to cache | |
5221 | * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking | |
5222 | * caching kthreads as we move along | |
5223 | * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching | |
5224 | * LOOP_ALLOC_CHUNK, force a chunk allocation and try again | |
5225 | * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try | |
5226 | * again | |
5227 | */ | |
5228 | if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) { | |
5229 | index = 0; | |
5230 | if (loop == LOOP_FIND_IDEAL && found_uncached_bg) { | |
5231 | found_uncached_bg = false; | |
5232 | loop++; | |
5233 | if (!ideal_cache_percent) | |
5234 | goto search; | |
5235 | ||
5236 | /* | |
5237 | * One of the following two things has happened so far: | |
5238 | * | |
5239 | * 1) We found an ideal block group for caching that | |
5240 | * is mostly full and will cache quickly, so we might | |
5241 | * as well wait for it. | |
5242 | * | |
5243 | * 2) We searched for cached only and we didn't find | |
5244 | * anything, and we didn't start any caching kthreads | |
5245 | * either, so chances are we will loop through and | |
5246 | * start a couple caching kthreads, and then come back | |
5247 | * around and just wait for them. This will be slower | |
5248 | * because we will have 2 caching kthreads reading at | |
5249 | * the same time when we could have just started one | |
5250 | * and waited for it to get far enough to give us an | |
5251 | * allocation, so go ahead and go to the wait caching | |
5252 | * loop. | |
5253 | */ | |
5254 | loop = LOOP_CACHING_WAIT; | |
5255 | search_start = ideal_cache_offset; | |
5256 | ideal_cache_percent = 0; | |
5257 | goto ideal_cache; | |
5258 | } else if (loop == LOOP_FIND_IDEAL) { | |
5259 | /* | |
5260 | * Didn't find an uncached bg, wait on anything we find | |
5261 | * next. | |
5262 | */ | |
5263 | loop = LOOP_CACHING_WAIT; | |
5264 | goto search; | |
5265 | } | |
5266 | ||
5267 | loop++; | |
5268 | ||
5269 | if (loop == LOOP_ALLOC_CHUNK) { | |
5270 | if (allowed_chunk_alloc) { | |
5271 | ret = do_chunk_alloc(trans, root, num_bytes + | |
5272 | 2 * 1024 * 1024, data, | |
5273 | CHUNK_ALLOC_LIMITED); | |
5274 | allowed_chunk_alloc = 0; | |
5275 | if (ret == 1) | |
5276 | done_chunk_alloc = 1; | |
5277 | } else if (!done_chunk_alloc && | |
5278 | space_info->force_alloc == | |
5279 | CHUNK_ALLOC_NO_FORCE) { | |
5280 | space_info->force_alloc = CHUNK_ALLOC_LIMITED; | |
5281 | } | |
5282 | ||
5283 | /* | |
5284 | * We didn't allocate a chunk, go ahead and drop the | |
5285 | * empty size and loop again. | |
5286 | */ | |
5287 | if (!done_chunk_alloc) | |
5288 | loop = LOOP_NO_EMPTY_SIZE; | |
5289 | } | |
5290 | ||
5291 | if (loop == LOOP_NO_EMPTY_SIZE) { | |
5292 | empty_size = 0; | |
5293 | empty_cluster = 0; | |
5294 | } | |
5295 | ||
5296 | goto search; | |
5297 | } else if (!ins->objectid) { | |
5298 | ret = -ENOSPC; | |
5299 | } else if (ins->objectid) { | |
5300 | ret = 0; | |
5301 | } | |
5302 | ||
5303 | return ret; | |
5304 | } | |
5305 | ||
5306 | static void dump_space_info(struct btrfs_space_info *info, u64 bytes, | |
5307 | int dump_block_groups) | |
5308 | { | |
5309 | struct btrfs_block_group_cache *cache; | |
5310 | int index = 0; | |
5311 | ||
5312 | spin_lock(&info->lock); | |
5313 | printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n", | |
5314 | (unsigned long long)info->flags, | |
5315 | (unsigned long long)(info->total_bytes - info->bytes_used - | |
5316 | info->bytes_pinned - info->bytes_reserved - | |
5317 | info->bytes_readonly), | |
5318 | (info->full) ? "" : "not "); | |
5319 | printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, " | |
5320 | "reserved=%llu, may_use=%llu, readonly=%llu\n", | |
5321 | (unsigned long long)info->total_bytes, | |
5322 | (unsigned long long)info->bytes_used, | |
5323 | (unsigned long long)info->bytes_pinned, | |
5324 | (unsigned long long)info->bytes_reserved, | |
5325 | (unsigned long long)info->bytes_may_use, | |
5326 | (unsigned long long)info->bytes_readonly); | |
5327 | spin_unlock(&info->lock); | |
5328 | ||
5329 | if (!dump_block_groups) | |
5330 | return; | |
5331 | ||
5332 | down_read(&info->groups_sem); | |
5333 | again: | |
5334 | list_for_each_entry(cache, &info->block_groups[index], list) { | |
5335 | spin_lock(&cache->lock); | |
5336 | printk(KERN_INFO "block group %llu has %llu bytes, %llu used " | |
5337 | "%llu pinned %llu reserved\n", | |
5338 | (unsigned long long)cache->key.objectid, | |
5339 | (unsigned long long)cache->key.offset, | |
5340 | (unsigned long long)btrfs_block_group_used(&cache->item), | |
5341 | (unsigned long long)cache->pinned, | |
5342 | (unsigned long long)cache->reserved); | |
5343 | btrfs_dump_free_space(cache, bytes); | |
5344 | spin_unlock(&cache->lock); | |
5345 | } | |
5346 | if (++index < BTRFS_NR_RAID_TYPES) | |
5347 | goto again; | |
5348 | up_read(&info->groups_sem); | |
5349 | } | |
5350 | ||
5351 | int btrfs_reserve_extent(struct btrfs_trans_handle *trans, | |
5352 | struct btrfs_root *root, | |
5353 | u64 num_bytes, u64 min_alloc_size, | |
5354 | u64 empty_size, u64 hint_byte, | |
5355 | u64 search_end, struct btrfs_key *ins, | |
5356 | u64 data) | |
5357 | { | |
5358 | int ret; | |
5359 | u64 search_start = 0; | |
5360 | ||
5361 | data = btrfs_get_alloc_profile(root, data); | |
5362 | again: | |
5363 | /* | |
5364 | * the only place that sets empty_size is btrfs_realloc_node, which | |
5365 | * is not called recursively on allocations | |
5366 | */ | |
5367 | if (empty_size || root->ref_cows) | |
5368 | ret = do_chunk_alloc(trans, root->fs_info->extent_root, | |
5369 | num_bytes + 2 * 1024 * 1024, data, | |
5370 | CHUNK_ALLOC_NO_FORCE); | |
5371 | ||
5372 | WARN_ON(num_bytes < root->sectorsize); | |
5373 | ret = find_free_extent(trans, root, num_bytes, empty_size, | |
5374 | search_start, search_end, hint_byte, | |
5375 | ins, data); | |
5376 | ||
5377 | if (ret == -ENOSPC && num_bytes > min_alloc_size) { | |
5378 | num_bytes = num_bytes >> 1; | |
5379 | num_bytes = num_bytes & ~(root->sectorsize - 1); | |
5380 | num_bytes = max(num_bytes, min_alloc_size); | |
5381 | do_chunk_alloc(trans, root->fs_info->extent_root, | |
5382 | num_bytes, data, CHUNK_ALLOC_FORCE); | |
5383 | goto again; | |
5384 | } | |
5385 | if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) { | |
5386 | struct btrfs_space_info *sinfo; | |
5387 | ||
5388 | sinfo = __find_space_info(root->fs_info, data); | |
5389 | printk(KERN_ERR "btrfs allocation failed flags %llu, " | |
5390 | "wanted %llu\n", (unsigned long long)data, | |
5391 | (unsigned long long)num_bytes); | |
5392 | dump_space_info(sinfo, num_bytes, 1); | |
5393 | } | |
5394 | ||
5395 | trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset); | |
5396 | ||
5397 | return ret; | |
5398 | } | |
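/*
 * A standalone sketch of the -ENOSPC fallback above: the request is
 * halved, truncated down to a sector boundary, and clamped to
 * min_alloc_size before each forced chunk allocation and retry.
 * Assuming a 4096-byte sectorsize:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t num_bytes = 1024 * 1024;	/* initial request: 1MiB */
	uint64_t min_alloc_size = 64 * 1024;	/* caller's floor: 64KiB */
	uint64_t sectorsize = 4096;

	while (num_bytes > min_alloc_size) {	/* stands in for repeated -ENOSPC */
		num_bytes >>= 1;
		num_bytes &= ~(sectorsize - 1);
		if (num_bytes < min_alloc_size)
			num_bytes = min_alloc_size;
		printf("retry with %llu bytes\n", (unsigned long long)num_bytes);
	}
	/* prints 524288, 262144, 131072, 65536, then the loop stops */
	return 0;
}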
5399 | ||
5400 | int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len) | |
5401 | { | |
5402 | struct btrfs_block_group_cache *cache; | |
5403 | int ret = 0; | |
5404 | ||
5405 | cache = btrfs_lookup_block_group(root->fs_info, start); | |
5406 | if (!cache) { | |
5407 | printk(KERN_ERR "Unable to find block group for %llu\n", | |
5408 | (unsigned long long)start); | |
5409 | return -ENOSPC; | |
5410 | } | |
5411 | ||
5412 | if (btrfs_test_opt(root, DISCARD)) | |
5413 | ret = btrfs_discard_extent(root, start, len, NULL); | |
5414 | ||
5415 | btrfs_add_free_space(cache, start, len); | |
5416 | btrfs_update_reserved_bytes(cache, len, RESERVE_FREE); | |
5417 | btrfs_put_block_group(cache); | |
5418 | ||
5419 | trace_btrfs_reserved_extent_free(root, start, len); | |
5420 | ||
5421 | return ret; | |
5422 | } | |
5423 | ||
5424 | static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, | |
5425 | struct btrfs_root *root, | |
5426 | u64 parent, u64 root_objectid, | |
5427 | u64 flags, u64 owner, u64 offset, | |
5428 | struct btrfs_key *ins, int ref_mod) | |
5429 | { | |
5430 | int ret; | |
5431 | struct btrfs_fs_info *fs_info = root->fs_info; | |
5432 | struct btrfs_extent_item *extent_item; | |
5433 | struct btrfs_extent_inline_ref *iref; | |
5434 | struct btrfs_path *path; | |
5435 | struct extent_buffer *leaf; | |
5436 | int type; | |
5437 | u32 size; | |
5438 | ||
5439 | if (parent > 0) | |
5440 | type = BTRFS_SHARED_DATA_REF_KEY; | |
5441 | else | |
5442 | type = BTRFS_EXTENT_DATA_REF_KEY; | |
5443 | ||
5444 | size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type); | |
5445 | ||
5446 | path = btrfs_alloc_path(); | |
5447 | if (!path) | |
5448 | return -ENOMEM; | |
5449 | ||
5450 | path->leave_spinning = 1; | |
5451 | ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, | |
5452 | ins, size); | |
5453 | BUG_ON(ret); | |
5454 | ||
5455 | leaf = path->nodes[0]; | |
5456 | extent_item = btrfs_item_ptr(leaf, path->slots[0], | |
5457 | struct btrfs_extent_item); | |
5458 | btrfs_set_extent_refs(leaf, extent_item, ref_mod); | |
5459 | btrfs_set_extent_generation(leaf, extent_item, trans->transid); | |
5460 | btrfs_set_extent_flags(leaf, extent_item, | |
5461 | flags | BTRFS_EXTENT_FLAG_DATA); | |
5462 | ||
5463 | iref = (struct btrfs_extent_inline_ref *)(extent_item + 1); | |
5464 | btrfs_set_extent_inline_ref_type(leaf, iref, type); | |
5465 | if (parent > 0) { | |
5466 | struct btrfs_shared_data_ref *ref; | |
5467 | ref = (struct btrfs_shared_data_ref *)(iref + 1); | |
5468 | btrfs_set_extent_inline_ref_offset(leaf, iref, parent); | |
5469 | btrfs_set_shared_data_ref_count(leaf, ref, ref_mod); | |
5470 | } else { | |
5471 | struct btrfs_extent_data_ref *ref; | |
5472 | ref = (struct btrfs_extent_data_ref *)(&iref->offset); | |
5473 | btrfs_set_extent_data_ref_root(leaf, ref, root_objectid); | |
5474 | btrfs_set_extent_data_ref_objectid(leaf, ref, owner); | |
5475 | btrfs_set_extent_data_ref_offset(leaf, ref, offset); | |
5476 | btrfs_set_extent_data_ref_count(leaf, ref, ref_mod); | |
5477 | } | |
5478 | ||
5479 | btrfs_mark_buffer_dirty(path->nodes[0]); | |
5480 | btrfs_free_path(path); | |
5481 | ||
5482 | ret = update_block_group(trans, root, ins->objectid, ins->offset, 1); | |
5483 | if (ret) { | |
5484 | printk(KERN_ERR "btrfs update block group failed for %llu " | |
5485 | "%llu\n", (unsigned long long)ins->objectid, | |
5486 | (unsigned long long)ins->offset); | |
5487 | BUG(); | |
5488 | } | |
5489 | return ret; | |
5490 | } | |
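/*
 * Layout of the item created above: the inline reference immediately
 * follows the extent item. For a shared data ref (parent > 0) the full
 * ref struct follows the inline ref, while a normal data ref starts at
 * the inline ref's offset field:
 *
 *   parent > 0:  [extent_item][iref: SHARED_DATA_REF_KEY + parent][shared_data_ref]
 *   parent == 0: [extent_item][iref type: EXTENT_DATA_REF_KEY][extent_data_ref]
 */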
5491 | ||
5492 | static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, | |
5493 | struct btrfs_root *root, | |
5494 | u64 parent, u64 root_objectid, | |
5495 | u64 flags, struct btrfs_disk_key *key, | |
5496 | int level, struct btrfs_key *ins) | |
5497 | { | |
5498 | int ret; | |
5499 | struct btrfs_fs_info *fs_info = root->fs_info; | |
5500 | struct btrfs_extent_item *extent_item; | |
5501 | struct btrfs_tree_block_info *block_info; | |
5502 | struct btrfs_extent_inline_ref *iref; | |
5503 | struct btrfs_path *path; | |
5504 | struct extent_buffer *leaf; | |
5505 | u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref); | |
5506 | ||
5507 | path = btrfs_alloc_path(); | |
5508 | if (!path) | |
5509 | return -ENOMEM; | |
5510 | ||
5511 | path->leave_spinning = 1; | |
5512 | ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, | |
5513 | ins, size); | |
5514 | BUG_ON(ret); | |
5515 | ||
5516 | leaf = path->nodes[0]; | |
5517 | extent_item = btrfs_item_ptr(leaf, path->slots[0], | |
5518 | struct btrfs_extent_item); | |
5519 | btrfs_set_extent_refs(leaf, extent_item, 1); | |
5520 | btrfs_set_extent_generation(leaf, extent_item, trans->transid); | |
5521 | btrfs_set_extent_flags(leaf, extent_item, | |
5522 | flags | BTRFS_EXTENT_FLAG_TREE_BLOCK); | |
5523 | block_info = (struct btrfs_tree_block_info *)(extent_item + 1); | |
5524 | ||
5525 | btrfs_set_tree_block_key(leaf, block_info, key); | |
5526 | btrfs_set_tree_block_level(leaf, block_info, level); | |
5527 | ||
5528 | iref = (struct btrfs_extent_inline_ref *)(block_info + 1); | |
5529 | if (parent > 0) { | |
5530 | BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)); | |
5531 | btrfs_set_extent_inline_ref_type(leaf, iref, | |
5532 | BTRFS_SHARED_BLOCK_REF_KEY); | |
5533 | btrfs_set_extent_inline_ref_offset(leaf, iref, parent); | |
5534 | } else { | |
5535 | btrfs_set_extent_inline_ref_type(leaf, iref, | |
5536 | BTRFS_TREE_BLOCK_REF_KEY); | |
5537 | btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid); | |
5538 | } | |
5539 | ||
5540 | btrfs_mark_buffer_dirty(leaf); | |
5541 | btrfs_free_path(path); | |
5542 | ||
5543 | ret = update_block_group(trans, root, ins->objectid, ins->offset, 1); | |
5544 | if (ret) { | |
5545 | printk(KERN_ERR "btrfs update block group failed for %llu " | |
5546 | "%llu\n", (unsigned long long)ins->objectid, | |
5547 | (unsigned long long)ins->offset); | |
5548 | BUG(); | |
5549 | } | |
5550 | return ret; | |
5551 | } | |
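/*
 * Compared with a data extent, a tree block's extent item carries an
 * extra btrfs_tree_block_info (first key + level) between the extent
 * item and the inline reference:
 *
 *   [extent_item][tree_block_info][iref: SHARED_BLOCK_REF_KEY or TREE_BLOCK_REF_KEY]
 */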
5552 | ||
5553 | int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, | |
5554 | struct btrfs_root *root, | |
5555 | u64 root_objectid, u64 owner, | |
5556 | u64 offset, struct btrfs_key *ins) | |
5557 | { | |
5558 | int ret; | |
5559 | ||
5560 | BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID); | |
5561 | ||
5562 | ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset, | |
5563 | 0, root_objectid, owner, offset, | |
5564 | BTRFS_ADD_DELAYED_EXTENT, NULL); | |
5565 | return ret; | |
5566 | } | |
5567 | ||
5568 | /* | |
5569 | * this is used by the tree logging recovery code. It records that | |
5570 | * an extent has been allocated and makes sure to clear the free | |
5571 | * space cache bits as well | |
5572 | */ | |
5573 | int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, | |
5574 | struct btrfs_root *root, | |
5575 | u64 root_objectid, u64 owner, u64 offset, | |
5576 | struct btrfs_key *ins) | |
5577 | { | |
5578 | int ret; | |
5579 | struct btrfs_block_group_cache *block_group; | |
5580 | struct btrfs_caching_control *caching_ctl; | |
5581 | u64 start = ins->objectid; | |
5582 | u64 num_bytes = ins->offset; | |
5583 | ||
5584 | block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid); | |
5585 | cache_block_group(block_group, trans, NULL, 0); | |
5586 | caching_ctl = get_caching_control(block_group); | |
5587 | ||
5588 | if (!caching_ctl) { | |
5589 | BUG_ON(!block_group_cache_done(block_group)); | |
5590 | ret = btrfs_remove_free_space(block_group, start, num_bytes); | |
5591 | BUG_ON(ret); | |
5592 | } else { | |
5593 | mutex_lock(&caching_ctl->mutex); | |
5594 | ||
5595 | if (start >= caching_ctl->progress) { | |
5596 | ret = add_excluded_extent(root, start, num_bytes); | |
5597 | BUG_ON(ret); | |
5598 | } else if (start + num_bytes <= caching_ctl->progress) { | |
5599 | ret = btrfs_remove_free_space(block_group, | |
5600 | start, num_bytes); | |
5601 | BUG_ON(ret); | |
5602 | } else { | |
5603 | num_bytes = caching_ctl->progress - start; | |
5604 | ret = btrfs_remove_free_space(block_group, | |
5605 | start, num_bytes); | |
5606 | BUG_ON(ret); | |
5607 | ||
5608 | start = caching_ctl->progress; | |
5609 | num_bytes = ins->objectid + ins->offset - | |
5610 | caching_ctl->progress; | |
5611 | ret = add_excluded_extent(root, start, num_bytes); | |
5612 | BUG_ON(ret); | |
5613 | } | |
5614 | ||
5615 | mutex_unlock(&caching_ctl->mutex); | |
5616 | put_caching_control(caching_ctl); | |
5617 | } | |
5618 | ||
5619 | ret = btrfs_update_reserved_bytes(block_group, ins->offset, | |
5620 | RESERVE_ALLOC_NO_ACCOUNT); | |
5621 | BUG_ON(ret); | |
5622 | btrfs_put_block_group(block_group); | |
5623 | ret = alloc_reserved_file_extent(trans, root, 0, root_objectid, | |
5624 | 0, owner, offset, ins, 1); | |
5625 | return ret; | |
5626 | } | |
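/*
 * Worked example of the three cases above, with the caching scan at byte
 * offset P: an extent starting at or past P is only marked excluded; an
 * extent ending at or before P is already in the free space cache and
 * gets removed from it; an extent straddling P, say [P - 4096, P + 8192),
 * is split so that the first 4096 bytes are removed from the cache and
 * the remaining 8192 are marked excluded.
 */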
5627 | ||
5628 | struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans, | |
5629 | struct btrfs_root *root, | |
5630 | u64 bytenr, u32 blocksize, | |
5631 | int level) | |
5632 | { | |
5633 | struct extent_buffer *buf; | |
5634 | ||
5635 | buf = btrfs_find_create_tree_block(root, bytenr, blocksize); | |
5636 | if (!buf) | |
5637 | return ERR_PTR(-ENOMEM); | |
5638 | btrfs_set_header_generation(buf, trans->transid); | |
5639 | btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level); | |
5640 | btrfs_tree_lock(buf); | |
5641 | clean_tree_block(trans, root, buf); | |
5642 | ||
5643 | btrfs_set_lock_blocking(buf); | |
5644 | btrfs_set_buffer_uptodate(buf); | |
5645 | ||
5646 | if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { | |
5647 | /* | |
5648 | * we allow two log transactions at a time, use different | |
5649 | * EXTENT bits to differentiate dirty pages. | |
5650 | */ | |
5651 | if (root->log_transid % 2 == 0) | |
5652 | set_extent_dirty(&root->dirty_log_pages, buf->start, | |
5653 | buf->start + buf->len - 1, GFP_NOFS); | |
5654 | else | |
5655 | set_extent_new(&root->dirty_log_pages, buf->start, | |
5656 | buf->start + buf->len - 1, GFP_NOFS); | |
5657 | } else { | |
5658 | set_extent_dirty(&trans->transaction->dirty_pages, buf->start, | |
5659 | buf->start + buf->len - 1, GFP_NOFS); | |
5660 | } | |
5661 | trans->blocks_used++; | |
5662 | /* this returns a buffer locked for blocking */ | |
5663 | return buf; | |
5664 | } | |
5665 | ||
5666 | static struct btrfs_block_rsv * | |
5667 | use_block_rsv(struct btrfs_trans_handle *trans, | |
5668 | struct btrfs_root *root, u32 blocksize) | |
5669 | { | |
5670 | struct btrfs_block_rsv *block_rsv; | |
5671 | struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; | |
5672 | int ret; | |
5673 | ||
5674 | block_rsv = get_block_rsv(trans, root); | |
5675 | ||
5676 | if (block_rsv->size == 0) { | |
5677 | ret = reserve_metadata_bytes(trans, root, block_rsv, | |
5678 | blocksize, 0); | |
5679 | /* | |
5680 | * If we couldn't reserve metadata bytes try and use some from | |
5681 | * the global reserve. | |
5682 | */ | |
5683 | if (ret && block_rsv != global_rsv) { | |
5684 | ret = block_rsv_use_bytes(global_rsv, blocksize); | |
5685 | if (!ret) | |
5686 | return global_rsv; | |
5687 | return ERR_PTR(ret); | |
5688 | } else if (ret) { | |
5689 | return ERR_PTR(ret); | |
5690 | } | |
5691 | return block_rsv; | |
5692 | } | |
5693 | ||
5694 | ret = block_rsv_use_bytes(block_rsv, blocksize); | |
5695 | if (!ret) | |
5696 | return block_rsv; | |
5697 | if (ret) { | |
5698 | WARN_ON(1); | |
5699 | ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize, | |
5700 | 0); | |
5701 | if (!ret) { | |
5702 | spin_lock(&block_rsv->lock); | |
5703 | block_rsv->size += blocksize; | |
5704 | spin_unlock(&block_rsv->lock); | |
5705 | return block_rsv; | |
5706 | } else if (ret && block_rsv != global_rsv) { | |
5707 | ret = block_rsv_use_bytes(global_rsv, blocksize); | |
5708 | if (!ret) | |
5709 | return global_rsv; | |
5710 | } | |
5711 | } | |
5712 | ||
5713 | return ERR_PTR(-ENOSPC); | |
5714 | } | |
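/*
 * To summarize the fallback order above: an unsized block_rsv tries a
 * fresh metadata reservation and then the global reserve; a sized one
 * spends its own bytes first, then tries to refill itself (growing its
 * size on success), and only then falls back to the global reserve
 * before returning -ENOSPC.
 */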
5715 | ||
5716 | static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize) | |
5717 | { | |
5718 | block_rsv_add_bytes(block_rsv, blocksize, 0); | |
5719 | block_rsv_release_bytes(block_rsv, NULL, 0); | |
5720 | } | |
5721 | ||
5722 | /* | |
5723 | * finds a free extent and does all the dirty work required for allocation. | |
5724 | * It returns the key for the extent through ins, and a tree buffer for | |
5725 | * the first block of the extent through the return value. | |
5726 | * | |
5727 | * returns the tree buffer or an ERR_PTR on failure. | |
5728 | */ | |
5729 | struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, | |
5730 | struct btrfs_root *root, u32 blocksize, | |
5731 | u64 parent, u64 root_objectid, | |
5732 | struct btrfs_disk_key *key, int level, | |
5733 | u64 hint, u64 empty_size) | |
5734 | { | |
5735 | struct btrfs_key ins; | |
5736 | struct btrfs_block_rsv *block_rsv; | |
5737 | struct extent_buffer *buf; | |
5738 | u64 flags = 0; | |
5739 | int ret; | |
5740 | ||
5741 | ||
5742 | block_rsv = use_block_rsv(trans, root, blocksize); | |
5743 | if (IS_ERR(block_rsv)) | |
5744 | return ERR_CAST(block_rsv); | |
5745 | ||
5746 | ret = btrfs_reserve_extent(trans, root, blocksize, blocksize, | |
5747 | empty_size, hint, (u64)-1, &ins, 0); | |
5748 | if (ret) { | |
5749 | unuse_block_rsv(block_rsv, blocksize); | |
5750 | return ERR_PTR(ret); | |
5751 | } | |
5752 | ||
5753 | buf = btrfs_init_new_buffer(trans, root, ins.objectid, | |
5754 | blocksize, level); | |
5755 | BUG_ON(IS_ERR(buf)); | |
5756 | ||
5757 | if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { | |
5758 | if (parent == 0) | |
5759 | parent = ins.objectid; | |
5760 | flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; | |
5761 | } else | |
5762 | BUG_ON(parent > 0); | |
5763 | ||
5764 | if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { | |
5765 | struct btrfs_delayed_extent_op *extent_op; | |
5766 | extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS); | |
5767 | BUG_ON(!extent_op); | |
5768 | if (key) | |
5769 | memcpy(&extent_op->key, key, sizeof(extent_op->key)); | |
5770 | else | |
5771 | memset(&extent_op->key, 0, sizeof(extent_op->key)); | |
5772 | extent_op->flags_to_set = flags; | |
5773 | extent_op->update_key = 1; | |
5774 | extent_op->update_flags = 1; | |
5775 | extent_op->is_data = 0; | |
5776 | ||
5777 | ret = btrfs_add_delayed_tree_ref(trans, ins.objectid, | |
5778 | ins.offset, parent, root_objectid, | |
5779 | level, BTRFS_ADD_DELAYED_EXTENT, | |
5780 | extent_op); | |
5781 | BUG_ON(ret); | |
5782 | } | |
5783 | return buf; | |
5784 | } | |
5785 | ||
5786 | struct walk_control { | |
5787 | u64 refs[BTRFS_MAX_LEVEL]; | |
5788 | u64 flags[BTRFS_MAX_LEVEL]; | |
5789 | struct btrfs_key update_progress; | |
5790 | int stage; | |
5791 | int level; | |
5792 | int shared_level; | |
5793 | int update_ref; | |
5794 | int keep_locks; | |
5795 | int reada_slot; | |
5796 | int reada_count; | |
5797 | }; | |
5798 | ||
5799 | #define DROP_REFERENCE 1 | |
5800 | #define UPDATE_BACKREF 2 | |
5801 | ||
5802 | static noinline void reada_walk_down(struct btrfs_trans_handle *trans, | |
5803 | struct btrfs_root *root, | |
5804 | struct walk_control *wc, | |
5805 | struct btrfs_path *path) | |
5806 | { | |
5807 | u64 bytenr; | |
5808 | u64 generation; | |
5809 | u64 refs; | |
5810 | u64 flags; | |
5811 | u32 nritems; | |
5812 | u32 blocksize; | |
5813 | struct btrfs_key key; | |
5814 | struct extent_buffer *eb; | |
5815 | int ret; | |
5816 | int slot; | |
5817 | int nread = 0; | |
5818 | ||
5819 | if (path->slots[wc->level] < wc->reada_slot) { | |
5820 | wc->reada_count = wc->reada_count * 2 / 3; | |
5821 | wc->reada_count = max(wc->reada_count, 2); | |
5822 | } else { | |
5823 | wc->reada_count = wc->reada_count * 3 / 2; | |
5824 | wc->reada_count = min_t(int, wc->reada_count, | |
5825 | BTRFS_NODEPTRS_PER_BLOCK(root)); | |
5826 | } | |
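	/*
	 * e.g. with reada_count at 32: walking back below the previous
	 * readahead window shrinks it 32 -> 21 -> 14 -> ... (never below 2),
	 * while walking forward grows it 32 -> 48 -> 72 -> ... up to the
	 * number of node pointers per block.
	 */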
5827 | ||
5828 | eb = path->nodes[wc->level]; | |
5829 | nritems = btrfs_header_nritems(eb); | |
5830 | blocksize = btrfs_level_size(root, wc->level - 1); | |
5831 | ||
5832 | for (slot = path->slots[wc->level]; slot < nritems; slot++) { | |
5833 | if (nread >= wc->reada_count) | |
5834 | break; | |
5835 | ||
5836 | cond_resched(); | |
5837 | bytenr = btrfs_node_blockptr(eb, slot); | |
5838 | generation = btrfs_node_ptr_generation(eb, slot); | |
5839 | ||
5840 | if (slot == path->slots[wc->level]) | |
5841 | goto reada; | |
5842 | ||
5843 | if (wc->stage == UPDATE_BACKREF && | |
5844 | generation <= root->root_key.offset) | |
5845 | continue; | |
5846 | ||
5847 | /* We don't lock the tree block, it's OK to be racy here */ | |
5848 | ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize, | |
5849 | &refs, &flags); | |
5850 | BUG_ON(ret); | |
5851 | BUG_ON(refs == 0); | |
5852 | ||
5853 | if (wc->stage == DROP_REFERENCE) { | |
5854 | if (refs == 1) | |
5855 | goto reada; | |
5856 | ||
5857 | if (wc->level == 1 && | |
5858 | (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) | |
5859 | continue; | |
5860 | if (!wc->update_ref || | |
5861 | generation <= root->root_key.offset) | |
5862 | continue; | |
5863 | btrfs_node_key_to_cpu(eb, &key, slot); | |
5864 | ret = btrfs_comp_cpu_keys(&key, | |
5865 | &wc->update_progress); | |
5866 | if (ret < 0) | |
5867 | continue; | |
5868 | } else { | |
5869 | if (wc->level == 1 && | |
5870 | (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) | |
5871 | continue; | |
5872 | } | |
5873 | reada: | |
5874 | ret = readahead_tree_block(root, bytenr, blocksize, | |
5875 | generation); | |
5876 | if (ret) | |
5877 | break; | |
5878 | nread++; | |
5879 | } | |
5880 | wc->reada_slot = slot; | |
5881 | } | |
5882 | ||
5883 | /* | |
5884 | * helper to process tree block while walking down the tree. | |
5885 | * | |
5886 | * when wc->stage == UPDATE_BACKREF, this function updates | |
5887 | * back refs for pointers in the block. | |
5888 | * | |
5889 | * NOTE: return value 1 means we should stop walking down. | |
5890 | */ | |
5891 | static noinline int walk_down_proc(struct btrfs_trans_handle *trans, | |
5892 | struct btrfs_root *root, | |
5893 | struct btrfs_path *path, | |
5894 | struct walk_control *wc, int lookup_info) | |
5895 | { | |
5896 | int level = wc->level; | |
5897 | struct extent_buffer *eb = path->nodes[level]; | |
5898 | u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF; | |
5899 | int ret; | |
5900 | ||
5901 | if (wc->stage == UPDATE_BACKREF && | |
5902 | btrfs_header_owner(eb) != root->root_key.objectid) | |
5903 | return 1; | |
5904 | ||
5905 | /* | |
5906 | * when the reference count of a tree block is 1, it won't increase | |
5907 | * again. Once the full backref flag is set, we never clear it. | |
5908 | */ | |
5909 | if (lookup_info && | |
5910 | ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) || | |
5911 | (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) { | |
5912 | BUG_ON(!path->locks[level]); | |
5913 | ret = btrfs_lookup_extent_info(trans, root, | |
5914 | eb->start, eb->len, | |
5915 | &wc->refs[level], | |
5916 | &wc->flags[level]); | |
5917 | BUG_ON(ret); | |
5918 | BUG_ON(wc->refs[level] == 0); | |
5919 | } | |
5920 | ||
5921 | if (wc->stage == DROP_REFERENCE) { | |
5922 | if (wc->refs[level] > 1) | |
5923 | return 1; | |
5924 | ||
5925 | if (path->locks[level] && !wc->keep_locks) { | |
5926 | btrfs_tree_unlock_rw(eb, path->locks[level]); | |
5927 | path->locks[level] = 0; | |
5928 | } | |
5929 | return 0; | |
5930 | } | |
5931 | ||
5932 | /* wc->stage == UPDATE_BACKREF */ | |
5933 | if (!(wc->flags[level] & flag)) { | |
5934 | BUG_ON(!path->locks[level]); | |
5935 | ret = btrfs_inc_ref(trans, root, eb, 1); | |
5936 | BUG_ON(ret); | |
5937 | ret = btrfs_dec_ref(trans, root, eb, 0); | |
5938 | BUG_ON(ret); | |
5939 | ret = btrfs_set_disk_extent_flags(trans, root, eb->start, | |
5940 | eb->len, flag, 0); | |
5941 | BUG_ON(ret); | |
5942 | wc->flags[level] |= flag; | |
5943 | } | |
5944 | ||
5945 | /* | |
5946 | * the block is shared by multiple trees, so it's not good to | |
5947 | * keep the tree lock | |
5948 | */ | |
5949 | if (path->locks[level] && level > 0) { | |
5950 | btrfs_tree_unlock_rw(eb, path->locks[level]); | |
5951 | path->locks[level] = 0; | |
5952 | } | |
5953 | return 0; | |
5954 | } | |
5955 | ||
5956 | /* | |
5957 | * helper to process a tree block pointer. | |
5958 | * | |
5959 | * when wc->stage == DROP_REFERENCE, this function checks the | |
5960 | * reference count of the block pointed to. if the block | |
5961 | * is shared and we need to update back refs for the subtree | |
5962 | * rooted at the block, this function changes wc->stage to | |
5963 | * UPDATE_BACKREF. if the block is shared and there is no | |
5964 | * need to update back refs, this function drops the reference | |
5965 | * to the block. | |
5966 | * | |
5967 | * NOTE: return value 1 means we should stop walking down. | |
5968 | */ | |
5969 | static noinline int do_walk_down(struct btrfs_trans_handle *trans, | |
5970 | struct btrfs_root *root, | |
5971 | struct btrfs_path *path, | |
5972 | struct walk_control *wc, int *lookup_info) | |
5973 | { | |
5974 | u64 bytenr; | |
5975 | u64 generation; | |
5976 | u64 parent; | |
5977 | u32 blocksize; | |
5978 | struct btrfs_key key; | |
5979 | struct extent_buffer *next; | |
5980 | int level = wc->level; | |
5981 | int reada = 0; | |
5982 | int ret = 0; | |
5983 | ||
5984 | generation = btrfs_node_ptr_generation(path->nodes[level], | |
5985 | path->slots[level]); | |
5986 | /* | |
5987 | * if the lower level block was created before the snapshot | |
5988 | * was created, we know there is no need to update back refs | |
5989 | * for the subtree | |
5990 | */ | |
5991 | if (wc->stage == UPDATE_BACKREF && | |
5992 | generation <= root->root_key.offset) { | |
5993 | *lookup_info = 1; | |
5994 | return 1; | |
5995 | } | |
5996 | ||
5997 | bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]); | |
5998 | blocksize = btrfs_level_size(root, level - 1); | |
5999 | ||
6000 | next = btrfs_find_tree_block(root, bytenr, blocksize); | |
6001 | if (!next) { | |
6002 | next = btrfs_find_create_tree_block(root, bytenr, blocksize); | |
6003 | if (!next) | |
6004 | return -ENOMEM; | |
6005 | reada = 1; | |
6006 | } | |
6007 | btrfs_tree_lock(next); | |
6008 | btrfs_set_lock_blocking(next); | |
6009 | ||
6010 | ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize, | |
6011 | &wc->refs[level - 1], | |
6012 | &wc->flags[level - 1]); | |
6013 | BUG_ON(ret); | |
6014 | BUG_ON(wc->refs[level - 1] == 0); | |
6015 | *lookup_info = 0; | |
6016 | ||
6017 | if (wc->stage == DROP_REFERENCE) { | |
6018 | if (wc->refs[level - 1] > 1) { | |
6019 | if (level == 1 && | |
6020 | (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) | |
6021 | goto skip; | |
6022 | ||
6023 | if (!wc->update_ref || | |
6024 | generation <= root->root_key.offset) | |
6025 | goto skip; | |
6026 | ||
6027 | btrfs_node_key_to_cpu(path->nodes[level], &key, | |
6028 | path->slots[level]); | |
6029 | ret = btrfs_comp_cpu_keys(&key, &wc->update_progress); | |
6030 | if (ret < 0) | |
6031 | goto skip; | |
6032 | ||
6033 | wc->stage = UPDATE_BACKREF; | |
6034 | wc->shared_level = level - 1; | |
6035 | } | |
6036 | } else { | |
6037 | if (level == 1 && | |
6038 | (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) | |
6039 | goto skip; | |
6040 | } | |
6041 | ||
6042 | if (!btrfs_buffer_uptodate(next, generation)) { | |
6043 | btrfs_tree_unlock(next); | |
6044 | free_extent_buffer(next); | |
6045 | next = NULL; | |
6046 | *lookup_info = 1; | |
6047 | } | |
6048 | ||
6049 | if (!next) { | |
6050 | if (reada && level == 1) | |
6051 | reada_walk_down(trans, root, wc, path); | |
6052 | next = read_tree_block(root, bytenr, blocksize, generation); | |
6053 | if (!next) | |
6054 | return -EIO; | |
6055 | btrfs_tree_lock(next); | |
6056 | btrfs_set_lock_blocking(next); | |
6057 | } | |
6058 | ||
6059 | level--; | |
6060 | BUG_ON(level != btrfs_header_level(next)); | |
6061 | path->nodes[level] = next; | |
6062 | path->slots[level] = 0; | |
6063 | path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; | |
6064 | wc->level = level; | |
6065 | if (wc->level == 1) | |
6066 | wc->reada_slot = 0; | |
6067 | return 0; | |
6068 | skip: | |
6069 | wc->refs[level - 1] = 0; | |
6070 | wc->flags[level - 1] = 0; | |
6071 | if (wc->stage == DROP_REFERENCE) { | |
6072 | if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) { | |
6073 | parent = path->nodes[level]->start; | |
6074 | } else { | |
6075 | BUG_ON(root->root_key.objectid != | |
6076 | btrfs_header_owner(path->nodes[level])); | |
6077 | parent = 0; | |
6078 | } | |
6079 | ||
6080 | ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent, | |
6081 | root->root_key.objectid, level - 1, 0); | |
6082 | BUG_ON(ret); | |
6083 | } | |
6084 | btrfs_tree_unlock(next); | |
6085 | free_extent_buffer(next); | |
6086 | *lookup_info = 1; | |
6087 | return 1; | |
6088 | } | |
6089 | ||
6090 | /* | |
6091 | * helper to process tree block while walking up the tree. | |
6092 | * | |
6093 | * when wc->stage == DROP_REFERENCE, this function drops | |
6094 | * reference count on the block. | |
6095 | * | |
6096 | * when wc->stage == UPDATE_BACKREF, this function changes | |
6097 | * wc->stage back to DROP_REFERENCE if we changed wc->stage | |
6098 | * to UPDATE_BACKREF previously while processing the block. | |
6099 | * | |
6100 | * NOTE: return value 1 means we should stop walking up. | |
6101 | */ | |
6102 | static noinline int walk_up_proc(struct btrfs_trans_handle *trans, | |
6103 | struct btrfs_root *root, | |
6104 | struct btrfs_path *path, | |
6105 | struct walk_control *wc) | |
6106 | { | |
6107 | int ret; | |
6108 | int level = wc->level; | |
6109 | struct extent_buffer *eb = path->nodes[level]; | |
6110 | u64 parent = 0; | |
6111 | ||
6112 | if (wc->stage == UPDATE_BACKREF) { | |
6113 | BUG_ON(wc->shared_level < level); | |
6114 | if (level < wc->shared_level) | |
6115 | goto out; | |
6116 | ||
6117 | ret = find_next_key(path, level + 1, &wc->update_progress); | |
6118 | if (ret > 0) | |
6119 | wc->update_ref = 0; | |
6120 | ||
6121 | wc->stage = DROP_REFERENCE; | |
6122 | wc->shared_level = -1; | |
6123 | path->slots[level] = 0; | |
6124 | ||
6125 | /* | |
6126 | * check reference count again if the block isn't locked. | |
6127 | * we should start walking down the tree again if reference | |
6128 | * count is one. | |
6129 | */ | |
6130 | if (!path->locks[level]) { | |
6131 | BUG_ON(level == 0); | |
6132 | btrfs_tree_lock(eb); | |
6133 | btrfs_set_lock_blocking(eb); | |
6134 | path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; | |
6135 | ||
6136 | ret = btrfs_lookup_extent_info(trans, root, | |
6137 | eb->start, eb->len, | |
6138 | &wc->refs[level], | |
6139 | &wc->flags[level]); | |
6140 | BUG_ON(ret); | |
6141 | BUG_ON(wc->refs[level] == 0); | |
6142 | if (wc->refs[level] == 1) { | |
6143 | btrfs_tree_unlock_rw(eb, path->locks[level]); | |
6144 | return 1; | |
6145 | } | |
6146 | } | |
6147 | } | |
6148 | ||
6149 | /* wc->stage == DROP_REFERENCE */ | |
6150 | BUG_ON(wc->refs[level] > 1 && !path->locks[level]); | |
6151 | ||
6152 | if (wc->refs[level] == 1) { | |
6153 | if (level == 0) { | |
6154 | if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) | |
6155 | ret = btrfs_dec_ref(trans, root, eb, 1); | |
6156 | else | |
6157 | ret = btrfs_dec_ref(trans, root, eb, 0); | |
6158 | BUG_ON(ret); | |
6159 | } | |
6160 | /* make block locked assertion in clean_tree_block happy */ | |
6161 | if (!path->locks[level] && | |
6162 | btrfs_header_generation(eb) == trans->transid) { | |
6163 | btrfs_tree_lock(eb); | |
6164 | btrfs_set_lock_blocking(eb); | |
6165 | path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; | |
6166 | } | |
6167 | clean_tree_block(trans, root, eb); | |
6168 | } | |
6169 | ||
6170 | if (eb == root->node) { | |
6171 | if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) | |
6172 | parent = eb->start; | |
6173 | else | |
6174 | BUG_ON(root->root_key.objectid != | |
6175 | btrfs_header_owner(eb)); | |
6176 | } else { | |
6177 | if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF) | |
6178 | parent = path->nodes[level + 1]->start; | |
6179 | else | |
6180 | BUG_ON(root->root_key.objectid != | |
6181 | btrfs_header_owner(path->nodes[level + 1])); | |
6182 | } | |
6183 | ||
6184 | btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1); | |
6185 | out: | |
6186 | wc->refs[level] = 0; | |
6187 | wc->flags[level] = 0; | |
6188 | return 0; | |
6189 | } | |
6190 | ||
6191 | static noinline int walk_down_tree(struct btrfs_trans_handle *trans, | |
6192 | struct btrfs_root *root, | |
6193 | struct btrfs_path *path, | |
6194 | struct walk_control *wc) | |
6195 | { | |
6196 | int level = wc->level; | |
6197 | int lookup_info = 1; | |
6198 | int ret; | |
6199 | ||
6200 | while (level >= 0) { | |
6201 | ret = walk_down_proc(trans, root, path, wc, lookup_info); | |
6202 | if (ret > 0) | |
6203 | break; | |
6204 | ||
6205 | if (level == 0) | |
6206 | break; | |
6207 | ||
6208 | if (path->slots[level] >= | |
6209 | btrfs_header_nritems(path->nodes[level])) | |
6210 | break; | |
6211 | ||
6212 | ret = do_walk_down(trans, root, path, wc, &lookup_info); | |
6213 | if (ret > 0) { | |
6214 | path->slots[level]++; | |
6215 | continue; | |
6216 | } else if (ret < 0) | |
6217 | return ret; | |
6218 | level = wc->level; | |
6219 | } | |
6220 | return 0; | |
6221 | } | |
6222 | ||
6223 | static noinline int walk_up_tree(struct btrfs_trans_handle *trans, | |
6224 | struct btrfs_root *root, | |
6225 | struct btrfs_path *path, | |
6226 | struct walk_control *wc, int max_level) | |
6227 | { | |
6228 | int level = wc->level; | |
6229 | int ret; | |
6230 | ||
6231 | path->slots[level] = btrfs_header_nritems(path->nodes[level]); | |
6232 | while (level < max_level && path->nodes[level]) { | |
6233 | wc->level = level; | |
6234 | if (path->slots[level] + 1 < | |
6235 | btrfs_header_nritems(path->nodes[level])) { | |
6236 | path->slots[level]++; | |
6237 | return 0; | |
6238 | } else { | |
6239 | ret = walk_up_proc(trans, root, path, wc); | |
6240 | if (ret > 0) | |
6241 | return 0; | |
6242 | ||
6243 | if (path->locks[level]) { | |
6244 | btrfs_tree_unlock_rw(path->nodes[level], | |
6245 | path->locks[level]); | |
6246 | path->locks[level] = 0; | |
6247 | } | |
6248 | free_extent_buffer(path->nodes[level]); | |
6249 | path->nodes[level] = NULL; | |
6250 | level++; | |
6251 | } | |
6252 | } | |
6253 | return 1; | |
6254 | } | |
6255 | ||
6256 | /* | |
6257 | * drop a subvolume tree. | |
6258 | * | |
6259 | * this function traverses the tree freeing any blocks that are only | |
6260 | * referenced by the tree. | |
6261 | * | |
6262 | * when a shared tree block is found, this function decreases its | |
6263 | * reference count by one. if update_ref is true, this function | |
6264 | * also makes sure backrefs for the shared block and all lower level | |
6265 | * blocks are properly updated. | |
6266 | */ | |
6267 | void btrfs_drop_snapshot(struct btrfs_root *root, | |
6268 | struct btrfs_block_rsv *block_rsv, int update_ref) | |
6269 | { | |
6270 | struct btrfs_path *path; | |
6271 | struct btrfs_trans_handle *trans; | |
6272 | struct btrfs_root *tree_root = root->fs_info->tree_root; | |
6273 | struct btrfs_root_item *root_item = &root->root_item; | |
6274 | struct walk_control *wc; | |
6275 | struct btrfs_key key; | |
6276 | int err = 0; | |
6277 | int ret; | |
6278 | int level; | |
6279 | ||
6280 | path = btrfs_alloc_path(); | |
6281 | if (!path) { | |
6282 | err = -ENOMEM; | |
6283 | goto out; | |
6284 | } | |
6285 | ||
6286 | wc = kzalloc(sizeof(*wc), GFP_NOFS); | |
6287 | if (!wc) { | |
6288 | btrfs_free_path(path); | |
6289 | err = -ENOMEM; | |
6290 | goto out; | |
6291 | } | |
6292 | ||
6293 | trans = btrfs_start_transaction(tree_root, 0); | |
6294 | BUG_ON(IS_ERR(trans)); | |
6295 | ||
6296 | if (block_rsv) | |
6297 | trans->block_rsv = block_rsv; | |
6298 | ||
6299 | if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { | |
6300 | level = btrfs_header_level(root->node); | |
6301 | path->nodes[level] = btrfs_lock_root_node(root); | |
6302 | btrfs_set_lock_blocking(path->nodes[level]); | |
6303 | path->slots[level] = 0; | |
6304 | path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; | |
6305 | memset(&wc->update_progress, 0, | |
6306 | sizeof(wc->update_progress)); | |
6307 | } else { | |
6308 | btrfs_disk_key_to_cpu(&key, &root_item->drop_progress); | |
6309 | memcpy(&wc->update_progress, &key, | |
6310 | sizeof(wc->update_progress)); | |
6311 | ||
6312 | level = root_item->drop_level; | |
6313 | BUG_ON(level == 0); | |
6314 | path->lowest_level = level; | |
6315 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
6316 | path->lowest_level = 0; | |
6317 | if (ret < 0) { | |
6318 | err = ret; | |
6319 | goto out_free; | |
6320 | } | |
6321 | WARN_ON(ret > 0); | |
6322 | ||
6323 | /* | |
6324 | * unlock our path, this is safe because only this | |
6325 | * function is allowed to delete this snapshot | |
6326 | */ | |
6327 | btrfs_unlock_up_safe(path, 0); | |
6328 | ||
6329 | level = btrfs_header_level(root->node); | |
6330 | while (1) { | |
6331 | btrfs_tree_lock(path->nodes[level]); | |
6332 | btrfs_set_lock_blocking(path->nodes[level]); | |
6333 | ||
6334 | ret = btrfs_lookup_extent_info(trans, root, | |
6335 | path->nodes[level]->start, | |
6336 | path->nodes[level]->len, | |
6337 | &wc->refs[level], | |
6338 | &wc->flags[level]); | |
6339 | BUG_ON(ret); | |
6340 | BUG_ON(wc->refs[level] == 0); | |
6341 | ||
6342 | if (level == root_item->drop_level) | |
6343 | break; | |
6344 | ||
6345 | btrfs_tree_unlock(path->nodes[level]); | |
6346 | WARN_ON(wc->refs[level] != 1); | |
6347 | level--; | |
6348 | } | |
6349 | } | |
6350 | ||
6351 | wc->level = level; | |
6352 | wc->shared_level = -1; | |
6353 | wc->stage = DROP_REFERENCE; | |
6354 | wc->update_ref = update_ref; | |
6355 | wc->keep_locks = 0; | |
6356 | wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root); | |
6357 | ||
6358 | while (1) { | |
6359 | ret = walk_down_tree(trans, root, path, wc); | |
6360 | if (ret < 0) { | |
6361 | err = ret; | |
6362 | break; | |
6363 | } | |
6364 | ||
6365 | ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL); | |
6366 | if (ret < 0) { | |
6367 | err = ret; | |
6368 | break; | |
6369 | } | |
6370 | ||
6371 | if (ret > 0) { | |
6372 | BUG_ON(wc->stage != DROP_REFERENCE); | |
6373 | break; | |
6374 | } | |
6375 | ||
6376 | if (wc->stage == DROP_REFERENCE) { | |
6377 | level = wc->level; | |
6378 | btrfs_node_key(path->nodes[level], | |
6379 | &root_item->drop_progress, | |
6380 | path->slots[level]); | |
6381 | root_item->drop_level = level; | |
6382 | } | |
6383 | ||
6384 | BUG_ON(wc->level == 0); | |
6385 | if (btrfs_should_end_transaction(trans, tree_root)) { | |
6386 | ret = btrfs_update_root(trans, tree_root, | |
6387 | &root->root_key, | |
6388 | root_item); | |
6389 | BUG_ON(ret); | |
6390 | ||
6391 | btrfs_end_transaction_throttle(trans, tree_root); | |
6392 | trans = btrfs_start_transaction(tree_root, 0); | |
6393 | BUG_ON(IS_ERR(trans)); | |
6394 | if (block_rsv) | |
6395 | trans->block_rsv = block_rsv; | |
6396 | } | |
6397 | } | |
6398 | btrfs_release_path(path); | |
6399 | BUG_ON(err); | |
6400 | ||
6401 | ret = btrfs_del_root(trans, tree_root, &root->root_key); | |
6402 | BUG_ON(ret); | |
6403 | ||
6404 | if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { | |
6405 | ret = btrfs_find_last_root(tree_root, root->root_key.objectid, | |
6406 | NULL, NULL); | |
6407 | BUG_ON(ret < 0); | |
6408 | if (ret > 0) { | |
6409 | /* if we fail to delete the orphan item this time | |
6410 | * around, it'll get picked up the next time. | |
6411 | * | |
6412 | * The most common failure here is just -ENOENT. | |
6413 | */ | |
6414 | btrfs_del_orphan_item(trans, tree_root, | |
6415 | root->root_key.objectid); | |
6416 | } | |
6417 | } | |
6418 | ||
6419 | if (root->in_radix) { | |
6420 | btrfs_free_fs_root(tree_root->fs_info, root); | |
6421 | } else { | |
6422 | free_extent_buffer(root->node); | |
6423 | free_extent_buffer(root->commit_root); | |
6424 | kfree(root); | |
6425 | } | |
6426 | out_free: | |
6427 | btrfs_end_transaction_throttle(trans, tree_root); | |
6428 | kfree(wc); | |
6429 | btrfs_free_path(path); | |
6430 | out: | |
6431 | if (err) | |
6432 | btrfs_std_error(root->fs_info, err); | |
6433 | return; | |
6434 | } | |
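/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller that drains a list of dead snapshot roots. The helper
 * fetch_next_dead_root() is invented for the example; only
 * btrfs_drop_snapshot() itself is real.
 */
static void example_drop_dead_roots(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	while ((root = fetch_next_dead_root(fs_info)) != NULL) {
		/*
		 * NULL block_rsv: fall back to the transaction's default
		 * reservation. update_ref == 0: a plain snapshot delete
		 * does not need backrefs of shared blocks rewritten.
		 */
		btrfs_drop_snapshot(root, NULL, 0);
	}
}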
6435 | ||
6436 | /* | |
6437 | * drop subtree rooted at tree block 'node'. | |
6438 | * | |
6439 | * NOTE: this function will unlock and release tree block 'node' | |
6440 | */ | |
6441 | int btrfs_drop_subtree(struct btrfs_trans_handle *trans, | |
6442 | struct btrfs_root *root, | |
6443 | struct extent_buffer *node, | |
6444 | struct extent_buffer *parent) | |
6445 | { | |
6446 | struct btrfs_path *path; | |
6447 | struct walk_control *wc; | |
6448 | int level; | |
6449 | int parent_level; | |
6450 | int ret = 0; | |
6451 | int wret; | |
6452 | ||
6453 | BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); | |
6454 | ||
6455 | path = btrfs_alloc_path(); | |
6456 | if (!path) | |
6457 | return -ENOMEM; | |
6458 | ||
6459 | wc = kzalloc(sizeof(*wc), GFP_NOFS); | |
6460 | if (!wc) { | |
6461 | btrfs_free_path(path); | |
6462 | return -ENOMEM; | |
6463 | } | |
6464 | ||
6465 | btrfs_assert_tree_locked(parent); | |
6466 | parent_level = btrfs_header_level(parent); | |
6467 | extent_buffer_get(parent); | |
6468 | path->nodes[parent_level] = parent; | |
6469 | path->slots[parent_level] = btrfs_header_nritems(parent); | |
6470 | ||
6471 | btrfs_assert_tree_locked(node); | |
6472 | level = btrfs_header_level(node); | |
6473 | path->nodes[level] = node; | |
6474 | path->slots[level] = 0; | |
6475 | path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; | |
6476 | ||
6477 | wc->refs[parent_level] = 1; | |
6478 | wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF; | |
6479 | wc->level = level; | |
6480 | wc->shared_level = -1; | |
6481 | wc->stage = DROP_REFERENCE; | |
6482 | wc->update_ref = 0; | |
6483 | wc->keep_locks = 1; | |
6484 | wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root); | |
6485 | ||
6486 | while (1) { | |
6487 | wret = walk_down_tree(trans, root, path, wc); | |
6488 | if (wret < 0) { | |
6489 | ret = wret; | |
6490 | break; | |
6491 | } | |
6492 | ||
6493 | wret = walk_up_tree(trans, root, path, wc, parent_level); | |
6494 | if (wret < 0) | |
6495 | ret = wret; | |
6496 | if (wret != 0) | |
6497 | break; | |
6498 | } | |
6499 | ||
6500 | kfree(wc); | |
6501 | btrfs_free_path(path); | |
6502 | return ret; | |
6503 | } | |
6504 | ||
6505 | static u64 update_block_group_flags(struct btrfs_root *root, u64 flags) | |
6506 | { | |
6507 | u64 num_devices; | |
6508 | u64 stripped = BTRFS_BLOCK_GROUP_RAID0 | | |
6509 | BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10; | |
6510 | ||
6511 | /* | |
6512 | * we add in the count of missing devices because we want | |
6513 | * to make sure that any RAID levels on a degraded FS | |
6514 | * continue to be honored. | |
6515 | */ | |
6516 | num_devices = root->fs_info->fs_devices->rw_devices + | |
6517 | root->fs_info->fs_devices->missing_devices; | |
6518 | ||
6519 | if (num_devices == 1) { | |
6520 | stripped |= BTRFS_BLOCK_GROUP_DUP; | |
6521 | stripped = flags & ~stripped; | |
6522 | ||
6523 | /* turn raid0 into single device chunks */ | |
6524 | if (flags & BTRFS_BLOCK_GROUP_RAID0) | |
6525 | return stripped; | |
6526 | ||
6527 | /* turn mirroring into duplication */ | |
6528 | if (flags & (BTRFS_BLOCK_GROUP_RAID1 | | |
6529 | BTRFS_BLOCK_GROUP_RAID10)) | |
6530 | return stripped | BTRFS_BLOCK_GROUP_DUP; | |
6531 | return flags; | |
6532 | } else { | |
6533 | /* they already had raid on here, just return */ | |
6534 | if (flags & stripped) | |
6535 | return flags; | |
6536 | ||
6537 | stripped |= BTRFS_BLOCK_GROUP_DUP; | |
6538 | stripped = flags & ~stripped; | |
6539 | ||
6540 | /* switch duplicated blocks with raid1 */ | |
6541 | if (flags & BTRFS_BLOCK_GROUP_DUP) | |
6542 | return stripped | BTRFS_BLOCK_GROUP_RAID1; | |
6543 | ||
6544 | /* turn single device chunks into raid0 */ | |
6545 | return stripped | BTRFS_BLOCK_GROUP_RAID0; | |
6546 | } | |
6547 | return flags; | |
6548 | } | |
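/*
 * Worked examples of the restriping rules above (illustrative only):
 *
 *   1 rw device:    RAID1 or RAID10 -> DUP, RAID0 -> single
 *   2+ rw devices:  DUP -> RAID1, single -> RAID0, existing RAID kept
 *
 * Missing devices count toward num_devices so that a degraded RAID1
 * filesystem keeps producing RAID1 chunks instead of degrading to DUP.
 */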
6549 | ||
6550 | static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force) | |
6551 | { | |
6552 | struct btrfs_space_info *sinfo = cache->space_info; | |
6553 | u64 num_bytes; | |
6554 | u64 min_allocable_bytes; | |
6555 | int ret = -ENOSPC; | |
6556 | ||
6558 | /* | |
6559 | * We need some metadata space and system metadata space for | |
6560 | * allocating chunks in some corner cases, unless we are forced | |
6561 | * to mark the block group read-only. | |
6562 | */ | |
6563 | if ((sinfo->flags & | |
6564 | (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) && | |
6565 | !force) | |
6566 | min_allocable_bytes = 1 * 1024 * 1024; | |
6567 | else | |
6568 | min_allocable_bytes = 0; | |
6569 | ||
6570 | spin_lock(&sinfo->lock); | |
6571 | spin_lock(&cache->lock); | |
6572 | ||
6573 | if (cache->ro) { | |
6574 | ret = 0; | |
6575 | goto out; | |
6576 | } | |
6577 | ||
6578 | num_bytes = cache->key.offset - cache->reserved - cache->pinned - | |
6579 | cache->bytes_super - btrfs_block_group_used(&cache->item); | |
6580 | ||
6581 | if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned + | |
6582 | sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes + | |
6583 | min_allocable_bytes <= sinfo->total_bytes) { | |
6584 | sinfo->bytes_readonly += num_bytes; | |
6585 | cache->ro = 1; | |
6586 | ret = 0; | |
6587 | } | |
6588 | out: | |
6589 | spin_unlock(&cache->lock); | |
6590 | spin_unlock(&sinfo->lock); | |
6591 | return ret; | |
6592 | } | |
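/*
 * Worked example of the check above, with illustrative numbers: a 1GiB
 * block group with 100MiB reserved, 50MiB pinned, 4MiB of super stripes
 * and 600MiB used leaves
 *
 *	num_bytes = 1024M - 100M - 50M - 4M - 600M = 270MiB
 *
 * of unused space. The group may go read-only only if bytes_used +
 * bytes_reserved + bytes_pinned + bytes_may_use + bytes_readonly +
 * 270MiB + min_allocable_bytes still fits in sinfo->total_bytes;
 * otherwise -ENOSPC is returned.
 */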
6593 | ||
6594 | int btrfs_set_block_group_ro(struct btrfs_root *root, | |
6595 | struct btrfs_block_group_cache *cache) | |
6597 | { | |
6598 | struct btrfs_trans_handle *trans; | |
6599 | u64 alloc_flags; | |
6600 | int ret; | |
6601 | ||
6602 | BUG_ON(cache->ro); | |
6603 | ||
6604 | trans = btrfs_join_transaction(root); | |
6605 | BUG_ON(IS_ERR(trans)); | |
6606 | ||
6607 | alloc_flags = update_block_group_flags(root, cache->flags); | |
6608 | if (alloc_flags != cache->flags) | |
6609 | do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, | |
6610 | CHUNK_ALLOC_FORCE); | |
6611 | ||
6612 | ret = set_block_group_ro(cache, 0); | |
6613 | if (!ret) | |
6614 | goto out; | |
6615 | alloc_flags = get_alloc_profile(root, cache->space_info->flags); | |
6616 | ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, | |
6617 | CHUNK_ALLOC_FORCE); | |
6618 | if (ret < 0) | |
6619 | goto out; | |
6620 | ret = set_block_group_ro(cache, 0); | |
6621 | out: | |
6622 | btrfs_end_transaction(trans, root); | |
6623 | return ret; | |
6624 | } | |
6625 | ||
6626 | int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, | |
6627 | struct btrfs_root *root, u64 type) | |
6628 | { | |
6629 | u64 alloc_flags = get_alloc_profile(root, type); | |
6630 | return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, | |
6631 | CHUNK_ALLOC_FORCE); | |
6632 | } | |
6633 | ||
6634 | /* | |
6635 | * helper to account the unused space of all the readonly block groups | |
6636 | * in the list. takes mirrors into account. | |
6637 | */ | |
6638 | static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list) | |
6639 | { | |
6640 | struct btrfs_block_group_cache *block_group; | |
6641 | u64 free_bytes = 0; | |
6642 | int factor; | |
6643 | ||
6644 | list_for_each_entry(block_group, groups_list, list) { | |
6645 | spin_lock(&block_group->lock); | |
6646 | ||
6647 | if (!block_group->ro) { | |
6648 | spin_unlock(&block_group->lock); | |
6649 | continue; | |
6650 | } | |
6651 | ||
6652 | if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 | | |
6653 | BTRFS_BLOCK_GROUP_RAID10 | | |
6654 | BTRFS_BLOCK_GROUP_DUP)) | |
6655 | factor = 2; | |
6656 | else | |
6657 | factor = 1; | |
6658 | ||
6659 | free_bytes += (block_group->key.offset - | |
6660 | btrfs_block_group_used(&block_group->item)) * | |
6661 | factor; | |
6662 | ||
6663 | spin_unlock(&block_group->lock); | |
6664 | } | |
6665 | ||
6666 | return free_bytes; | |
6667 | } | |
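/*
 * Example of the mirror accounting above: a read-only RAID1 block group
 * with key.offset == 1GiB and 600MiB used contributes
 * (1024M - 600M) * 2 == 848MiB of raw free space, since every logical
 * byte occupies two bytes on disk. RAID0 and single groups use factor 1.
 */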
6668 | ||
6669 | /* | |
6670 | * helper to account the unused space of all the readonly block groups | |
6671 | * in the space_info. takes mirrors into account. | |
6672 | */ | |
6673 | u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo) | |
6674 | { | |
6675 | int i; | |
6676 | u64 free_bytes = 0; | |
6677 | ||
6678 | spin_lock(&sinfo->lock); | |
6679 | ||
6680 | for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) | |
6681 | if (!list_empty(&sinfo->block_groups[i])) | |
6682 | free_bytes += __btrfs_get_ro_block_group_free_space( | |
6683 | &sinfo->block_groups[i]); | |
6684 | ||
6685 | spin_unlock(&sinfo->lock); | |
6686 | ||
6687 | return free_bytes; | |
6688 | } | |
6689 | ||
6690 | int btrfs_set_block_group_rw(struct btrfs_root *root, | |
6691 | struct btrfs_block_group_cache *cache) | |
6692 | { | |
6693 | struct btrfs_space_info *sinfo = cache->space_info; | |
6694 | u64 num_bytes; | |
6695 | ||
6696 | BUG_ON(!cache->ro); | |
6697 | ||
6698 | spin_lock(&sinfo->lock); | |
6699 | spin_lock(&cache->lock); | |
6700 | num_bytes = cache->key.offset - cache->reserved - cache->pinned - | |
6701 | cache->bytes_super - btrfs_block_group_used(&cache->item); | |
6702 | sinfo->bytes_readonly -= num_bytes; | |
6703 | cache->ro = 0; | |
6704 | spin_unlock(&cache->lock); | |
6705 | spin_unlock(&sinfo->lock); | |
6706 | return 0; | |
6707 | } | |
6708 | ||
6709 | /* | |
6710 | * checks to see if it's even possible to relocate this block group. | |
6711 | * | |
6712 | * @return - -1 if it's not a good idea to relocate this block group, 0 if | |
6713 | * it's ok to go ahead and try. | |
6714 | */ | |
6715 | int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr) | |
6716 | { | |
6717 | struct btrfs_block_group_cache *block_group; | |
6718 | struct btrfs_space_info *space_info; | |
6719 | struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; | |
6720 | struct btrfs_device *device; | |
6721 | u64 min_free; | |
6722 | u64 dev_min = 1; | |
6723 | u64 dev_nr = 0; | |
6724 | int index; | |
6725 | int full = 0; | |
6726 | int ret = 0; | |
6727 | ||
6728 | block_group = btrfs_lookup_block_group(root->fs_info, bytenr); | |
6729 | ||
6730 | /* odd, couldn't find the block group, leave it alone */ | |
6731 | if (!block_group) | |
6732 | return -1; | |
6733 | ||
6734 | min_free = btrfs_block_group_used(&block_group->item); | |
6735 | ||
6736 | /* no bytes used, we're good */ | |
6737 | if (!min_free) | |
6738 | goto out; | |
6739 | ||
6740 | space_info = block_group->space_info; | |
6741 | spin_lock(&space_info->lock); | |
6742 | ||
6743 | full = space_info->full; | |
6744 | ||
6745 | /* | |
6746 | * if this is the last block group we have in this space, we can't | |
6747 | * relocate it unless we're able to allocate a new chunk below. | |
6748 | * | |
6749 | * Otherwise, we need to make sure we have room in the space to handle | |
6750 | * all of the extents from this block group. If we can, we're good. | |
6751 | */ | |
6752 | if ((space_info->total_bytes != block_group->key.offset) && | |
6753 | (space_info->bytes_used + space_info->bytes_reserved + | |
6754 | space_info->bytes_pinned + space_info->bytes_readonly + | |
6755 | min_free < space_info->total_bytes)) { | |
6756 | spin_unlock(&space_info->lock); | |
6757 | goto out; | |
6758 | } | |
6759 | spin_unlock(&space_info->lock); | |
6760 | ||
6761 | /* | |
6762 | * ok we don't have enough space, but maybe we have free space on our | |
6763 | * devices to allocate new chunks for relocation, so loop through our | |
6764 | * alloc devices and guess if we have enough space. However, if we | |
6765 | * were marked as full, then we know there aren't enough chunks, and we | |
6766 | * can just return. | |
6767 | */ | |
6768 | ret = -1; | |
6769 | if (full) | |
6770 | goto out; | |
6771 | ||
6772 | /* | |
6773 | * index: | |
6774 | * 0: raid10 | |
6775 | * 1: raid1 | |
6776 | * 2: dup | |
6777 | * 3: raid0 | |
6778 | * 4: single | |
6779 | */ | |
6780 | index = get_block_group_index(block_group); | |
6781 | if (index == 0) { | |
6782 | dev_min = 4; | |
6783 | /* Divide by 2 */ | |
6784 | min_free >>= 1; | |
6785 | } else if (index == 1) { | |
6786 | dev_min = 2; | |
6787 | } else if (index == 2) { | |
6788 | /* Multiply by 2 */ | |
6789 | min_free <<= 1; | |
6790 | } else if (index == 3) { | |
6791 | dev_min = fs_devices->rw_devices; | |
6792 | do_div(min_free, dev_min); | |
6793 | } | |
6794 | ||
6795 | mutex_lock(&root->fs_info->chunk_mutex); | |
6796 | list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { | |
6797 | u64 dev_offset; | |
6798 | ||
6799 | /* | |
6800 | * check to make sure we can actually find a chunk with enough | |
6801 | * space to fit our block group in. | |
6802 | */ | |
6803 | if (device->total_bytes > device->bytes_used + min_free) { | |
6804 | ret = find_free_dev_extent(NULL, device, min_free, | |
6805 | &dev_offset, NULL); | |
6806 | if (!ret) | |
6807 | dev_nr++; | |
6808 | ||
6809 | if (dev_nr >= dev_min) | |
6810 | break; | |
6811 | ||
6812 | ret = -1; | |
6813 | } | |
6814 | } | |
6815 | mutex_unlock(&root->fs_info->chunk_mutex); | |
6816 | out: | |
6817 | btrfs_put_block_group(block_group); | |
6818 | return ret; | |
6819 | } | |
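/*
 * Worked example of the per-profile scaling above: relocating a RAID10
 * group with 512MiB used needs four devices that can each fit
 * 512M >> 1 == 256MiB, because each mirror pair stores half the data;
 * a DUP group with 512MiB used needs a single device with
 * 512M << 1 == 1GiB free, since both copies land on the same device;
 * RAID0 divides min_free evenly across all rw devices.
 */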
6820 | ||
6821 | static int find_first_block_group(struct btrfs_root *root, | |
6822 | struct btrfs_path *path, struct btrfs_key *key) | |
6823 | { | |
6824 | int ret = 0; | |
6825 | struct btrfs_key found_key; | |
6826 | struct extent_buffer *leaf; | |
6827 | int slot; | |
6828 | ||
6829 | ret = btrfs_search_slot(NULL, root, key, path, 0, 0); | |
6830 | if (ret < 0) | |
6831 | goto out; | |
6832 | ||
6833 | while (1) { | |
6834 | slot = path->slots[0]; | |
6835 | leaf = path->nodes[0]; | |
6836 | if (slot >= btrfs_header_nritems(leaf)) { | |
6837 | ret = btrfs_next_leaf(root, path); | |
6838 | if (ret == 0) | |
6839 | continue; | |
6840 | if (ret < 0) | |
6841 | goto out; | |
6842 | break; | |
6843 | } | |
6844 | btrfs_item_key_to_cpu(leaf, &found_key, slot); | |
6845 | ||
6846 | if (found_key.objectid >= key->objectid && | |
6847 | found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { | |
6848 | ret = 0; | |
6849 | goto out; | |
6850 | } | |
6851 | path->slots[0]++; | |
6852 | } | |
6853 | out: | |
6854 | return ret; | |
6855 | } | |
6856 | ||
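/*
 * Drop the inode references that block groups keep on their free space
 * cache inodes. The scan restarts from offset 0 after reaching the end
 * and only terminates once a full pass starting at 0 finds no block
 * group still holding an inode reference.
 */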
6857 | void btrfs_put_block_group_cache(struct btrfs_fs_info *info) | |
6858 | { | |
6859 | struct btrfs_block_group_cache *block_group; | |
6860 | u64 last = 0; | |
6861 | ||
6862 | while (1) { | |
6863 | struct inode *inode; | |
6864 | ||
6865 | block_group = btrfs_lookup_first_block_group(info, last); | |
6866 | while (block_group) { | |
6867 | spin_lock(&block_group->lock); | |
6868 | if (block_group->iref) | |
6869 | break; | |
6870 | spin_unlock(&block_group->lock); | |
6871 | block_group = next_block_group(info->tree_root, | |
6872 | block_group); | |
6873 | } | |
6874 | if (!block_group) { | |
6875 | if (last == 0) | |
6876 | break; | |
6877 | last = 0; | |
6878 | continue; | |
6879 | } | |
6880 | ||
6881 | inode = block_group->inode; | |
6882 | block_group->iref = 0; | |
6883 | block_group->inode = NULL; | |
6884 | spin_unlock(&block_group->lock); | |
6885 | iput(inode); | |
6886 | last = block_group->key.objectid + block_group->key.offset; | |
6887 | btrfs_put_block_group(block_group); | |
6888 | } | |
6889 | } | |
6890 | ||
6891 | int btrfs_free_block_groups(struct btrfs_fs_info *info) | |
6892 | { | |
6893 | struct btrfs_block_group_cache *block_group; | |
6894 | struct btrfs_space_info *space_info; | |
6895 | struct btrfs_caching_control *caching_ctl; | |
6896 | struct rb_node *n; | |
6897 | ||
6898 | down_write(&info->extent_commit_sem); | |
6899 | while (!list_empty(&info->caching_block_groups)) { | |
6900 | caching_ctl = list_entry(info->caching_block_groups.next, | |
6901 | struct btrfs_caching_control, list); | |
6902 | list_del(&caching_ctl->list); | |
6903 | put_caching_control(caching_ctl); | |
6904 | } | |
6905 | up_write(&info->extent_commit_sem); | |
6906 | ||
6907 | spin_lock(&info->block_group_cache_lock); | |
6908 | while ((n = rb_last(&info->block_group_cache_tree)) != NULL) { | |
6909 | block_group = rb_entry(n, struct btrfs_block_group_cache, | |
6910 | cache_node); | |
6911 | rb_erase(&block_group->cache_node, | |
6912 | &info->block_group_cache_tree); | |
6913 | spin_unlock(&info->block_group_cache_lock); | |
6914 | ||
6915 | down_write(&block_group->space_info->groups_sem); | |
6916 | list_del(&block_group->list); | |
6917 | up_write(&block_group->space_info->groups_sem); | |
6918 | ||
6919 | if (block_group->cached == BTRFS_CACHE_STARTED) | |
6920 | wait_block_group_cache_done(block_group); | |
6921 | ||
6922 | /* | |
6923 | * We haven't cached this block group, which means we could | |
6924 | * possibly have excluded extents on this block group. | |
6925 | */ | |
6926 | if (block_group->cached == BTRFS_CACHE_NO) | |
6927 | free_excluded_extents(info->extent_root, block_group); | |
6928 | ||
6929 | btrfs_remove_free_space_cache(block_group); | |
6930 | btrfs_put_block_group(block_group); | |
6931 | ||
6932 | spin_lock(&info->block_group_cache_lock); | |
6933 | } | |
6934 | spin_unlock(&info->block_group_cache_lock); | |
6935 | ||
6936 | /* now that all the block groups are freed, go through and | |
6937 | * free all the space_info structs. This is only called during | |
6938 | * the final stages of unmount, and so we know nobody is | |
6939 | * using them. We call synchronize_rcu() once before we start, | |
6940 | * just to be on the safe side. | |
6941 | */ | |
6942 | synchronize_rcu(); | |
6943 | ||
6944 | release_global_block_rsv(info); | |
6945 | ||
6946 | while (!list_empty(&info->space_info)) { | |
6947 | space_info = list_entry(info->space_info.next, | |
6948 | struct btrfs_space_info, | |
6949 | list); | |
6950 | if (space_info->bytes_pinned > 0 || | |
6951 | space_info->bytes_reserved > 0 || | |
6952 | space_info->bytes_may_use > 0) { | |
6953 | WARN_ON(1); | |
6954 | dump_space_info(space_info, 0, 0); | |
6955 | } | |
6956 | list_del(&space_info->list); | |
6957 | kfree(space_info); | |
6958 | } | |
6959 | return 0; | |
6960 | } | |
6961 | ||
6962 | static void __link_block_group(struct btrfs_space_info *space_info, | |
6963 | struct btrfs_block_group_cache *cache) | |
6964 | { | |
6965 | int index = get_block_group_index(cache); | |
6966 | ||
6967 | down_write(&space_info->groups_sem); | |
6968 | list_add_tail(&cache->list, &space_info->block_groups[index]); | |
6969 | up_write(&space_info->groups_sem); | |
6970 | } | |
6971 | ||
6972 | int btrfs_read_block_groups(struct btrfs_root *root) | |
6973 | { | |
6974 | struct btrfs_path *path; | |
6975 | int ret; | |
6976 | struct btrfs_block_group_cache *cache; | |
6977 | struct btrfs_fs_info *info = root->fs_info; | |
6978 | struct btrfs_space_info *space_info; | |
6979 | struct btrfs_key key; | |
6980 | struct btrfs_key found_key; | |
6981 | struct extent_buffer *leaf; | |
6982 | int need_clear = 0; | |
6983 | u64 cache_gen; | |
6984 | ||
6985 | root = info->extent_root; | |
6986 | key.objectid = 0; | |
6987 | key.offset = 0; | |
6988 | btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY); | |
6989 | path = btrfs_alloc_path(); | |
6990 | if (!path) | |
6991 | return -ENOMEM; | |
6992 | path->reada = 1; | |
6993 | ||
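	/*
	 * Decide whether the on-disk free space cache must be thrown
	 * away: a cache generation that no longer matches the superblock
	 * generation means the cache was not written out by the last
	 * mount, and the CLEAR_CACHE mount option forces a rebuild
	 * unconditionally.
	 */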
6994 | cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy); | |
6995 | if (cache_gen != 0 && | |
6996 | btrfs_super_generation(&root->fs_info->super_copy) != cache_gen) | |
6997 | need_clear = 1; | |
6998 | if (btrfs_test_opt(root, CLEAR_CACHE)) | |
6999 | need_clear = 1; | |
7000 | if (!btrfs_test_opt(root, SPACE_CACHE) && cache_gen) | |
7001 | printk(KERN_INFO "btrfs: disk space caching is enabled\n"); | |
7002 | ||
7003 | while (1) { | |
7004 | ret = find_first_block_group(root, path, &key); | |
7005 | if (ret > 0) | |
7006 | break; | |
7007 | if (ret != 0) | |
7008 | goto error; | |
7009 | leaf = path->nodes[0]; | |
7010 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | |
7011 | cache = kzalloc(sizeof(*cache), GFP_NOFS); | |
7012 | if (!cache) { | |
7013 | ret = -ENOMEM; | |
7014 | goto error; | |
7015 | } | |
7016 | cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), | |
7017 | GFP_NOFS); | |
7018 | if (!cache->free_space_ctl) { | |
7019 | kfree(cache); | |
7020 | ret = -ENOMEM; | |
7021 | goto error; | |
7022 | } | |
7023 | ||
7024 | atomic_set(&cache->count, 1); | |
7025 | spin_lock_init(&cache->lock); | |
7026 | cache->fs_info = info; | |
7027 | INIT_LIST_HEAD(&cache->list); | |
7028 | INIT_LIST_HEAD(&cache->cluster_list); | |
7029 | ||
7030 | if (need_clear) | |
7031 | cache->disk_cache_state = BTRFS_DC_CLEAR; | |
7032 | ||
7033 | read_extent_buffer(leaf, &cache->item, | |
7034 | btrfs_item_ptr_offset(leaf, path->slots[0]), | |
7035 | sizeof(cache->item)); | |
7036 | memcpy(&cache->key, &found_key, sizeof(found_key)); | |
7037 | ||
7038 | key.objectid = found_key.objectid + found_key.offset; | |
7039 | btrfs_release_path(path); | |
7040 | cache->flags = btrfs_block_group_flags(&cache->item); | |
7041 | cache->sectorsize = root->sectorsize; | |
7042 | ||
7043 | btrfs_init_free_space_ctl(cache); | |
7044 | ||
7045 | /* | |
7046 | * We need to exclude the super stripes now so that the space | |
7047 | * info has super bytes accounted for, otherwise we'll think | |
7048 | * we have more space than we actually do. | |
7049 | */ | |
7050 | exclude_super_stripes(root, cache); | |
7051 | ||
7052 | /* | |
7053 | * check for two cases: either we are full, and therefore | |
7054 | * don't need to bother with the caching work since we won't | |
7055 | * find any space, or we are empty, and we can just add all | |
7056 | * the space in and be done with it. This saves us a lot of | |
7057 | * time, particularly in the full case. | |
7058 | */ | |
7059 | if (found_key.offset == btrfs_block_group_used(&cache->item)) { | |
7060 | cache->last_byte_to_unpin = (u64)-1; | |
7061 | cache->cached = BTRFS_CACHE_FINISHED; | |
7062 | free_excluded_extents(root, cache); | |
7063 | } else if (btrfs_block_group_used(&cache->item) == 0) { | |
7064 | cache->last_byte_to_unpin = (u64)-1; | |
7065 | cache->cached = BTRFS_CACHE_FINISHED; | |
7066 | add_new_free_space(cache, root->fs_info, | |
7067 | found_key.objectid, | |
7068 | found_key.objectid + | |
7069 | found_key.offset); | |
7070 | free_excluded_extents(root, cache); | |
7071 | } | |
7072 | ||
7073 | ret = update_space_info(info, cache->flags, found_key.offset, | |
7074 | btrfs_block_group_used(&cache->item), | |
7075 | &space_info); | |
7076 | BUG_ON(ret); | |
7077 | cache->space_info = space_info; | |
7078 | spin_lock(&cache->space_info->lock); | |
7079 | cache->space_info->bytes_readonly += cache->bytes_super; | |
7080 | spin_unlock(&cache->space_info->lock); | |
7081 | ||
7082 | __link_block_group(space_info, cache); | |
7083 | ||
7084 | ret = btrfs_add_block_group_cache(root->fs_info, cache); | |
7085 | BUG_ON(ret); | |
7086 | ||
7087 | set_avail_alloc_bits(root->fs_info, cache->flags); | |
7088 | if (btrfs_chunk_readonly(root, cache->key.objectid)) | |
7089 | set_block_group_ro(cache, 1); | |
7090 | } | |
7091 | ||
7092 | list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) { | |
7093 | if (!(get_alloc_profile(root, space_info->flags) & | |
7094 | (BTRFS_BLOCK_GROUP_RAID10 | | |
7095 | BTRFS_BLOCK_GROUP_RAID1 | | |
7096 | BTRFS_BLOCK_GROUP_DUP))) | |
7097 | continue; | |
7098 | /* | |
7099 | * avoid allocating from un-mirrored block groups if there are | |
7100 | * mirrored block groups. | |
7101 | */ | |
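		/*
		 * block_groups[3] holds RAID0 groups and block_groups[4]
		 * holds single groups; see the index map in
		 * btrfs_can_relocate() above.
		 */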
7102 | list_for_each_entry(cache, &space_info->block_groups[3], list) | |
7103 | set_block_group_ro(cache, 1); | |
7104 | list_for_each_entry(cache, &space_info->block_groups[4], list) | |
7105 | set_block_group_ro(cache, 1); | |
7106 | } | |
7107 | ||
7108 | init_global_block_rsv(info); | |
7109 | ret = 0; | |
7110 | error: | |
7111 | btrfs_free_path(path); | |
7112 | return ret; | |
7113 | } | |
7114 | ||
7115 | int btrfs_make_block_group(struct btrfs_trans_handle *trans, | |
7116 | struct btrfs_root *root, u64 bytes_used, | |
7117 | u64 type, u64 chunk_objectid, u64 chunk_offset, | |
7118 | u64 size) | |
7119 | { | |
7120 | int ret; | |
7121 | struct btrfs_root *extent_root; | |
7122 | struct btrfs_block_group_cache *cache; | |
7123 | ||
7124 | extent_root = root->fs_info->extent_root; | |
7125 | ||
7126 | root->fs_info->last_trans_log_full_commit = trans->transid; | |
7127 | ||
7128 | cache = kzalloc(sizeof(*cache), GFP_NOFS); | |
7129 | if (!cache) | |
7130 | return -ENOMEM; | |
7131 | cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), | |
7132 | GFP_NOFS); | |
7133 | if (!cache->free_space_ctl) { | |
7134 | kfree(cache); | |
7135 | return -ENOMEM; | |
7136 | } | |
7137 | ||
7138 | cache->key.objectid = chunk_offset; | |
7139 | cache->key.offset = size; | |
7140 | cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; | |
7141 | cache->sectorsize = root->sectorsize; | |
7142 | cache->fs_info = root->fs_info; | |
7143 | ||
7144 | atomic_set(&cache->count, 1); | |
7145 | spin_lock_init(&cache->lock); | |
7146 | INIT_LIST_HEAD(&cache->list); | |
7147 | INIT_LIST_HEAD(&cache->cluster_list); | |
7148 | ||
7149 | btrfs_init_free_space_ctl(cache); | |
7150 | ||
7151 | btrfs_set_block_group_used(&cache->item, bytes_used); | |
7152 | btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid); | |
7153 | cache->flags = type; | |
7154 | btrfs_set_block_group_flags(&cache->item, type); | |
7155 | ||
7156 | cache->last_byte_to_unpin = (u64)-1; | |
7157 | cache->cached = BTRFS_CACHE_FINISHED; | |
7158 | exclude_super_stripes(root, cache); | |
7159 | ||
7160 | add_new_free_space(cache, root->fs_info, chunk_offset, | |
7161 | chunk_offset + size); | |
7162 | ||
7163 | free_excluded_extents(root, cache); | |
7164 | ||
7165 | ret = update_space_info(root->fs_info, cache->flags, size, bytes_used, | |
7166 | &cache->space_info); | |
7167 | BUG_ON(ret); | |
7168 | ||
7169 | spin_lock(&cache->space_info->lock); | |
7170 | cache->space_info->bytes_readonly += cache->bytes_super; | |
7171 | spin_unlock(&cache->space_info->lock); | |
7172 | ||
7173 | __link_block_group(cache->space_info, cache); | |
7174 | ||
7175 | ret = btrfs_add_block_group_cache(root->fs_info, cache); | |
7176 | BUG_ON(ret); | |
7177 | ||
7178 | ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item, | |
7179 | sizeof(cache->item)); | |
7180 | BUG_ON(ret); | |
7181 | ||
7182 | set_avail_alloc_bits(extent_root->fs_info, type); | |
7183 | ||
7184 | return 0; | |
7185 | } | |
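/*
 * Illustrative call sequence (a sketch; chunk_offset and chunk_size are
 * assumed to come from the chunk allocator, not from this file): after
 * a new chunk is carved out of the devices, the allocator registers the
 * matching block group roughly like so:
 *
 *	ret = btrfs_make_block_group(trans, extent_root, 0, type,
 *				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
 *				     chunk_offset, chunk_size);
 *
 * bytes_used starts at 0 because a freshly allocated chunk contains no
 * extents yet.
 */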
7186 | ||
7187 | int btrfs_remove_block_group(struct btrfs_trans_handle *trans, | |
7188 | struct btrfs_root *root, u64 group_start) | |
7189 | { | |
7190 | struct btrfs_path *path; | |
7191 | struct btrfs_block_group_cache *block_group; | |
7192 | struct btrfs_free_cluster *cluster; | |
7193 | struct btrfs_root *tree_root = root->fs_info->tree_root; | |
7194 | struct btrfs_key key; | |
7195 | struct inode *inode; | |
7196 | int ret; | |
7197 | int factor; | |
7198 | ||
7199 | root = root->fs_info->extent_root; | |
7200 | ||
7201 | block_group = btrfs_lookup_block_group(root->fs_info, group_start); | |
7202 | BUG_ON(!block_group); | |
7203 | BUG_ON(!block_group->ro); | |
7204 | ||
7205 | /* | |
7206 | * Free the reserved super bytes from this block group before | |
7207 | * removing it. | |
7208 | */ | |
7209 | free_excluded_extents(root, block_group); | |
7210 | ||
7211 | memcpy(&key, &block_group->key, sizeof(key)); | |
7212 | if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP | | |
7213 | BTRFS_BLOCK_GROUP_RAID1 | | |
7214 | BTRFS_BLOCK_GROUP_RAID10)) | |
7215 | factor = 2; | |
7216 | else | |
7217 | factor = 1; | |
7218 | ||
7219 | /* make sure this block group isn't part of an allocation cluster */ | |
7220 | cluster = &root->fs_info->data_alloc_cluster; | |
7221 | spin_lock(&cluster->refill_lock); | |
7222 | btrfs_return_cluster_to_free_space(block_group, cluster); | |
7223 | spin_unlock(&cluster->refill_lock); | |
7224 | ||
7225 | /* | |
7226 | * make sure this block group isn't part of a metadata | |
7227 | * allocation cluster | |
7228 | */ | |
7229 | cluster = &root->fs_info->meta_alloc_cluster; | |
7230 | spin_lock(&cluster->refill_lock); | |
7231 | btrfs_return_cluster_to_free_space(block_group, cluster); | |
7232 | spin_unlock(&cluster->refill_lock); | |
7233 | ||
7234 | path = btrfs_alloc_path(); | |
7235 | if (!path) { | |
7236 | ret = -ENOMEM; | |
7237 | goto out; | |
7238 | } | |
7239 | ||
7240 | inode = lookup_free_space_inode(root, block_group, path); | |
7241 | if (!IS_ERR(inode)) { | |
7242 | ret = btrfs_orphan_add(trans, inode); | |
7243 | BUG_ON(ret); | |
7244 | clear_nlink(inode); | |
7245 | /* One for the block group's ref */ | |
7246 | spin_lock(&block_group->lock); | |
7247 | if (block_group->iref) { | |
7248 | block_group->iref = 0; | |
7249 | block_group->inode = NULL; | |
7250 | spin_unlock(&block_group->lock); | |
7251 | iput(inode); | |
7252 | } else { | |
7253 | spin_unlock(&block_group->lock); | |
7254 | } | |
7255 | /* One for our lookup ref */ | |
7256 | iput(inode); | |
7257 | } | |
7258 | ||
7259 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; | |
7260 | key.offset = block_group->key.objectid; | |
7261 | key.type = 0; | |
7262 | ||
7263 | ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); | |
7264 | if (ret < 0) | |
7265 | goto out; | |
7266 | if (ret > 0) | |
7267 | btrfs_release_path(path); | |
7268 | if (ret == 0) { | |
7269 | ret = btrfs_del_item(trans, tree_root, path); | |
7270 | if (ret) | |
7271 | goto out; | |
7272 | btrfs_release_path(path); | |
7273 | } | |
7274 | ||
7275 | spin_lock(&root->fs_info->block_group_cache_lock); | |
7276 | rb_erase(&block_group->cache_node, | |
7277 | &root->fs_info->block_group_cache_tree); | |
7278 | spin_unlock(&root->fs_info->block_group_cache_lock); | |
7279 | ||
7280 | down_write(&block_group->space_info->groups_sem); | |
7281 | /* | |
7282 | * we must use list_del_init so people can check to see if they | |
7283 | * are still on the list after taking the semaphore | |
7284 | */ | |
7285 | list_del_init(&block_group->list); | |
7286 | up_write(&block_group->space_info->groups_sem); | |
7287 | ||
7288 | if (block_group->cached == BTRFS_CACHE_STARTED) | |
7289 | wait_block_group_cache_done(block_group); | |
7290 | ||
7291 | btrfs_remove_free_space_cache(block_group); | |
7292 | ||
7293 | spin_lock(&block_group->space_info->lock); | |
7294 | block_group->space_info->total_bytes -= block_group->key.offset; | |
7295 | block_group->space_info->bytes_readonly -= block_group->key.offset; | |
7296 | block_group->space_info->disk_total -= block_group->key.offset * factor; | |
7297 | spin_unlock(&block_group->space_info->lock); | |
7298 | ||
7299 | memcpy(&key, &block_group->key, sizeof(key)); | |
7300 | ||
7301 | btrfs_clear_space_info_full(root->fs_info); | |
7302 | ||
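	/*
	 * Two puts: one for the btrfs_lookup_block_group() reference
	 * taken above, one for the reference the block group cache
	 * rbtree held before rb_erase().
	 */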
7303 | btrfs_put_block_group(block_group); | |
7304 | btrfs_put_block_group(block_group); | |
7305 | ||
7306 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); | |
7307 | if (ret > 0) | |
7308 | ret = -EIO; | |
7309 | if (ret < 0) | |
7310 | goto out; | |
7311 | ||
7312 | ret = btrfs_del_item(trans, root, path); | |
7313 | out: | |
7314 | btrfs_free_path(path); | |
7315 | return ret; | |
7316 | } | |
7317 | ||
7318 | int btrfs_init_space_info(struct btrfs_fs_info *fs_info) | |
7319 | { | |
7320 | struct btrfs_space_info *space_info; | |
7321 | struct btrfs_super_block *disk_super; | |
7322 | u64 features; | |
7323 | u64 flags; | |
7324 | int mixed = 0; | |
7325 | int ret; | |
7326 | ||
7327 | disk_super = &fs_info->super_copy; | |
7328 | if (!btrfs_super_root(disk_super)) | |
7329 | return 1; | |
7330 | ||
7331 | features = btrfs_super_incompat_flags(disk_super); | |
7332 | if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) | |
7333 | mixed = 1; | |
7334 | ||
7335 | flags = BTRFS_BLOCK_GROUP_SYSTEM; | |
7336 | ret = update_space_info(fs_info, flags, 0, 0, &space_info); | |
7337 | if (ret) | |
7338 | goto out; | |
7339 | ||
7340 | if (mixed) { | |
7341 | flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA; | |
7342 | ret = update_space_info(fs_info, flags, 0, 0, &space_info); | |
7343 | } else { | |
7344 | flags = BTRFS_BLOCK_GROUP_METADATA; | |
7345 | ret = update_space_info(fs_info, flags, 0, 0, &space_info); | |
7346 | if (ret) | |
7347 | goto out; | |
7348 | ||
7349 | flags = BTRFS_BLOCK_GROUP_DATA; | |
7350 | ret = update_space_info(fs_info, flags, 0, 0, &space_info); | |
7351 | } | |
7352 | out: | |
7353 | return ret; | |
7354 | } | |
7355 | ||
7356 | int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) | |
7357 | { | |
7358 | return unpin_extent_range(root, start, end); | |
7359 | } | |
7360 | ||
7361 | int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, | |
7362 | u64 num_bytes, u64 *actual_bytes) | |
7363 | { | |
7364 | return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes); | |
7365 | } | |
7366 | ||
7367 | int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range) | |
7368 | { | |
7369 | struct btrfs_fs_info *fs_info = root->fs_info; | |
7370 | struct btrfs_block_group_cache *cache = NULL; | |
7371 | u64 group_trimmed; | |
7372 | u64 start; | |
7373 | u64 end; | |
7374 | u64 trimmed = 0; | |
7375 | int ret = 0; | |
7376 | ||
7377 | cache = btrfs_lookup_block_group(fs_info, range->start); | |
7378 | ||
7379 | while (cache) { | |
7380 | if (cache->key.objectid >= (range->start + range->len)) { | |
7381 | btrfs_put_block_group(cache); | |
7382 | break; | |
7383 | } | |
7384 | ||
7385 | start = max(range->start, cache->key.objectid); | |
7386 | end = min(range->start + range->len, | |
7387 | cache->key.objectid + cache->key.offset); | |
7388 | ||
7389 | if (end - start >= range->minlen) { | |
7390 | if (!block_group_cache_done(cache)) { | |
7391 | ret = cache_block_group(cache, NULL, root, 0); | |
7392 | if (!ret) | |
7393 | wait_block_group_cache_done(cache); | |
7394 | } | |
7395 | ret = btrfs_trim_block_group(cache, | |
7396 | &group_trimmed, | |
7397 | start, | |
7398 | end, | |
7399 | range->minlen); | |
7400 | ||
7401 | trimmed += group_trimmed; | |
7402 | if (ret) { | |
7403 | btrfs_put_block_group(cache); | |
7404 | break; | |
7405 | } | |
7406 | } | |
7407 | ||
7408 | cache = next_block_group(fs_info->tree_root, cache); | |
7409 | } | |
7410 | ||
7411 | range->len = trimmed; | |
7412 | return ret; | |
7413 | } |
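/*
 * Illustrative sketch of driving btrfs_trim_fs(), e.g. from an FITRIM
 * handler (the wrapper itself is an assumption for the example, not
 * code from this file). On return, range.len reports the number of
 * bytes actually trimmed.
 */
static int example_trim_whole_fs(struct btrfs_root *root)
{
	struct fstrim_range range = {
		.start = 0,
		.len = (u64)-1,	/* walk every block group */
		.minlen = 4096,	/* skip free extents smaller than 4K */
	};
	int ret;

	ret = btrfs_trim_fs(root, &range);
	if (!ret)
		printk(KERN_INFO "btrfs: trimmed %llu bytes\n",
		       (unsigned long long)range.len);
	return ret;
}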