/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include "hash.h"
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "ref-cache.h"

#define BLOCK_GROUP_DATA     EXTENT_WRITEBACK
#define BLOCK_GROUP_METADATA EXTENT_UPTODATE
#define BLOCK_GROUP_SYSTEM   EXTENT_NEW

#define BLOCK_GROUP_DIRTY EXTENT_DIRTY
static int finish_current_insert(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *extent_root);
static int del_pending_extents(struct btrfs_trans_handle *trans,
                               struct btrfs_root *extent_root);
static struct btrfs_block_group_cache *
__btrfs_find_block_group(struct btrfs_root *root,
                         struct btrfs_block_group_cache *hint,
                         u64 search_start, int data, int owner);
void maybe_lock_mutex(struct btrfs_root *root)
{
        if (root != root->fs_info->extent_root &&
            root != root->fs_info->chunk_root &&
            root != root->fs_info->dev_root) {
                mutex_lock(&root->fs_info->alloc_mutex);
        }
}

void maybe_unlock_mutex(struct btrfs_root *root)
{
        if (root != root->fs_info->extent_root &&
            root != root->fs_info->chunk_root &&
            root != root->fs_info->dev_root) {
                mutex_unlock(&root->fs_info->alloc_mutex);
        }
}

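/*
 * The maybe_lock/maybe_unlock helpers above take the alloc_mutex only
 * for roots that are not already covered by it (the extent, chunk and
 * dev roots are modified with the mutex held).  A typical caller wraps
 * the unlocked worker like this, see btrfs_free_extent() below:
 *
 *        maybe_lock_mutex(root);
 *        ret = __btrfs_free_extent(trans, root, bytenr, num_bytes, ...);
 *        maybe_unlock_mutex(root);
 */
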
static int cache_block_group(struct btrfs_root *root,
                             struct btrfs_block_group_cache *block_group)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct extent_io_tree *free_space_cache;
        int slot;
        u64 last = 0;
        u64 hole_size;
        u64 first_free;
        int found = 0;

        if (!block_group)
                return 0;

        root = root->fs_info->extent_root;
        free_space_cache = &root->fs_info->free_space_cache;

        if (block_group->cached)
                return 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->reada = 2;
        /*
         * we get into deadlocks with paths held by callers of this function.
         * since the alloc_mutex is protecting things right now, just
         * skip the locking here
         */
        path->skip_locking = 1;
        first_free = block_group->key.objectid;
        key.objectid = block_group->key.objectid;
        key.offset = 0;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto err;
        ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
        if (ret < 0)
                goto err;
        if (ret == 0) {
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid + key.offset > first_free)
                        first_free = key.objectid + key.offset;
        }
        while (1) {
                leaf = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                goto err;
                        if (ret == 0)
                                continue;
                        else
                                break;
                }
                btrfs_item_key_to_cpu(leaf, &key, slot);
                if (key.objectid < block_group->key.objectid)
                        goto next;

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
                        if (!found) {
                                last = first_free;
                                found = 1;
                        }
                        if (key.objectid > last) {
                                hole_size = key.objectid - last;
                                set_extent_dirty(free_space_cache, last,
                                                 last + hole_size - 1,
                                                 GFP_NOFS);
                        }
                        last = key.objectid + key.offset;
                }
next:
                path->slots[0]++;
        }

        if (!found)
                last = first_free;
        if (block_group->key.objectid +
            block_group->key.offset > last) {
                hole_size = block_group->key.objectid +
                            block_group->key.offset - last;
                set_extent_dirty(free_space_cache, last,
                                 last + hole_size - 1, GFP_NOFS);
        }
        block_group->cached = 1;
        ret = 0;
err:
        /* free the path on every exit, including the error returns above */
        btrfs_free_path(path);
        return ret;
}

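/*
 * A small worked example of the scan in cache_block_group() above
 * (values are illustrative): for a block group covering [1M, 2M) whose
 * extent items are (1M, len 128K) and (1.5M, len 64K), the holes
 * recorded in free_space_cache as EXTENT_DIRTY are
 * [1M + 128K, 1.5M - 1] and [1.5M + 64K, 2M - 1].  Allocation later
 * walks exactly these dirty ranges in find_search_start().
 */
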
struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
                                        struct btrfs_fs_info *info, u64 bytenr)
{
        struct extent_io_tree *block_group_cache;
        struct btrfs_block_group_cache *block_group = NULL;
        u64 ptr;
        u64 start;
        u64 end;
        int ret;

        bytenr = max_t(u64, bytenr,
                       BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
        block_group_cache = &info->block_group_cache;
        ret = find_first_extent_bit(block_group_cache,
                                    bytenr, &start, &end,
                                    BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
                                    BLOCK_GROUP_SYSTEM);
        if (ret)
                return NULL;
        ret = get_state_private(block_group_cache, start, &ptr);
        if (ret)
                return NULL;

        block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
        return block_group;
}

struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                        struct btrfs_fs_info *info, u64 bytenr)
{
        struct extent_io_tree *block_group_cache;
        struct btrfs_block_group_cache *block_group = NULL;
        u64 ptr;
        u64 start;
        u64 end;
        int ret;

        bytenr = max_t(u64, bytenr,
                       BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
        block_group_cache = &info->block_group_cache;
        ret = find_first_extent_bit(block_group_cache,
                                    bytenr, &start, &end,
                                    BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
                                    BLOCK_GROUP_SYSTEM);
        if (ret)
                return NULL;
        ret = get_state_private(block_group_cache, start, &ptr);
        if (ret)
                return NULL;

        block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
        /* unlike the _first_ variant, only return the group that
         * actually contains bytenr
         */
        if (block_group->key.objectid <= bytenr && bytenr <
            block_group->key.objectid + block_group->key.offset)
                return block_group;
        return NULL;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static int noinline find_search_start(struct btrfs_root *root,
                              struct btrfs_block_group_cache **cache_ret,
                              u64 *start_ret, u64 num, int data)
{
        int ret;
        struct btrfs_block_group_cache *cache = *cache_ret;
        struct extent_io_tree *free_space_cache;
        struct extent_state *state;
        u64 last;
        u64 start = 0;
        u64 cache_miss = 0;
        u64 total_fs_bytes;
        u64 search_start = *start_ret;
        int wrapped = 0;

        WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
        total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
        free_space_cache = &root->fs_info->free_space_cache;

        if (!cache)
                goto out;

again:
        ret = cache_block_group(root, cache);
        if (ret)
                goto out;

        last = max(search_start, cache->key.objectid);
        if (!block_group_bits(cache, data) || cache->ro)
                goto new_group;

        spin_lock_irq(&free_space_cache->lock);
        state = find_first_extent_bit_state(free_space_cache, last,
                                            EXTENT_DIRTY);
        while (1) {
                if (!state) {
                        if (!cache_miss)
                                cache_miss = last;
                        spin_unlock_irq(&free_space_cache->lock);
                        goto new_group;
                }

                start = max(last, state->start);
                last = state->end + 1;
                if (last - start < num) {
                        do {
                                state = extent_state_next(state);
                        } while (state && !(state->state & EXTENT_DIRTY));
                        continue;
                }
                spin_unlock_irq(&free_space_cache->lock);
                if (cache->ro)
                        goto new_group;
                if (start + num > cache->key.objectid + cache->key.offset)
                        goto new_group;
                if (!block_group_bits(cache, data)) {
                        printk("block group bits don't match %Lu %d\n",
                               cache->flags, data);
                }
                *start_ret = start;
                return 0;
        }
out:
        cache = btrfs_lookup_block_group(root->fs_info, search_start);
        if (!cache) {
                printk("Unable to find block group for %Lu\n", search_start);
                WARN_ON(1);
        }
        return -ENOSPC;

new_group:
        last = cache->key.objectid + cache->key.offset;
wrapped:
        cache = btrfs_lookup_first_block_group(root->fs_info, last);
        if (!cache || cache->key.objectid >= total_fs_bytes) {
no_cache:
                if (!wrapped) {
                        wrapped = 1;
                        last = search_start;
                        goto wrapped;
                }
                goto out;
        }
        if (cache_miss && !cache->cached) {
                cache_block_group(root, cache);
                last = cache_miss;
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
        }
        cache_miss = 0;
        cache = btrfs_find_block_group(root, cache, last, data, 0);
        if (!cache)
                goto no_cache;
        *cache_ret = cache;
        goto again;
}

static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        do_div(num, 10);
        return num;
}

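/*
 * div_factor() computes num * factor / 10.  For example, with
 * factor == 9 (the metadata case in __btrfs_find_block_group() below),
 * div_factor(1GB, 9) is roughly 90% of the block group size, so a
 * group only counts as a candidate while used + pinned stays under
 * 90% of its capacity.
 */
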
static int block_group_state_bits(u64 flags)
{
        int bits = 0;
        if (flags & BTRFS_BLOCK_GROUP_DATA)
                bits |= BLOCK_GROUP_DATA;
        if (flags & BTRFS_BLOCK_GROUP_METADATA)
                bits |= BLOCK_GROUP_METADATA;
        if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                bits |= BLOCK_GROUP_SYSTEM;
        return bits;
}

static struct btrfs_block_group_cache *
__btrfs_find_block_group(struct btrfs_root *root,
                         struct btrfs_block_group_cache *hint,
                         u64 search_start, int data, int owner)
{
        struct btrfs_block_group_cache *cache;
        struct extent_io_tree *block_group_cache;
        struct btrfs_block_group_cache *found_group = NULL;
        struct btrfs_fs_info *info = root->fs_info;
        u64 used;
        u64 last = 0;
        u64 start;
        u64 end;
        u64 free_check;
        u64 ptr;
        int bit;
        int ret;
        int full_search = 0;
        int factor = 10;
        int wrapped = 0;

        block_group_cache = &info->block_group_cache;

        if (data & BTRFS_BLOCK_GROUP_METADATA)
                factor = 9;

        bit = block_group_state_bits(data);

        if (search_start) {
                struct btrfs_block_group_cache *shint;
                shint = btrfs_lookup_first_block_group(info, search_start);
                if (shint && block_group_bits(shint, data) && !shint->ro) {
                        spin_lock(&shint->lock);
                        used = btrfs_block_group_used(&shint->item);
                        if (used + shint->pinned <
                            div_factor(shint->key.offset, factor)) {
                                spin_unlock(&shint->lock);
                                return shint;
                        }
                        spin_unlock(&shint->lock);
                }
        }
        if (hint && !hint->ro && block_group_bits(hint, data)) {
                spin_lock(&hint->lock);
                used = btrfs_block_group_used(&hint->item);
                if (used + hint->pinned <
                    div_factor(hint->key.offset, factor)) {
                        spin_unlock(&hint->lock);
                        return hint;
                }
                spin_unlock(&hint->lock);
                last = hint->key.objectid + hint->key.offset;
        } else {
                if (hint)
                        last = max(hint->key.objectid, search_start);
                else
                        last = search_start;
        }
again:
        while (1) {
                ret = find_first_extent_bit(block_group_cache, last,
                                            &start, &end, bit);
                if (ret)
                        break;

                ret = get_state_private(block_group_cache, start, &ptr);
                if (ret) {
                        last = end + 1;
                        continue;
                }

                cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if (!cache->ro && block_group_bits(cache, data)) {
                        free_check = div_factor(cache->key.offset, factor);
                        if (used + cache->pinned < free_check) {
                                found_group = cache;
                                spin_unlock(&cache->lock);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return found_group;
}

struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
                                                struct btrfs_block_group_cache
                                                *hint, u64 search_start,
                                                int data, int owner)
{
        struct btrfs_block_group_cache *ret;
        ret = __btrfs_find_block_group(root, hint, search_start, data, owner);
        return ret;
}

static u64 hash_extent_ref(u64 root_objectid, u64 ref_generation,
                           u64 owner, u64 owner_offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(ref_generation);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
        if (owner >= BTRFS_FIRST_FREE_OBJECTID) {
                lenum = cpu_to_le64(owner);
                low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
                lenum = cpu_to_le64(owner_offset);
                low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
        }
        return ((u64)high_crc << 32) | (u64)low_crc;
}

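/*
 * hash_extent_ref() splits the 64 bit result in two halves: the high
 * 32 bits are a crc32c of the root objectid alone, the low 32 bits
 * fold in the generation (and, for file extents, the owner inode and
 * offset).  So two refs to the same extent from the same root sort
 * next to each other in the key space; for example
 *
 *        hash_extent_ref(5, 100, 257, 0)
 *        hash_extent_ref(5, 101, 257, 0)
 *
 * share their high 32 bits and differ only in the low half (values
 * here are illustrative).
 */
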
static int match_extent_ref(struct extent_buffer *leaf,
                            struct btrfs_extent_ref *disk_ref,
                            struct btrfs_extent_ref *cpu_ref)
{
        int ret;
        int len;

        /* refs without an owner objectid only compare root and generation */
        if (cpu_ref->objectid)
                len = sizeof(*cpu_ref);
        else
                len = 2 * sizeof(u64);
        ret = memcmp_extent_buffer(leaf, cpu_ref, (unsigned long)disk_ref,
                                   len);
        return ret == 0;
}

static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path, u64 bytenr,
                                          u64 root_objectid,
                                          u64 ref_generation, u64 owner,
                                          u64 owner_offset, int del)
{
        u64 hash;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_extent_ref ref;
        struct extent_buffer *leaf;
        struct btrfs_extent_ref *disk_ref;
        int ret;
        int ret2;

        btrfs_set_stack_ref_root(&ref, root_objectid);
        btrfs_set_stack_ref_generation(&ref, ref_generation);
        btrfs_set_stack_ref_objectid(&ref, owner);
        btrfs_set_stack_ref_offset(&ref, owner_offset);

        hash = hash_extent_ref(root_objectid, ref_generation, owner,
                               owner_offset);
        key.offset = hash;
        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_REF_KEY;

        while (1) {
                ret = btrfs_search_slot(trans, root, &key, path,
                                        del ? -1 : 0, del);
                if (ret < 0)
                        goto out;
                leaf = path->nodes[0];
                if (ret != 0) {
                        u32 nritems = btrfs_header_nritems(leaf);
                        if (path->slots[0] >= nritems) {
                                ret2 = btrfs_next_leaf(root, path);
                                if (ret2)
                                        goto out;
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        if (found_key.objectid != bytenr ||
                            found_key.type != BTRFS_EXTENT_REF_KEY)
                                goto out;
                        key.offset = found_key.offset;
                        if (del) {
                                btrfs_release_path(root, path);
                                continue;
                        }
                }
                disk_ref = btrfs_item_ptr(path->nodes[0],
                                          path->slots[0],
                                          struct btrfs_extent_ref);
                if (match_extent_ref(path->nodes[0], disk_ref, &ref)) {
                        ret = 0;
                        goto out;
                }
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                key.offset = found_key.offset + 1;
                btrfs_release_path(root, path);
        }
out:
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume (in theory, not implemented yet)
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure has fields for:
 *
 * - Objectid of the subvolume root
 * - Generation number of the tree holding the reference
 * - objectid of the file holding the reference
 * - offset in the file corresponding to the key holding the reference
 *
 * When a file extent is allocated the fields are filled in:
 *     (root_key.objectid, trans->transid, inode objectid, offset in file)
 *
 * When a leaf is cow'd new references are added for every file extent found
 * in the leaf.  It looks the same as the create case, but trans->transid
 * will be different when the block is cow'd.
 *
 *     (root_key.objectid, trans->transid, inode objectid, offset in file)
 *
 * When a file extent is removed either during snapshot deletion or file
 * truncation, the corresponding back reference is found
 * by searching for:
 *
 *     (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
 *      inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 * - Different generations of the same subvolume
 *
 * Storing sufficient information for a full reverse mapping of a btree
 * block would require storing the lowest key of the block in the backref,
 * and it would require updating that lowest key either before write out or
 * every time it changed.  Instead, the objectid of the lowest key is stored
 * along with the level of the tree block.  This provides a hint
 * about where in the btree the block can be found.  Searches through the
 * btree only need to look for a pointer to that block, so they stop one
 * level higher than the level recorded in the backref.
 *
 * Some btrees do not do reference counting on their extents.  These
 * include the extent tree and the tree of tree roots.  Backrefs for these
 * trees always have a generation of zero.
 *
 * When a tree block is created, back references are inserted:
 *
 * (root->root_key.objectid, trans->transid or zero, level,
 *  lowest_key_objectid)
 *
 * When a tree block is cow'd in a reference counted root,
 * new back references are added for all the blocks it points to.
 * These are of the form (trans->transid will have increased since creation):
 *
 * (root->root_key.objectid, trans->transid, level, lowest_key_objectid)
 *
 * Because the lowest_key_objectid and the level are just hints
 * they are not used when backrefs are deleted.  When a backref is deleted:
 *
 * if backref was for a tree root:
 *     root_objectid = root->root_key.objectid
 * else
 *     root_objectid = btrfs_header_owner(parent)
 *
 * (root_objectid, btrfs_header_generation(parent) or zero, 0, 0)
 *
 * Back Reference Key hashing:
 *
 * Back references have four fields, each 64 bits long.  Unfortunately,
 * this is hashed into a single 64 bit number and placed into the key offset.
 * The key objectid corresponds to the first byte in the extent, and the
 * key type is set to BTRFS_EXTENT_REF_KEY
 */
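/*
 * Putting the pieces together with illustrative values: a file extent
 * starting at byte 12582912, referenced by inode 257 at file offset 0
 * in root 5, generation 10, gets the key
 *
 *        (12582912, BTRFS_EXTENT_REF_KEY, hash_extent_ref(5, 10, 257, 0))
 *
 * with a struct btrfs_extent_ref item carrying the four raw fields.
 * Hash collisions are handled by linear probing on the key offset, in
 * both the insert loop below and lookup_extent_backref() above.
 */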
int btrfs_insert_extent_backref(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_path *path, u64 bytenr,
                                u64 root_objectid, u64 ref_generation,
                                u64 owner, u64 owner_offset)
{
        u64 hash;
        struct btrfs_key key;
        struct btrfs_extent_ref ref;
        struct btrfs_extent_ref *disk_ref;
        int ret;

        btrfs_set_stack_ref_root(&ref, root_objectid);
        btrfs_set_stack_ref_generation(&ref, ref_generation);
        btrfs_set_stack_ref_objectid(&ref, owner);
        btrfs_set_stack_ref_offset(&ref, owner_offset);

        hash = hash_extent_ref(root_objectid, ref_generation, owner,
                               owner_offset);
        key.offset = hash;
        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_REF_KEY;

        ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(ref));
        while (ret == -EEXIST) {
                disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                          struct btrfs_extent_ref);
                if (match_extent_ref(path->nodes[0], disk_ref, &ref))
                        goto out;
                key.offset++;
                btrfs_release_path(root, path);
                ret = btrfs_insert_empty_item(trans, root, path, &key,
                                              sizeof(ref));
        }
        if (ret)
                goto out;
        disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                  struct btrfs_extent_ref);
        write_extent_buffer(path->nodes[0], &ref, (unsigned long)disk_ref,
                            sizeof(ref));
        btrfs_mark_buffer_dirty(path->nodes[0]);
out:
        btrfs_release_path(root, path);
        return ret;
}

static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  u64 bytenr, u64 num_bytes,
                                  u64 root_objectid, u64 ref_generation,
                                  u64 owner, u64 owner_offset)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_extent_item *item;
        u32 refs;

        WARN_ON(num_bytes < root->sectorsize);
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->reada = 1;
        key.objectid = bytenr;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        key.offset = num_bytes;
        ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
                                0, 1);
        if (ret < 0) {
                btrfs_free_path(path);
                return ret;
        }
        BUG_ON(ret != 0);
        l = path->nodes[0];
        item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(l, item);
        btrfs_set_extent_refs(l, item, refs + 1);
        btrfs_mark_buffer_dirty(path->nodes[0]);

        btrfs_release_path(root->fs_info->extent_root, path);

        path->reada = 1;
        ret = btrfs_insert_extent_backref(trans, root->fs_info->extent_root,
                                          path, bytenr, root_objectid,
                                          ref_generation, owner, owner_offset);
        BUG_ON(ret);
        finish_current_insert(trans, root->fs_info->extent_root);
        del_pending_extents(trans, root->fs_info->extent_root);

        btrfs_free_path(path);
        return 0;
}

int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root,
                         u64 bytenr, u64 num_bytes,
                         u64 root_objectid, u64 ref_generation,
                         u64 owner, u64 owner_offset)
{
        int ret;

        mutex_lock(&root->fs_info->alloc_mutex);
        ret = __btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
                                     root_objectid, ref_generation,
                                     owner, owner_offset);
        mutex_unlock(&root->fs_info->alloc_mutex);
        return ret;
}

int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root)
{
        finish_current_insert(trans, root->fs_info->extent_root);
        del_pending_extents(trans, root->fs_info->extent_root);
        return 0;
}

static int lookup_extent_ref(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 num_bytes, u32 *refs)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_extent_item *item;

        WARN_ON(num_bytes < root->sectorsize);
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->reada = 1;
        key.objectid = bytenr;
        key.offset = num_bytes;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
                                0, 0);
        if (ret < 0)
                goto out;
        if (ret != 0) {
                btrfs_print_leaf(root, path->nodes[0]);
                printk("failed to find block number %Lu\n", bytenr);
                BUG();
        }
        l = path->nodes[0];
        item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
        *refs = btrfs_extent_refs(l, item);
out:
        btrfs_free_path(path);
        return ret;
}

u32 btrfs_count_snapshots_in_path(struct btrfs_root *root,
                                  struct btrfs_path *count_path,
                                  u64 expected_owner,
                                  u64 first_extent)
{
        struct btrfs_root *extent_root = root->fs_info->extent_root;
        struct btrfs_path *path;
        u64 bytenr;
        u64 found_objectid;
        u64 found_owner;
        u64 root_objectid = root->root_key.objectid;
        u32 total_count = 0;
        u32 extent_refs;
        u32 cur_count;
        u32 nritems;
        int ret;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct extent_buffer *l;
        struct btrfs_extent_item *item;
        struct btrfs_extent_ref *ref_item;
        int level = -1;

        /* FIXME, needs locking */
        BUG();

        mutex_lock(&root->fs_info->alloc_mutex);
        path = btrfs_alloc_path();
again:
        if (level == -1)
                bytenr = first_extent;
        else
                bytenr = count_path->nodes[level]->start;

        cur_count = 0;
        key.objectid = bytenr;
        key.offset = 0;

        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        BUG_ON(ret == 0);

        l = path->nodes[0];
        btrfs_item_key_to_cpu(l, &found_key, path->slots[0]);

        if (found_key.objectid != bytenr ||
            found_key.type != BTRFS_EXTENT_ITEM_KEY) {
                goto out;
        }

        item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
        extent_refs = btrfs_extent_refs(l, item);
        while (1) {
                l = path->nodes[0];
                nritems = btrfs_header_nritems(l);
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret == 0)
                                continue;
                        break;
                }
                btrfs_item_key_to_cpu(l, &found_key, path->slots[0]);
                if (found_key.objectid != bytenr)
                        break;

                if (found_key.type != BTRFS_EXTENT_REF_KEY) {
                        path->slots[0]++;
                        continue;
                }

                cur_count++;
                ref_item = btrfs_item_ptr(l, path->slots[0],
                                          struct btrfs_extent_ref);
                found_objectid = btrfs_ref_root(l, ref_item);

                if (found_objectid != root_objectid) {
                        total_count = 2;
                        goto out;
                }
                if (level == -1) {
                        found_owner = btrfs_ref_objectid(l, ref_item);
                        if (found_owner != expected_owner) {
                                total_count = 2;
                                goto out;
                        }
                        /*
                         * nasty.  we don't count a reference held by
                         * the running transaction.  This allows nodatacow
                         * to avoid cow most of the time
                         */
                        if (found_owner >= BTRFS_FIRST_FREE_OBJECTID &&
                            btrfs_ref_generation(l, ref_item) ==
                            root->fs_info->generation) {
                                extent_refs--;
                        }
                }
                total_count = 1;
                path->slots[0]++;
        }
        /*
         * if there is more than one reference against a data extent,
         * we have to assume the other ref is another snapshot
         */
        if (level == -1 && extent_refs > 1) {
                total_count = 2;
                goto out;
        }
        if (cur_count == 0) {
                total_count = 0;
                goto out;
        }
        if (level >= 0 && root->node == count_path->nodes[level])
                goto out;
        level++;
        btrfs_release_path(root, path);
        goto again;

out:
        btrfs_free_path(path);
        mutex_unlock(&root->fs_info->alloc_mutex);
        return total_count;
}

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                  struct extent_buffer *buf, int cache_ref)
{
        u64 bytenr;
        u32 nritems;
        struct btrfs_key key;
        struct btrfs_file_extent_item *fi;
        int i;
        int level;
        int ret;
        int faili;
        int nr_file_extents = 0;

        if (!root->ref_cows)
                return 0;

        level = btrfs_header_level(buf);
        nritems = btrfs_header_nritems(buf);
        for (i = 0; i < nritems; i++) {
                cond_resched();
                if (level == 0) {
                        u64 disk_bytenr;
                        btrfs_item_key_to_cpu(buf, &key, i);
                        if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
                                continue;
                        fi = btrfs_item_ptr(buf, i,
                                            struct btrfs_file_extent_item);
                        if (btrfs_file_extent_type(buf, fi) ==
                            BTRFS_FILE_EXTENT_INLINE)
                                continue;
                        disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
                        if (disk_bytenr == 0)
                                continue;

                        if (buf != root->commit_root)
                                nr_file_extents++;

                        mutex_lock(&root->fs_info->alloc_mutex);
                        ret = __btrfs_inc_extent_ref(trans, root, disk_bytenr,
                                    btrfs_file_extent_disk_num_bytes(buf, fi),
                                    root->root_key.objectid, trans->transid,
                                    key.objectid, key.offset);
                        mutex_unlock(&root->fs_info->alloc_mutex);
                        if (ret) {
                                faili = i;
                                WARN_ON(1);
                                goto fail;
                        }
                } else {
                        bytenr = btrfs_node_blockptr(buf, i);
                        btrfs_node_key_to_cpu(buf, &key, i);

                        mutex_lock(&root->fs_info->alloc_mutex);
                        ret = __btrfs_inc_extent_ref(trans, root, bytenr,
                                           btrfs_level_size(root, level - 1),
                                           root->root_key.objectid,
                                           trans->transid,
                                           level - 1, key.objectid);
                        mutex_unlock(&root->fs_info->alloc_mutex);
                        if (ret) {
                                faili = i;
                                WARN_ON(1);
                                goto fail;
                        }
                }
        }
        /* cache the original leaf block's references */
        if (level == 0 && cache_ref && buf != root->commit_root) {
                struct btrfs_leaf_ref *ref;
                struct btrfs_extent_info *info;

                ref = btrfs_alloc_leaf_ref(nr_file_extents);
                if (!ref) {
                        WARN_ON(1);
                        goto out;
                }

                btrfs_item_key_to_cpu(buf, &ref->key, 0);

                ref->bytenr = buf->start;
                ref->owner = btrfs_header_owner(buf);
                ref->generation = btrfs_header_generation(buf);
                ref->nritems = nr_file_extents;
                info = ref->extents;

                for (i = 0; nr_file_extents > 0 && i < nritems; i++) {
                        u64 disk_bytenr;
                        btrfs_item_key_to_cpu(buf, &key, i);
                        if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
                                continue;
                        fi = btrfs_item_ptr(buf, i,
                                            struct btrfs_file_extent_item);
                        if (btrfs_file_extent_type(buf, fi) ==
                            BTRFS_FILE_EXTENT_INLINE)
                                continue;
                        disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
                        if (disk_bytenr == 0)
                                continue;

                        info->bytenr = disk_bytenr;
                        info->num_bytes =
                                btrfs_file_extent_disk_num_bytes(buf, fi);
                        info->objectid = key.objectid;
                        info->offset = key.offset;
                        info++;
                }

                BUG_ON(!root->ref_tree);
                ret = btrfs_add_leaf_ref(root, ref);
                WARN_ON(ret);
                btrfs_free_leaf_ref(ref);
        }
out:
        return 0;
fail:
        WARN_ON(1);
#if 0
        for (i = 0; i < faili; i++) {
                if (level == 0) {
                        u64 disk_bytenr;
                        btrfs_item_key_to_cpu(buf, &key, i);
                        if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
                                continue;
                        fi = btrfs_item_ptr(buf, i,
                                            struct btrfs_file_extent_item);
                        if (btrfs_file_extent_type(buf, fi) ==
                            BTRFS_FILE_EXTENT_INLINE)
                                continue;
                        disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
                        if (disk_bytenr == 0)
                                continue;
                        err = btrfs_free_extent(trans, root, disk_bytenr,
                                    btrfs_file_extent_disk_num_bytes(buf,
                                                                     fi), 0);
                        BUG_ON(err);
                } else {
                        bytenr = btrfs_node_blockptr(buf, i);
                        err = btrfs_free_extent(trans, root, bytenr,
                                        btrfs_level_size(root, level - 1), 0);
                        BUG_ON(err);
                }
        }
#endif
        return ret;
}

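/*
 * The leaf reference cache filled in above records, per cow'd leaf,
 * the disk extents its file extent items point to, so later tree
 * walks can drop those references without reading the leaf back from
 * disk.  A cached entry is essentially a snapshot of the second loop,
 * e.g. for a leaf with two real file extents (illustrative):
 *
 *        ref->bytenr     = buf->start;
 *        ref->nritems    = 2;
 *        ref->extents[0] = { .bytenr, .num_bytes, .objectid, .offset };
 *        ref->extents[1] = { ... };
 *
 * btrfs_alloc_leaf_ref(), btrfs_add_leaf_ref() and
 * btrfs_free_leaf_ref() live in ref-cache.h, included at the top of
 * this file.
 */
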
static int write_one_cache_group(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_block_group_cache *cache)
{
        int ret;
        int pending_ret;
        struct btrfs_root *extent_root = root->fs_info->extent_root;
        unsigned long bi;
        struct extent_buffer *leaf;

        ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
        if (ret < 0)
                goto fail;
        BUG_ON(ret);

        leaf = path->nodes[0];
        bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
        write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(extent_root, path);
fail:
        finish_current_insert(trans, extent_root);
        pending_ret = del_pending_extents(trans, extent_root);
        if (ret)
                return ret;
        if (pending_ret)
                return pending_ret;
        return 0;
}

int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        struct extent_io_tree *block_group_cache;
        struct btrfs_block_group_cache *cache;
        int ret;
        int err = 0;
        int werr = 0;
        struct btrfs_path *path;
        u64 last = 0;
        u64 start;
        u64 end;
        u64 ptr;

        block_group_cache = &root->fs_info->block_group_cache;
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        mutex_lock(&root->fs_info->alloc_mutex);
        while (1) {
                ret = find_first_extent_bit(block_group_cache, last,
                                            &start, &end, BLOCK_GROUP_DIRTY);
                if (ret)
                        break;

                last = end + 1;
                ret = get_state_private(block_group_cache, start, &ptr);
                if (ret)
                        break;
                cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
                err = write_one_cache_group(trans, root,
                                            path, cache);
                /*
                 * if we fail to write the cache group, we want
                 * to keep it marked dirty in hopes that a later
                 * write will work
                 */
                if (err) {
                        werr = err;
                        continue;
                }
                clear_extent_bits(block_group_cache, start, end,
                                  BLOCK_GROUP_DIRTY, GFP_NOFS);
        }
        btrfs_free_path(path);
        mutex_unlock(&root->fs_info->alloc_mutex);
        return werr;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct list_head *cur;
        struct btrfs_space_info *found;

        list_for_each(cur, head) {
                found = list_entry(cur, struct btrfs_space_info, list);
                if (found->flags == flags)
                        return found;
        }
        return NULL;
}

static int update_space_info(struct btrfs_fs_info *info, u64 flags,
                             u64 total_bytes, u64 bytes_used,
                             struct btrfs_space_info **space_info)
{
        struct btrfs_space_info *found;

        found = __find_space_info(info, flags);
        if (found) {
                found->total_bytes += total_bytes;
                found->bytes_used += bytes_used;
                found->full = 0;
                WARN_ON(found->total_bytes < found->bytes_used);
                *space_info = found;
                return 0;
        }
        found = kmalloc(sizeof(*found), GFP_NOFS);
        if (!found)
                return -ENOMEM;

        list_add(&found->list, &info->space_info);
        found->flags = flags;
        found->total_bytes = total_bytes;
        found->bytes_used = bytes_used;
        found->bytes_pinned = 0;
        found->full = 0;
        found->force_alloc = 0;
        *space_info = found;
        return 0;
}

static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
        u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
                                   BTRFS_BLOCK_GROUP_RAID1 |
                                   BTRFS_BLOCK_GROUP_RAID10 |
                                   BTRFS_BLOCK_GROUP_DUP);
        if (extra_flags) {
                if (flags & BTRFS_BLOCK_GROUP_DATA)
                        fs_info->avail_data_alloc_bits |= extra_flags;
                if (flags & BTRFS_BLOCK_GROUP_METADATA)
                        fs_info->avail_metadata_alloc_bits |= extra_flags;
                if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                        fs_info->avail_system_alloc_bits |= extra_flags;
        }
}

static u64 reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
        u64 num_devices = root->fs_info->fs_devices->num_devices;

        if (num_devices == 1)
                flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
        if (num_devices < 4)
                flags &= ~BTRFS_BLOCK_GROUP_RAID10;

        if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
            (flags & (BTRFS_BLOCK_GROUP_RAID1 |
                      BTRFS_BLOCK_GROUP_RAID10))) {
                flags &= ~BTRFS_BLOCK_GROUP_DUP;
        }

        if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
            (flags & BTRFS_BLOCK_GROUP_RAID10)) {
                flags &= ~BTRFS_BLOCK_GROUP_RAID1;
        }

        if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
            (flags & (BTRFS_BLOCK_GROUP_RAID1 |
                      BTRFS_BLOCK_GROUP_RAID10 |
                      BTRFS_BLOCK_GROUP_DUP)))
                flags &= ~BTRFS_BLOCK_GROUP_RAID0;
        return flags;
}

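/*
 * Example: on a single-device filesystem,
 *
 *        reduce_alloc_profile(root, BTRFS_BLOCK_GROUP_METADATA |
 *                                   BTRFS_BLOCK_GROUP_RAID1 |
 *                                   BTRFS_BLOCK_GROUP_DUP)
 *
 * first strips RAID1 (one device cannot mirror across devices), and
 * DUP then survives the mutual-exclusion checks, so the caller ends
 * up allocating a metadata chunk with two copies on the same device.
 */
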
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
                          u64 flags, int force)
{
        struct btrfs_space_info *space_info;
        u64 thresh;
        u64 start;
        u64 num_bytes;
        int ret;

        flags = reduce_alloc_profile(extent_root, flags);

        space_info = __find_space_info(extent_root->fs_info, flags);
        if (!space_info) {
                ret = update_space_info(extent_root->fs_info, flags,
                                        0, 0, &space_info);
                BUG_ON(ret);
        }
        BUG_ON(!space_info);

        if (space_info->force_alloc) {
                force = 1;
                space_info->force_alloc = 0;
        }
        if (space_info->full)
                goto out;

        /* only allocate a new chunk once the space is about 60% used */
        thresh = div_factor(space_info->total_bytes, 6);
        if (!force &&
            (space_info->bytes_used + space_info->bytes_pinned +
             alloc_bytes) < thresh)
                goto out;

        mutex_lock(&extent_root->fs_info->chunk_mutex);
        ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
        if (ret == -ENOSPC) {
                printk("space info full %Lu\n", flags);
                space_info->full = 1;
                goto out_unlock;
        }
        BUG_ON(ret);

        ret = btrfs_make_block_group(trans, extent_root, 0, flags,
                     BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
        BUG_ON(ret);
out_unlock:
        mutex_unlock(&extent_root->fs_info->chunk_mutex);
out:
        return 0;
}

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc,
                              int mark_free)
{
        struct btrfs_block_group_cache *cache;
        struct btrfs_fs_info *info = root->fs_info;
        u64 total = num_bytes;
        u64 old_val;
        u64 byte_in_group;
        u64 start;
        u64 end;

        WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
        while (total) {
                cache = btrfs_lookup_block_group(info, bytenr);
                if (!cache)
                        return -1;
                byte_in_group = bytenr - cache->key.objectid;
                WARN_ON(byte_in_group > cache->key.offset);
                start = cache->key.objectid;
                end = start + cache->key.offset - 1;
                set_extent_bits(&info->block_group_cache, start, end,
                                BLOCK_GROUP_DIRTY, GFP_NOFS);

                spin_lock(&cache->lock);
                old_val = btrfs_block_group_used(&cache->item);
                num_bytes = min(total, cache->key.offset - byte_in_group);
                if (alloc) {
                        old_val += num_bytes;
                        cache->space_info->bytes_used += num_bytes;
                        btrfs_set_block_group_used(&cache->item, old_val);
                        spin_unlock(&cache->lock);
                } else {
                        old_val -= num_bytes;
                        cache->space_info->bytes_used -= num_bytes;
                        btrfs_set_block_group_used(&cache->item, old_val);
                        spin_unlock(&cache->lock);
                        if (mark_free) {
                                set_extent_dirty(&info->free_space_cache,
                                                 bytenr,
                                                 bytenr + num_bytes - 1,
                                                 GFP_NOFS);
                        }
                }
                total -= num_bytes;
                bytenr += num_bytes;
        }
        return 0;
}

static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
        u64 start;
        u64 end;
        int ret;

        ret = find_first_extent_bit(&root->fs_info->block_group_cache,
                                    search_start, &start, &end,
                                    BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
                                    BLOCK_GROUP_SYSTEM);
        if (ret)
                return 0;
        return start;
}

static int update_pinned_extents(struct btrfs_root *root,
                                 u64 bytenr, u64 num, int pin)
{
        u64 len;
        struct btrfs_block_group_cache *cache;
        struct btrfs_fs_info *fs_info = root->fs_info;

        WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
        if (pin) {
                set_extent_dirty(&fs_info->pinned_extents,
                                 bytenr, bytenr + num - 1, GFP_NOFS);
        } else {
                clear_extent_dirty(&fs_info->pinned_extents,
                                   bytenr, bytenr + num - 1, GFP_NOFS);
        }
        while (num > 0) {
                cache = btrfs_lookup_block_group(fs_info, bytenr);
                if (!cache) {
                        u64 first = first_logical_byte(root, bytenr);
                        WARN_ON(first < bytenr);
                        len = min(first - bytenr, num);
                } else {
                        len = min(num, cache->key.offset -
                                  (bytenr - cache->key.objectid));
                }
                if (pin) {
                        if (cache) {
                                spin_lock(&cache->lock);
                                cache->pinned += len;
                                cache->space_info->bytes_pinned += len;
                                spin_unlock(&cache->lock);
                        }
                        fs_info->total_pinned += len;
                } else {
                        if (cache) {
                                spin_lock(&cache->lock);
                                cache->pinned -= len;
                                cache->space_info->bytes_pinned -= len;
                                spin_unlock(&cache->lock);
                        }
                        fs_info->total_pinned -= len;
                }
                bytenr += len;
                num -= len;
        }
        return 0;
}

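/*
 * Pinned bytes are extents freed in the running transaction that must
 * not be reused until it commits, because the committed copy of the
 * tree may still reference them.  The per-group and per-space-info
 * counters maintained above feed the "used + pinned" checks in
 * __btrfs_find_block_group() and do_chunk_alloc(), and
 * btrfs_finish_extent_commit() below unpins everything once the
 * commit is done.
 */
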
int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
{
        u64 last = 0;
        u64 start;
        u64 end;
        struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
        int ret;

        while (1) {
                ret = find_first_extent_bit(pinned_extents, last,
                                            &start, &end, EXTENT_DIRTY);
                if (ret)
                        break;
                set_extent_dirty(copy, start, end, GFP_NOFS);
                last = end + 1;
        }
        return 0;
}

int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               struct extent_io_tree *unpin)
{
        u64 start;
        u64 end;
        int ret;
        struct extent_io_tree *free_space_cache;

        free_space_cache = &root->fs_info->free_space_cache;

        mutex_lock(&root->fs_info->alloc_mutex);
        while (1) {
                ret = find_first_extent_bit(unpin, 0, &start, &end,
                                            EXTENT_DIRTY);
                if (ret)
                        break;
                update_pinned_extents(root, start, end + 1 - start, 0);
                clear_extent_dirty(unpin, start, end, GFP_NOFS);
                set_extent_dirty(free_space_cache, start, end, GFP_NOFS);
                if (need_resched()) {
                        mutex_unlock(&root->fs_info->alloc_mutex);
                        cond_resched();
                        mutex_lock(&root->fs_info->alloc_mutex);
                }
        }
        mutex_unlock(&root->fs_info->alloc_mutex);
        return 0;
}

static int finish_current_insert(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *extent_root)
{
        u64 start;
        u64 end;
        struct btrfs_fs_info *info = extent_root->fs_info;
        struct extent_buffer *eb;
        struct btrfs_path *path;
        struct btrfs_key ins;
        struct btrfs_disk_key first;
        struct btrfs_extent_item extent_item;
        int ret;
        int level;
        int err = 0;

        WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
        btrfs_set_stack_extent_refs(&extent_item, 1);
        btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        while (1) {
                ret = find_first_extent_bit(&info->extent_ins, 0, &start,
                                            &end, EXTENT_LOCKED);
                if (ret)
                        break;

                ins.objectid = start;
                ins.offset = end + 1 - start;
                err = btrfs_insert_item(trans, extent_root, &ins,
                                        &extent_item, sizeof(extent_item));
                clear_extent_bits(&info->extent_ins, start, end, EXTENT_LOCKED,
                                  GFP_NOFS);

                eb = btrfs_find_tree_block(extent_root, ins.objectid,
                                           ins.offset);

                if (!btrfs_buffer_uptodate(eb, trans->transid)) {
                        mutex_unlock(&extent_root->fs_info->alloc_mutex);
                        btrfs_read_buffer(eb, trans->transid);
                        mutex_lock(&extent_root->fs_info->alloc_mutex);
                }

                btrfs_tree_lock(eb);
                level = btrfs_header_level(eb);
                if (level == 0)
                        btrfs_item_key(eb, &first, 0);
                else
                        btrfs_node_key(eb, &first, 0);
                btrfs_tree_unlock(eb);
                free_extent_buffer(eb);
                /*
                 * the first key is just a hint, so the race we've created
                 * against reading it is fine
                 */
                err = btrfs_insert_extent_backref(trans, extent_root, path,
                                  start, extent_root->root_key.objectid,
                                  0, level,
                                  btrfs_disk_key_objectid(&first));
                BUG_ON(err);
                if (need_resched()) {
                        mutex_unlock(&extent_root->fs_info->alloc_mutex);
                        cond_resched();
                        mutex_lock(&extent_root->fs_info->alloc_mutex);
                }
        }
        btrfs_free_path(path);
        return 0;
}

static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes,
                          int pending)
{
        int err = 0;

        WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
        if (!pending) {
                struct extent_buffer *buf;
                buf = btrfs_find_tree_block(root, bytenr, num_bytes);
                if (buf) {
                        if (btrfs_buffer_uptodate(buf, 0) &&
                            btrfs_try_tree_lock(buf)) {
                                u64 transid =
                                    root->fs_info->running_transaction->transid;
                                u64 header_transid =
                                        btrfs_header_generation(buf);
                                if (header_transid == transid &&
                                    !btrfs_header_flag(buf,
                                               BTRFS_HEADER_FLAG_WRITTEN)) {
                                        clean_tree_block(NULL, root, buf);
                                        btrfs_tree_unlock(buf);
                                        free_extent_buffer(buf);
                                        return 1;
                                }
                                btrfs_tree_unlock(buf);
                        }
                        free_extent_buffer(buf);
                }
                update_pinned_extents(root, bytenr, num_bytes, 1);
        } else {
                set_extent_bits(&root->fs_info->pending_del,
                                bytenr, bytenr + num_bytes - 1,
                                EXTENT_LOCKED, GFP_NOFS);
        }
        BUG_ON(err < 0);
        return 0;
}

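/*
 * The "return 1" short cut in pin_down_bytes() covers a tree block
 * that was both allocated and freed inside the running transaction:
 * it was never written, so no committed tree can point to it, and the
 * caller may mark the bytes free immediately instead of pinning them
 * until the commit (see the mark_free handling in __free_extent()
 * below).
 */
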
/*
 * remove an extent from the root, returns 0 on success
 */
static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
                         *root, u64 bytenr, u64 num_bytes,
                         u64 root_objectid, u64 ref_generation,
                         u64 owner_objectid, u64 owner_offset, int pin,
                         int mark_free)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_root *extent_root = info->extent_root;
        struct extent_buffer *leaf;
        int ret;
        int extent_slot = 0;
        int found_extent = 0;
        int num_to_del = 1;
        struct btrfs_extent_item *ei;
        u32 refs;

        WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
        key.objectid = bytenr;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        key.offset = num_bytes;
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->reada = 1;
        ret = lookup_extent_backref(trans, extent_root, path,
                                    bytenr, root_objectid,
                                    ref_generation,
                                    owner_objectid, owner_offset, 1);
        if (ret == 0) {
                struct btrfs_key found_key;
                extent_slot = path->slots[0];
                while (extent_slot > 0) {
                        extent_slot--;
                        btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                              extent_slot);
                        if (found_key.objectid != bytenr)
                                break;
                        if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
                            found_key.offset == num_bytes) {
                                found_extent = 1;
                                break;
                        }
                        if (path->slots[0] - extent_slot > 5)
                                break;
                }
                if (!found_extent)
                        ret = btrfs_del_item(trans, extent_root, path);
        } else {
                btrfs_print_leaf(extent_root, path->nodes[0]);
                WARN_ON(1);
                printk("Unable to find ref byte nr %Lu root %Lu "
                       "gen %Lu owner %Lu offset %Lu\n", bytenr,
                       root_objectid, ref_generation, owner_objectid,
                       owner_offset);
        }
        if (!found_extent) {
                btrfs_release_path(extent_root, path);
                ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
                if (ret < 0) {
                        btrfs_free_path(path);
                        return ret;
                }
                BUG_ON(ret);
                extent_slot = path->slots[0];
        }

        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, extent_slot,
                            struct btrfs_extent_item);
        refs = btrfs_extent_refs(leaf, ei);
        BUG_ON(refs == 0);
        refs -= 1;
        btrfs_set_extent_refs(leaf, ei, refs);

        btrfs_mark_buffer_dirty(leaf);

        if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
                /* if the back ref and the extent are next to each other
                 * they get deleted below in one shot
                 */
                path->slots[0] = extent_slot;
                num_to_del = 2;
        } else if (found_extent) {
                /* otherwise delete the extent back ref */
                ret = btrfs_del_item(trans, extent_root, path);
                BUG_ON(ret);
                /* if refs are 0, we need to setup the path for deletion */
                if (refs == 0) {
                        btrfs_release_path(extent_root, path);
                        ret = btrfs_search_slot(trans, extent_root, &key, path,
                                                -1, 1);
                        if (ret < 0) {
                                btrfs_free_path(path);
                                return ret;
                        }
                        BUG_ON(ret);
                }
        }

        if (refs == 0) {
                u64 super_used;
                u64 root_used;

                if (pin) {
                        ret = pin_down_bytes(root, bytenr, num_bytes, 0);
                        if (ret > 0)
                                mark_free = 1;
                        BUG_ON(ret < 0);
                }

                /* block accounting for super block */
                spin_lock_irq(&info->delalloc_lock);
                super_used = btrfs_super_bytes_used(&info->super_copy);
                btrfs_set_super_bytes_used(&info->super_copy,
                                           super_used - num_bytes);
                spin_unlock_irq(&info->delalloc_lock);

                /* block accounting for root item */
                root_used = btrfs_root_used(&root->root_item);
                btrfs_set_root_used(&root->root_item,
                                    root_used - num_bytes);
                ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
                                      num_to_del);
                if (ret) {
                        btrfs_free_path(path);
                        return ret;
                }
                ret = update_block_group(trans, root, bytenr, num_bytes, 0,
                                         mark_free);
                BUG_ON(ret);
        }
        btrfs_free_path(path);
        finish_current_insert(trans, extent_root);
        return ret;
}

/*
 * find all the blocks marked as pending in the radix tree and remove
 * them from the extent map
 */
static int del_pending_extents(struct btrfs_trans_handle *trans,
                               struct btrfs_root *extent_root)
{
        int ret;
        int err = 0;
        u64 start;
        u64 end;
        struct extent_io_tree *pending_del;
        struct extent_io_tree *pinned_extents;

        WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
        pending_del = &extent_root->fs_info->pending_del;
        pinned_extents = &extent_root->fs_info->pinned_extents;

        while (1) {
                ret = find_first_extent_bit(pending_del, 0, &start, &end,
                                            EXTENT_LOCKED);
                if (ret)
                        break;
                clear_extent_bits(pending_del, start, end, EXTENT_LOCKED,
                                  GFP_NOFS);
                if (!test_range_bit(&extent_root->fs_info->extent_ins,
                                    start, end, EXTENT_LOCKED, 0)) {
                        update_pinned_extents(extent_root, start,
                                              end + 1 - start, 1);
                        ret = __free_extent(trans, extent_root,
                                            start, end + 1 - start,
                                            extent_root->root_key.objectid,
                                            0, 0, 0, 0, 0);
                } else {
                        clear_extent_bits(&extent_root->fs_info->extent_ins,
                                          start, end, EXTENT_LOCKED, GFP_NOFS);
                }
                if (ret)
                        err = ret;

                if (need_resched()) {
                        mutex_unlock(&extent_root->fs_info->alloc_mutex);
                        cond_resched();
                        mutex_lock(&extent_root->fs_info->alloc_mutex);
                }
        }
        return err;
}

/*
 * remove an extent from the root, returns 0 on success
 */
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root, u64 bytenr,
                               u64 num_bytes, u64 root_objectid,
                               u64 ref_generation, u64 owner_objectid,
                               u64 owner_offset, int pin)
{
        struct btrfs_root *extent_root = root->fs_info->extent_root;
        int pending_ret;
        int ret;

        WARN_ON(num_bytes < root->sectorsize);
        if (!root->ref_cows)
                ref_generation = 0;

        if (root == extent_root) {
                pin_down_bytes(root, bytenr, num_bytes, 1);
                return 0;
        }
        ret = __free_extent(trans, root, bytenr, num_bytes, root_objectid,
                            ref_generation, owner_objectid, owner_offset,
                            pin, pin == 0);

        finish_current_insert(trans, root->fs_info->extent_root);
        pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
        return ret ? ret : pending_ret;
}

int btrfs_free_extent(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root, u64 bytenr,
                      u64 num_bytes, u64 root_objectid,
                      u64 ref_generation, u64 owner_objectid,
                      u64 owner_offset, int pin)
{
        int ret;

        maybe_lock_mutex(root);
        ret = __btrfs_free_extent(trans, root, bytenr, num_bytes,
                                  root_objectid, ref_generation,
                                  owner_objectid, owner_offset, pin);
        maybe_unlock_mutex(root);
        return ret;
}

static u64 stripe_align(struct btrfs_root *root, u64 val)
{
        u64 mask = ((u64)root->stripesize - 1);
        u64 ret = (val + mask) & ~mask;
        return ret;
}

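/*
 * stripe_align() rounds val up to the next stripesize boundary.  For
 * example, with a 64K stripesize, mask is 0xffff and
 *
 *        stripe_align(root, 0x1a000) == 0x20000
 *
 * while values that are already aligned come back unchanged.
 */
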
1807 /*
1808 * walks the btree of allocated extents and find a hole of a given size.
1809 * The key ins is changed to record the hole:
1810 * ins->objectid == block start
1811 * ins->flags = BTRFS_EXTENT_ITEM_KEY
1812 * ins->offset == number of blocks
1813 * Any available blocks before search_start are skipped.
1814 */
1815 static int noinline find_free_extent(struct btrfs_trans_handle *trans,
1816 struct btrfs_root *orig_root,
1817 u64 num_bytes, u64 empty_size,
1818 u64 search_start, u64 search_end,
1819 u64 hint_byte, struct btrfs_key *ins,
1820 u64 exclude_start, u64 exclude_nr,
1821 int data)
1822 {
1823 int ret;
1824 u64 orig_search_start;
1825 struct btrfs_root * root = orig_root->fs_info->extent_root;
1826 struct btrfs_fs_info *info = root->fs_info;
1827 u64 total_needed = num_bytes;
1828 u64 *last_ptr = NULL;
1829 struct btrfs_block_group_cache *block_group;
1830 int full_scan = 0;
1831 int wrapped = 0;
1832 int chunk_alloc_done = 0;
1833 int empty_cluster = 2 * 1024 * 1024;
1834 int allowed_chunk_alloc = 0;
1835
1836 WARN_ON(num_bytes < root->sectorsize);
1837 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
1838
1839 if (orig_root->ref_cows || empty_size)
1840 allowed_chunk_alloc = 1;
1841
1842 if (data & BTRFS_BLOCK_GROUP_METADATA) {
1843 last_ptr = &root->fs_info->last_alloc;
1844 empty_cluster = 256 * 1024;
1845 }
1846
1847 if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
1848 last_ptr = &root->fs_info->last_data_alloc;
1849 }
1850
1851 if (last_ptr) {
1852 if (*last_ptr)
1853 hint_byte = *last_ptr;
1854 else {
1855 empty_size += empty_cluster;
1856 }
1857 }
1858
1859 search_start = max(search_start, first_logical_byte(root, 0));
1860 orig_search_start = search_start;
1861
1862 if (search_end == (u64)-1)
1863 search_end = btrfs_super_total_bytes(&info->super_copy);
1864
1865 if (hint_byte) {
1866 block_group = btrfs_lookup_first_block_group(info, hint_byte);
1867 if (!block_group)
1868 hint_byte = search_start;
1869 block_group = btrfs_find_block_group(root, block_group,
1870 hint_byte, data, 1);
1871 if (last_ptr && *last_ptr == 0 && block_group)
1872 hint_byte = block_group->key.objectid;
1873 } else {
1874 block_group = btrfs_find_block_group(root,
1875 trans->block_group,
1876 search_start, data, 1);
1877 }
1878 search_start = max(search_start, hint_byte);
1879
1880 total_needed += empty_size;
1881
1882 check_failed:
1883 if (!block_group) {
1884 block_group = btrfs_lookup_first_block_group(info,
1885 search_start);
1886 if (!block_group)
1887 block_group = btrfs_lookup_first_block_group(info,
1888 orig_search_start);
1889 }
1890 if (full_scan && !chunk_alloc_done) {
1891 if (allowed_chunk_alloc) {
1892 do_chunk_alloc(trans, root,
1893 num_bytes + 2 * 1024 * 1024, data, 1);
1894 allowed_chunk_alloc = 0;
1895 } else if (block_group && block_group_bits(block_group, data)) {
1896 block_group->space_info->force_alloc = 1;
1897 }
1898 chunk_alloc_done = 1;
1899 }
1900 ret = find_search_start(root, &block_group, &search_start,
1901 total_needed, data);
1902 if (ret == -ENOSPC && last_ptr && *last_ptr) {
1903 *last_ptr = 0;
1904 block_group = btrfs_lookup_first_block_group(info,
1905 orig_search_start);
1906 search_start = orig_search_start;
1907 ret = find_search_start(root, &block_group, &search_start,
1908 total_needed, data);
1909 }
1910 if (ret == -ENOSPC)
1911 goto enospc;
1912 if (ret)
1913 goto error;
1914
1915 if (last_ptr && *last_ptr && search_start != *last_ptr) {
1916 *last_ptr = 0;
1917 if (!empty_size) {
1918 empty_size += empty_cluster;
1919 total_needed += empty_size;
1920 }
1921 block_group = btrfs_lookup_first_block_group(info,
1922 orig_search_start);
1923 search_start = orig_search_start;
1924 ret = find_search_start(root, &block_group,
1925 &search_start, total_needed, data);
1926 if (ret == -ENOSPC)
1927 goto enospc;
1928 if (ret)
1929 goto error;
1930 }
1931
1932 search_start = stripe_align(root, search_start);
1933 ins->objectid = search_start;
1934 ins->offset = num_bytes;
1935
1936 if (ins->objectid + num_bytes >= search_end)
1937 goto enospc;
1938
1939 if (ins->objectid + num_bytes >
1940 block_group->key.objectid + block_group->key.offset) {
1941 search_start = block_group->key.objectid +
1942 block_group->key.offset;
1943 goto new_group;
1944 }
1945
1946 if (test_range_bit(&info->extent_ins, ins->objectid,
1947 ins->objectid + num_bytes -1, EXTENT_LOCKED, 0)) {
1948 search_start = ins->objectid + num_bytes;
1949 goto new_group;
1950 }
1951
1952 if (test_range_bit(&info->pinned_extents, ins->objectid,
1953 ins->objectid + num_bytes - 1, EXTENT_DIRTY, 0)) {
1954 search_start = ins->objectid + num_bytes;
1955 goto new_group;
1956 }
1957
1958 if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start &&
1959 ins->objectid < exclude_start + exclude_nr)) {
1960 search_start = exclude_start + exclude_nr;
1961 goto new_group;
1962 }
1963
1964 if (!(data & BTRFS_BLOCK_GROUP_DATA)) {
1965 block_group = btrfs_lookup_block_group(info, ins->objectid);
1966 if (block_group)
1967 trans->block_group = block_group;
1968 }
1969 ins->offset = num_bytes;
1970 if (last_ptr) {
1971 *last_ptr = ins->objectid + ins->offset;
1972 if (*last_ptr ==
1973 btrfs_super_total_bytes(&root->fs_info->super_copy)) {
1974 *last_ptr = 0;
1975 }
1976 }
1977 return 0;
1978
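/*
 * the candidate range did not work out.  When the search runs past
 * search_end we first wrap back to the start of the fs; on the second
 * wrap we drop the clustering slack (empty_size) and make one final
 * full scan before giving up with -ENOSPC.
 */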
1979 new_group:
1980 if (search_start + num_bytes >= search_end) {
1981 enospc:
1982 search_start = orig_search_start;
1983 if (full_scan) {
1984 ret = -ENOSPC;
1985 goto error;
1986 }
1987 if (wrapped) {
1988 if (!full_scan)
1989 total_needed -= empty_size;
1990 full_scan = 1;
1991 } else
1992 wrapped = 1;
1993 }
1994 block_group = btrfs_lookup_first_block_group(info, search_start);
1995 cond_resched();
1996 block_group = btrfs_find_block_group(root, block_group,
1997 search_start, data, 0);
1998 goto check_failed;
1999
2000 error:
2001 return ret;
2002 }
2003
2004 static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2005 struct btrfs_root *root,
2006 u64 num_bytes, u64 min_alloc_size,
2007 u64 empty_size, u64 hint_byte,
2008 u64 search_end, struct btrfs_key *ins,
2009 u64 data)
2010 {
2011 int ret;
2012 u64 search_start = 0;
2013 u64 alloc_profile;
2014 struct btrfs_fs_info *info = root->fs_info;
2015
2016 if (data) {
2017 alloc_profile = info->avail_data_alloc_bits &
2018 info->data_alloc_profile;
2019 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
2020 } else if (root == root->fs_info->chunk_root) {
2021 alloc_profile = info->avail_system_alloc_bits &
2022 info->system_alloc_profile;
2023 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
2024 } else {
2025 alloc_profile = info->avail_metadata_alloc_bits &
2026 info->metadata_alloc_profile;
2027 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
2028 }
2029 again:
2030 data = reduce_alloc_profile(root, data);
2031 /*
2032 * the only place that sets empty_size is btrfs_realloc_node, which
2033 * is not called recursively on allocations
2034 */
2035 if (empty_size || root->ref_cows) {
2036 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
2037 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2038 2 * 1024 * 1024,
2039 BTRFS_BLOCK_GROUP_METADATA |
2040 (info->metadata_alloc_profile &
2041 info->avail_metadata_alloc_bits), 0);
2042 BUG_ON(ret);
2043 }
2044 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2045 num_bytes + 2 * 1024 * 1024, data, 0);
2046 BUG_ON(ret);
2047 }
2048
2049 WARN_ON(num_bytes < root->sectorsize);
2050 ret = find_free_extent(trans, root, num_bytes, empty_size,
2051 search_start, search_end, hint_byte, ins,
2052 trans->alloc_exclude_start,
2053 trans->alloc_exclude_nr, data);
2054
2055 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
2056 num_bytes = num_bytes >> 1;
2057 num_bytes = max(num_bytes, min_alloc_size);
2058 do_chunk_alloc(trans, root->fs_info->extent_root,
2059 num_bytes, data, 1);
2060 goto again;
2061 }
2062 if (ret) {
2063 printk("allocation failed flags %Lu\n", data);
2064 BUG();
2065 }
2066 clear_extent_dirty(&root->fs_info->free_space_cache,
2067 ins->objectid, ins->objectid + ins->offset - 1,
2068 GFP_NOFS);
2069 return 0;
2070 }
2071
2072 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2073 struct btrfs_root *root,
2074 u64 num_bytes, u64 min_alloc_size,
2075 u64 empty_size, u64 hint_byte,
2076 u64 search_end, struct btrfs_key *ins,
2077 u64 data)
2078 {
2079 int ret;
2080 maybe_lock_mutex(root);
2081 ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
2082 empty_size, hint_byte, search_end, ins,
2083 data);
2084 maybe_unlock_mutex(root);
2085 return ret;
2086 }
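
/*
 * Editorial sketch, not part of the original file: how a caller might
 * use btrfs_reserve_extent() on the data path.  "demo_reserve" and the
 * 4MB/64KB sizes are hypothetical; on success the reserved range comes
 * back through 'ins' (objectid = start, offset = length), and the
 * length may be smaller than asked for because the helper halves
 * num_bytes down to min_alloc_size when space is tight.
 */
static int demo_reserve(struct btrfs_trans_handle *trans,
			struct btrfs_root *root)
{
	struct btrfs_key ins;
	int ret;

	/* ask for 4MB, accept as little as 64KB, no hint, search whole fs */
	ret = btrfs_reserve_extent(trans, root, 4 * 1024 * 1024, 64 * 1024,
				   0, 0, (u64)-1, &ins, 1);
	if (ret)
		return ret;
	printk("reserved %llu bytes at %llu\n",
	       (unsigned long long)ins.offset,
	       (unsigned long long)ins.objectid);
	return 0;
}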
2087
2088 static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
2089 struct btrfs_root *root,
2090 u64 root_objectid, u64 ref_generation,
2091 u64 owner, u64 owner_offset,
2092 struct btrfs_key *ins)
2093 {
2094 int ret;
2095 int pending_ret;
2096 u64 super_used;
2097 u64 root_used;
2098 u64 num_bytes = ins->offset;
2099 u32 sizes[2];
2100 struct btrfs_fs_info *info = root->fs_info;
2101 struct btrfs_root *extent_root = info->extent_root;
2102 struct btrfs_extent_item *extent_item;
2103 struct btrfs_extent_ref *ref;
2104 struct btrfs_path *path;
2105 struct btrfs_key keys[2];
2106
2107 /* block accounting for super block */
2108 spin_lock_irq(&info->delalloc_lock);
2109 super_used = btrfs_super_bytes_used(&info->super_copy);
2110 btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
2111 spin_unlock_irq(&info->delalloc_lock);
2112
2113 /* block accounting for root item */
2114 root_used = btrfs_root_used(&root->root_item);
2115 btrfs_set_root_used(&root->root_item, root_used + num_bytes);
2116
2117 if (root == extent_root) {
2118 set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
2119 ins->objectid + ins->offset - 1,
2120 EXTENT_LOCKED, GFP_NOFS);
2121 goto update_block;
2122 }
2123
2124 memcpy(&keys[0], ins, sizeof(*ins));
2125 keys[1].offset = hash_extent_ref(root_objectid, ref_generation,
2126 owner, owner_offset);
2127 keys[1].objectid = ins->objectid;
2128 keys[1].type = BTRFS_EXTENT_REF_KEY;
2129 sizes[0] = sizeof(*extent_item);
2130 sizes[1] = sizeof(*ref);
2131
2132 path = btrfs_alloc_path();
2133 BUG_ON(!path);
2134
2135 ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
2136 sizes, 2);
2137
2138 BUG_ON(ret);
2139 extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2140 struct btrfs_extent_item);
2141 btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
2142 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
2143 struct btrfs_extent_ref);
2144
2145 btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
2146 btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
2147 btrfs_set_ref_objectid(path->nodes[0], ref, owner);
2148 btrfs_set_ref_offset(path->nodes[0], ref, owner_offset);
2149
2150 btrfs_mark_buffer_dirty(path->nodes[0]);
2151
2152 trans->alloc_exclude_start = 0;
2153 trans->alloc_exclude_nr = 0;
2154 btrfs_free_path(path);
2155 finish_current_insert(trans, extent_root);
2156 pending_ret = del_pending_extents(trans, extent_root);
2157
2158 if (ret)
2159 goto out;
2160 if (pending_ret) {
2161 ret = pending_ret;
2162 goto out;
2163 }
2164
2165 update_block:
2166 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0);
2167 if (ret) {
2168 printk("update block group failed for %Lu %Lu\n",
2169 ins->objectid, ins->offset);
2170 BUG();
2171 }
2172 out:
2173 return ret;
2174 }
2175
2176 int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
2177 struct btrfs_root *root,
2178 u64 root_objectid, u64 ref_generation,
2179 u64 owner, u64 owner_offset,
2180 struct btrfs_key *ins)
2181 {
2182 int ret;
2183 maybe_lock_mutex(root);
2184 ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
2185 ref_generation, owner,
2186 owner_offset, ins);
2187 maybe_unlock_mutex(root);
2188 return ret;
2189 }
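
/*
 * Editorial sketch, not part of the original file: the reserve/record
 * split above lets a caller pin disk space first and only insert the
 * extent item plus back reference once the final owner is known.
 * "demo_two_phase" and the owner choices (inode number as owner,
 * file offset 0) are illustrative only.
 */
static int demo_two_phase(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, u64 ino)
{
	struct btrfs_key ins;
	int ret;

	ret = btrfs_reserve_extent(trans, root, root->sectorsize,
				   root->sectorsize, 0, 0, (u64)-1, &ins, 1);
	if (ret)
		return ret;
	/* ... write data into [ins.objectid, ins.objectid + ins.offset) ... */
	return btrfs_alloc_reserved_extent(trans, root,
					   root->root_key.objectid,
					   trans->transid, ino, 0, &ins);
}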

2190 /*
2191 * finds a free extent and does all the dirty work required for
2192 * allocation: the extent item and its back reference are inserted,
2193 * and the key for the new extent is returned through ins.
2194 *
2195 * returns 0 if everything worked, non-zero otherwise.
2196 */
2197 int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
2198 struct btrfs_root *root,
2199 u64 num_bytes, u64 min_alloc_size,
2200 u64 root_objectid, u64 ref_generation,
2201 u64 owner, u64 owner_offset,
2202 u64 empty_size, u64 hint_byte,
2203 u64 search_end, struct btrfs_key *ins, u64 data)
2204 {
2205 int ret;
2206
2207 maybe_lock_mutex(root);
2208
2209 ret = __btrfs_reserve_extent(trans, root, num_bytes,
2210 min_alloc_size, empty_size, hint_byte,
2211 search_end, ins, data);
2212 BUG_ON(ret);
2213 ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
2214 ref_generation, owner,
2215 owner_offset, ins);
2216 BUG_ON(ret);
2217
2218 maybe_unlock_mutex(root);
2219 return ret;
2220 }

2221 /*
2222 * helper function to allocate a block for a given tree.
2223 * returns the tree buffer, or an ERR_PTR on failure.
2224 */
2225 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
2226 struct btrfs_root *root,
2227 u32 blocksize,
2228 u64 root_objectid,
2229 u64 ref_generation,
2230 u64 first_objectid,
2231 int level,
2232 u64 hint,
2233 u64 empty_size)
2234 {
2235 struct btrfs_key ins;
2236 int ret;
2237 struct extent_buffer *buf;
2238
2239 ret = btrfs_alloc_extent(trans, root, blocksize, blocksize,
2240 root_objectid, ref_generation,
2241 level, first_objectid, empty_size, hint,
2242 (u64)-1, &ins, 0);
2243 if (ret) {
2244 BUG_ON(ret > 0);
2245 return ERR_PTR(ret);
2246 }
2247 buf = btrfs_find_create_tree_block(root, ins.objectid, blocksize);
2248 if (!buf) {
2249 btrfs_free_extent(trans, root, ins.objectid, blocksize,
2250 root->root_key.objectid, ref_generation,
2251 0, 0, 0);
2252 return ERR_PTR(-ENOMEM);
2253 }
2254 btrfs_set_header_generation(buf, trans->transid);
2255 btrfs_tree_lock(buf);
2256 clean_tree_block(trans, root, buf);
2257 btrfs_set_buffer_uptodate(buf);
2258
2259 if (PageDirty(buf->first_page)) {
2260 printk("page %lu dirty\n", buf->first_page->index);
2261 WARN_ON(1);
2262 }
2263
2264 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
2265 buf->start + buf->len - 1, GFP_NOFS);
2266 trans->blocks_used++;
2267 return buf;
2268 }
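
/*
 * Editorial sketch, not part of the original file: allocating a fresh
 * leaf with btrfs_alloc_free_block().  "demo_new_leaf" and the zero
 * hint/objectid arguments are hypothetical.  Note the helper returns
 * the buffer with the tree lock held.
 */
static struct extent_buffer *demo_new_leaf(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root)
{
	struct extent_buffer *leaf;

	leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
				      root->root_key.objectid,
				      trans->transid, 0, 0, 0, 0);
	if (IS_ERR(leaf))
		return leaf;
	/* new blocks come back locked; drop the lock once setup is done */
	btrfs_tree_unlock(leaf);
	return leaf;
}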
2269
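/*
 * free every data extent referenced from a leaf.  Called with the
 * alloc_mutex held; the mutex is dropped for the scan and retaken
 * around each __btrfs_free_extent call so other allocators can run.
 */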
2270 static int noinline drop_leaf_ref_no_cache(struct btrfs_trans_handle *trans,
2271 struct btrfs_root *root,
2272 struct extent_buffer *leaf)
2273 {
2274 u64 leaf_owner;
2275 u64 leaf_generation;
2276 struct btrfs_key key;
2277 struct btrfs_file_extent_item *fi;
2278 int i;
2279 int nritems;
2280 int ret;
2281
2282 BUG_ON(!btrfs_is_leaf(leaf));
2283 nritems = btrfs_header_nritems(leaf);
2284 leaf_owner = btrfs_header_owner(leaf);
2285 leaf_generation = btrfs_header_generation(leaf);
2286
2287 mutex_unlock(&root->fs_info->alloc_mutex);
2288
2289 for (i = 0; i < nritems; i++) {
2290 u64 disk_bytenr;
2291 cond_resched();
2292
2293 btrfs_item_key_to_cpu(leaf, &key, i);
2294 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2295 continue;
2296 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
2297 if (btrfs_file_extent_type(leaf, fi) ==
2298 BTRFS_FILE_EXTENT_INLINE)
2299 continue;
2300 /*
2301 * FIXME make sure to insert a trans record that
2302 * repeats the snapshot del on crash
2303 */
2304 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
2305 if (disk_bytenr == 0)
2306 continue;
2307
2308 mutex_lock(&root->fs_info->alloc_mutex);
2309 ret = __btrfs_free_extent(trans, root, disk_bytenr,
2310 btrfs_file_extent_disk_num_bytes(leaf, fi),
2311 leaf_owner, leaf_generation,
2312 key.objectid, key.offset, 0);
2313 mutex_unlock(&root->fs_info->alloc_mutex);
2314 BUG_ON(ret);
2315 }
2316
2317 mutex_lock(&root->fs_info->alloc_mutex);
2318 return 0;
2319 }
2320
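/*
 * like drop_leaf_ref_no_cache, but frees the extents recorded in a
 * cached btrfs_leaf_ref, so the leaf never has to be read back in.
 */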
2321 static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans,
2322 struct btrfs_root *root,
2323 struct btrfs_leaf_ref *ref)
2324 {
2325 int i;
2326 int ret;
2327 struct btrfs_extent_info *info = ref->extents;
2328
2329 mutex_unlock(&root->fs_info->alloc_mutex);
2330 for (i = 0; i < ref->nritems; i++) {
2331 mutex_lock(&root->fs_info->alloc_mutex);
2332 ret = __btrfs_free_extent(trans, root,
2333 info->bytenr, info->num_bytes,
2334 ref->owner, ref->generation,
2335 info->objectid, info->offset, 0);
2336 mutex_unlock(&root->fs_info->alloc_mutex);
2337 BUG_ON(ret);
2338 info++;
2339 }
2340 mutex_lock(&root->fs_info->alloc_mutex);
2341
2342 return 0;
2343 }
2344
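/*
 * issue readahead for the block pointers in 'node', skipping pointers
 * that would seek far from the last one issued or that are shared
 * (refs != 1), and giving up once 32 pointers have been skipped.
 */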
2345 static void noinline reada_walk_down(struct btrfs_root *root,
2346 struct extent_buffer *node,
2347 int slot)
2348 {
2349 u64 bytenr;
2350 u64 last = 0;
2351 u32 nritems;
2352 u32 refs;
2353 u32 blocksize;
2354 int ret;
2355 int i;
2356 int level;
2357 int skipped = 0;
2358
2359 nritems = btrfs_header_nritems(node);
2360 level = btrfs_header_level(node);
2361 if (level)
2362 return;
2363
2364 for (i = slot; i < nritems && skipped < 32; i++) {
2365 bytenr = btrfs_node_blockptr(node, i);
2366 if (last && ((bytenr > last && bytenr - last > 32 * 1024) ||
2367 (last > bytenr && last - bytenr > 32 * 1024))) {
2368 skipped++;
2369 continue;
2370 }
2371 blocksize = btrfs_level_size(root, level - 1);
2372 if (i != slot) {
2373 ret = lookup_extent_ref(NULL, root, bytenr,
2374 blocksize, &refs);
2375 BUG_ON(ret);
2376 if (refs != 1) {
2377 skipped++;
2378 continue;
2379 }
2380 }
2381 ret = readahead_tree_block(root, bytenr, blocksize,
2382 btrfs_node_ptr_generation(node, i));
2383 last = bytenr + blocksize;
2384 cond_resched();
2385 if (ret)
2386 break;
2387 }
2388 }
2389
2390 /*
2391 * we want to avoid as much random IO as we can with the alloc mutex
2392 * held, so drop the lock and do the lookup, then do it again with the
2393 * lock held.
2394 */
2395 int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
2396 u32 *refs)
2397 {
2398 mutex_unlock(&root->fs_info->alloc_mutex);
2399 lookup_extent_ref(NULL, root, start, len, refs);
2400 cond_resched();
2401 mutex_lock(&root->fs_info->alloc_mutex);
2402 return lookup_extent_ref(NULL, root, start, len, refs);
2403 }
2404
2405 /*
2406 * helper function for drop_snapshot, this walks down the tree dropping ref
2407 * counts as it goes.
2408 */
2409 static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
2410 struct btrfs_root *root,
2411 struct btrfs_path *path, int *level)
2412 {
2413 u64 root_owner;
2414 u64 root_gen;
2415 u64 bytenr;
2416 u64 ptr_gen;
2417 struct extent_buffer *next;
2418 struct extent_buffer *cur;
2419 struct extent_buffer *parent;
2420 struct btrfs_leaf_ref *ref;
2421 u32 blocksize;
2422 int ret;
2423 u32 refs;
2424
2425 mutex_lock(&root->fs_info->alloc_mutex);
2426
2427 WARN_ON(*level < 0);
2428 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2429 ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
2430 path->nodes[*level]->len, &refs);
2431 BUG_ON(ret);
2432 if (refs > 1)
2433 goto out;
2434
2435 /*
2436 * walk down to the last node level and free all the leaves
2437 */
2438 while(*level >= 0) {
2439 WARN_ON(*level < 0);
2440 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2441 cur = path->nodes[*level];
2442
2443 if (btrfs_header_level(cur) != *level)
2444 WARN_ON(1);
2445
2446 if (path->slots[*level] >=
2447 btrfs_header_nritems(cur))
2448 break;
2449 if (*level == 0) {
2450 ret = drop_leaf_ref_no_cache(trans, root, cur);
2451 BUG_ON(ret);
2452 break;
2453 }
2454 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2455 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2456 blocksize = btrfs_level_size(root, *level - 1);
2457
2458 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
2459 BUG_ON(ret);
2460 if (refs != 1) {
2461 parent = path->nodes[*level];
2462 root_owner = btrfs_header_owner(parent);
2463 root_gen = btrfs_header_generation(parent);
2464 path->slots[*level]++;
2465 ret = __btrfs_free_extent(trans, root, bytenr,
2466 blocksize, root_owner,
2467 root_gen, 0, 0, 1);
2468 BUG_ON(ret);
2469 continue;
2470 }
2471
2472 if (*level == 1) {
2473 struct btrfs_key key;
2474 btrfs_node_key_to_cpu(cur, &key, path->slots[*level]);
2475 ref = btrfs_lookup_leaf_ref(root, &key);
2476 if (ref) {
2477 ret = drop_leaf_ref(trans, root, ref);
2478 BUG_ON(ret);
2479 btrfs_remove_leaf_ref(root, ref);
2480 btrfs_free_leaf_ref(ref);
2481 *level = 0;
2482 break;
2483 }
2484 }
2485
2486 next = btrfs_find_tree_block(root, bytenr, blocksize);
2487 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
2488 free_extent_buffer(next);
2489 mutex_unlock(&root->fs_info->alloc_mutex);
2490
2491 if (path->slots[*level] == 0)
2492 reada_walk_down(root, cur, path->slots[*level]);
2493 next = read_tree_block(root, bytenr, blocksize,
2494 ptr_gen);
2495 cond_resched();
2496 mutex_lock(&root->fs_info->alloc_mutex);
2497
2498 /* we've dropped the lock, double check */
2499 ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
2500 &refs);
2501 BUG_ON(ret);
2502 if (refs != 1) {
2503 parent = path->nodes[*level];
2504 root_owner = btrfs_header_owner(parent);
2505 root_gen = btrfs_header_generation(parent);
2506
2507 path->slots[*level]++;
2508 free_extent_buffer(next);
2509 ret = __btrfs_free_extent(trans, root, bytenr,
2510 blocksize,
2511 root_owner,
2512 root_gen, 0, 0, 1);
2513 BUG_ON(ret);
2514 continue;
2515 }
2516 }
2517 WARN_ON(*level <= 0);
2518 if (path->nodes[*level-1])
2519 free_extent_buffer(path->nodes[*level-1]);
2520 path->nodes[*level-1] = next;
2521 *level = btrfs_header_level(next);
2522 path->slots[*level] = 0;
2523 }
2524 out:
2525 WARN_ON(*level < 0);
2526 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2527
2528 if (path->nodes[*level] == root->node) {
2529 parent = path->nodes[*level];
2530 bytenr = path->nodes[*level]->start;
2531 } else {
2532 parent = path->nodes[*level + 1];
2533 bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
2534 }
2535
2536 blocksize = btrfs_level_size(root, *level);
2537 root_owner = btrfs_header_owner(parent);
2538 root_gen = btrfs_header_generation(parent);
2539
2540 ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
2541 root_owner, root_gen, 0, 0, 1);
2542 free_extent_buffer(path->nodes[*level]);
2543 path->nodes[*level] = NULL;
2544 *level += 1;
2545 BUG_ON(ret);
2546 mutex_unlock(&root->fs_info->alloc_mutex);
2547 cond_resched();
2548 return 0;
2549 }
2550
2551 /*
2552 * helper for dropping snapshots. This walks back up the tree in the path
2553 * to find the first node higher up where we haven't yet gone through
2554 * all the slots
2555 */
2556 static int noinline walk_up_tree(struct btrfs_trans_handle *trans,
2557 struct btrfs_root *root,
2558 struct btrfs_path *path, int *level)
2559 {
2560 u64 root_owner;
2561 u64 root_gen;
2562 struct btrfs_root_item *root_item = &root->root_item;
2563 int i;
2564 int slot;
2565 int ret;
2566
2567 for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2568 slot = path->slots[i];
2569 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
2570 struct extent_buffer *node;
2571 struct btrfs_disk_key disk_key;
2572 node = path->nodes[i];
2573 path->slots[i]++;
2574 *level = i;
2575 WARN_ON(*level == 0);
2576 btrfs_node_key(node, &disk_key, path->slots[i]);
2577 memcpy(&root_item->drop_progress,
2578 &disk_key, sizeof(disk_key));
2579 root_item->drop_level = i;
2580 return 0;
2581 } else {
2582 if (path->nodes[*level] == root->node) {
2583 root_owner = root->root_key.objectid;
2584 root_gen =
2585 btrfs_header_generation(path->nodes[*level]);
2586 } else {
2587 struct extent_buffer *node;
2588 node = path->nodes[*level + 1];
2589 root_owner = btrfs_header_owner(node);
2590 root_gen = btrfs_header_generation(node);
2591 }
2592 ret = btrfs_free_extent(trans, root,
2593 path->nodes[*level]->start,
2594 path->nodes[*level]->len,
2595 root_owner, root_gen, 0, 0, 1);
2596 BUG_ON(ret);
2597 free_extent_buffer(path->nodes[*level]);
2598 path->nodes[*level] = NULL;
2599 *level = i + 1;
2600 }
2601 }
2602 return 1;
2603 }
2604
2605 /*
2606 * drop the reference count on the tree rooted at 'root'. This traverses
2607 * the tree freeing any blocks that have a ref count of zero after being
2608 * decremented.
2609 */
2610 int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
2611 *root)
2612 {
2613 int ret = 0;
2614 int wret;
2615 int level;
2616 struct btrfs_path *path;
2617 int i;
2618 int orig_level;
2619 struct btrfs_root_item *root_item = &root->root_item;
2620
2621 WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
2622 path = btrfs_alloc_path();
2623 BUG_ON(!path);
2624
2625 level = btrfs_header_level(root->node);
2626 orig_level = level;
2627 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2628 path->nodes[level] = root->node;
2629 extent_buffer_get(root->node);
2630 path->slots[level] = 0;
2631 } else {
2632 struct btrfs_key key;
2633 struct btrfs_disk_key found_key;
2634 struct extent_buffer *node;
2635
2636 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
2637 level = root_item->drop_level;
2638 path->lowest_level = level;
2639 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2640 if (wret < 0) {
2641 ret = wret;
2642 goto out;
2643 }
2644 node = path->nodes[level];
2645 btrfs_node_key(node, &found_key, path->slots[level]);
2646 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
2647 sizeof(found_key)));
2648 /*
2649 * unlock our path, this is safe because only this
2650 * function is allowed to delete this snapshot
2651 */
2652 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
2653 if (path->nodes[i] && path->locks[i]) {
2654 path->locks[i] = 0;
2655 btrfs_tree_unlock(path->nodes[i]);
2656 }
2657 }
2658 }
2659 while(1) {
2660 wret = walk_down_tree(trans, root, path, &level);
2661 if (wret > 0)
2662 break;
2663 if (wret < 0)
2664 ret = wret;
2665
2666 wret = walk_up_tree(trans, root, path, &level);
2667 if (wret > 0)
2668 break;
2669 if (wret < 0)
2670 ret = wret;
2671 if (trans->transaction->in_commit) {
2672 ret = -EAGAIN;
2673 break;
2674 }
2675 }
2676 for (i = 0; i <= orig_level; i++) {
2677 if (path->nodes[i]) {
2678 free_extent_buffer(path->nodes[i]);
2679 path->nodes[i] = NULL;
2680 }
2681 }
2682 out:
2683 btrfs_free_path(path);
2684 return ret;
2685 }
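
/*
 * Editorial sketch, not part of the original file: a caller drives
 * btrfs_drop_snapshot() until it stops returning -EAGAIN, restarting
 * the transaction between passes so commits can make progress.
 * "demo_drop" is hypothetical and cleanup of the root item is elided.
 */
static void demo_drop(struct btrfs_root *snap)
{
	struct btrfs_root *tree_root = snap->fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	int ret;

	mutex_lock(&snap->fs_info->drop_mutex);
	do {
		trans = btrfs_start_transaction(tree_root, 1);
		ret = btrfs_drop_snapshot(trans, snap);
		btrfs_end_transaction(trans, tree_root);
	} while (ret == -EAGAIN);
	mutex_unlock(&snap->fs_info->drop_mutex);
}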
2686
2687 int btrfs_free_block_groups(struct btrfs_fs_info *info)
2688 {
2689 u64 start;
2690 u64 end;
2691 u64 ptr;
2692 int ret;
2693
2694 mutex_lock(&info->alloc_mutex);
2695 while(1) {
2696 ret = find_first_extent_bit(&info->block_group_cache, 0,
2697 &start, &end, (unsigned int)-1);
2698 if (ret)
2699 break;
2700 ret = get_state_private(&info->block_group_cache, start, &ptr);
2701 if (!ret)
2702 kfree((void *)(unsigned long)ptr);
2703 clear_extent_bits(&info->block_group_cache, start,
2704 end, (unsigned int)-1, GFP_NOFS);
2705 }
2706 while(1) {
2707 ret = find_first_extent_bit(&info->free_space_cache, 0,
2708 &start, &end, EXTENT_DIRTY);
2709 if (ret)
2710 break;
2711 clear_extent_dirty(&info->free_space_cache, start,
2712 end, GFP_NOFS);
2713 }
2714 mutex_unlock(&info->alloc_mutex);
2715 return 0;
2716 }
2717
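/*
 * return the last page index for a readahead window of 'nr' pages
 * starting at 'start', clamped so it never runs past 'last'.
 */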
2718 static unsigned long calc_ra(unsigned long start, unsigned long last,
2719 unsigned long nr)
2720 {
2721 return min(last, start + nr - 1);
2722 }
2723
2724 static int noinline relocate_inode_pages(struct inode *inode, u64 start,
2725 u64 len)
2726 {
2727 u64 page_start;
2728 u64 page_end;
2729 unsigned long last_index;
2730 unsigned long i;
2731 struct page *page;
2732 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2733 struct file_ra_state *ra;
2734 unsigned long total_read = 0;
2735 unsigned long ra_pages;
2736 struct btrfs_ordered_extent *ordered;
2737 struct btrfs_trans_handle *trans;
2738
2739 ra = kzalloc(sizeof(*ra), GFP_NOFS);
if (!ra)	/* editorial fix: kzalloc can fail and ra is dereferenced below */
	return -ENOMEM;
2740
2741 mutex_lock(&inode->i_mutex);
2742 i = start >> PAGE_CACHE_SHIFT;
2743 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
2744
2745 ra_pages = BTRFS_I(inode)->root->fs_info->bdi.ra_pages;
2746
2747 file_ra_state_init(ra, inode->i_mapping);
2748
2749 for (; i <= last_index; i++) {
2750 if (total_read % ra_pages == 0) {
2751 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
2752 calc_ra(i, last_index, ra_pages));
2753 }
2754 total_read++;
2755 again:
2756 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
2757 goto truncate_racing;
2758 page = grab_cache_page(inode->i_mapping, i);
2759 if (!page) {
2760 goto out_unlock;
2761 }
2762 if (!PageUptodate(page)) {
2763 btrfs_readpage(NULL, page);
2764 lock_page(page);
2765 if (!PageUptodate(page)) {
2766 unlock_page(page);
2767 page_cache_release(page);
2768 goto out_unlock;
2769 }
2770 }
2771 wait_on_page_writeback(page);
2772
2773 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2774 page_end = page_start + PAGE_CACHE_SIZE - 1;
2775 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2776
2777 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2778 if (ordered) {
2779 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2780 unlock_page(page);
2781 page_cache_release(page);
2782 btrfs_start_ordered_extent(inode, ordered, 1);
2783 btrfs_put_ordered_extent(ordered);
2784 goto again;
2785 }
2786 set_page_extent_mapped(page);
2787
2788
2789 set_extent_delalloc(io_tree, page_start,
2790 page_end, GFP_NOFS);
2791 set_page_dirty(page);
2792
2793 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2794 unlock_page(page);
2795 page_cache_release(page);
2796 }
2797
2798 out_unlock:
2799 /* we have to start the IO in order to get the ordered extents
2800 * instantiated. This allows the relocation code to wait
2801 * for all the ordered extents to hit the disk.
2802 *
2803 * Otherwise, it would constantly loop over the same extents
2804 * because the old ones don't get deleted until the IO is
2805 * started.
2806 */
2807 btrfs_fdatawrite_range(inode->i_mapping, start, start + len - 1,
2808 WB_SYNC_NONE);
2809 kfree(ra);
2810 trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
2811 if (trans) {
2812 btrfs_end_transaction(trans, BTRFS_I(inode)->root);
2813 mark_inode_dirty(inode);
2814 }
2815 mutex_unlock(&inode->i_mutex);
2816 return 0;
2817
2818 truncate_racing:
2819 vmtruncate(inode, inode->i_size);
2820 balance_dirty_pages_ratelimited_nr(inode->i_mapping,
2821 total_read);
2822 goto out_unlock;
2823 }
2824
2825 /*
2826 * The back references tell us which tree holds a ref on a block,
2827 * but it is possible for the tree root field in the reference to
2828 * reflect the original root before a snapshot was made. In this
2829 * case we should search through all the children of a given root
2830 * to find potential holders of references on a block.
2831 *
2832 * Instead, we do something a little less fancy and just search
2833 * all the roots for a given key/block combination.
2834 */
2835 static int find_root_for_ref(struct btrfs_root *root,
2836 struct btrfs_path *path,
2837 struct btrfs_key *key0,
2838 int level,
2839 int file_key,
2840 struct btrfs_root **found_root,
2841 u64 bytenr)
2842 {
2843 struct btrfs_key root_location;
2844 struct btrfs_root *cur_root = *found_root;
2845 struct btrfs_file_extent_item *file_extent;
2846 u64 root_search_start = BTRFS_FS_TREE_OBJECTID;
2847 u64 found_bytenr;
2848 int ret;
2849
2850 root_location.offset = (u64)-1;
2851 root_location.type = BTRFS_ROOT_ITEM_KEY;
2852 path->lowest_level = level;
2853 path->reada = 0;
2854 while(1) {
2855 ret = btrfs_search_slot(NULL, cur_root, key0, path, 0, 0);
2856 found_bytenr = 0;
2857 if (ret == 0 && file_key) {
2858 struct extent_buffer *leaf = path->nodes[0];
2859 file_extent = btrfs_item_ptr(leaf, path->slots[0],
2860 struct btrfs_file_extent_item);
2861 if (btrfs_file_extent_type(leaf, file_extent) ==
2862 BTRFS_FILE_EXTENT_REG) {
2863 found_bytenr =
2864 btrfs_file_extent_disk_bytenr(leaf,
2865 file_extent);
2866 }
2867 } else if (!file_key) {
2868 if (path->nodes[level])
2869 found_bytenr = path->nodes[level]->start;
2870 }
2871
2872 btrfs_release_path(cur_root, path);
2873
2874 if (found_bytenr == bytenr) {
2875 *found_root = cur_root;
2876 ret = 0;
2877 goto out;
2878 }
2879 ret = btrfs_search_root(root->fs_info->tree_root,
2880 root_search_start, &root_search_start);
2881 if (ret)
2882 break;
2883
2884 root_location.objectid = root_search_start;
2885 cur_root = btrfs_read_fs_root_no_name(root->fs_info,
2886 &root_location);
2887 if (!cur_root) {
2888 ret = 1;
2889 break;
2890 }
2891 }
2892 out:
2893 path->lowest_level = 0;
2894 return ret;
2895 }
2896
2897 /*
2898 * note, this releases the path
2899 */
2900 static int noinline relocate_one_reference(struct btrfs_root *extent_root,
2901 struct btrfs_path *path,
2902 struct btrfs_key *extent_key,
2903 u64 *last_file_objectid,
2904 u64 *last_file_offset,
2905 u64 *last_file_root,
2906 u64 last_extent)
2907 {
2908 struct inode *inode;
2909 struct btrfs_root *found_root;
2910 struct btrfs_key root_location;
2911 struct btrfs_key found_key;
2912 struct btrfs_extent_ref *ref;
2913 u64 ref_root;
2914 u64 ref_gen;
2915 u64 ref_objectid;
2916 u64 ref_offset;
2917 int ret;
2918 int level;
2919
2920 WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
2921
2922 ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
2923 struct btrfs_extent_ref);
2924 ref_root = btrfs_ref_root(path->nodes[0], ref);
2925 ref_gen = btrfs_ref_generation(path->nodes[0], ref);
2926 ref_objectid = btrfs_ref_objectid(path->nodes[0], ref);
2927 ref_offset = btrfs_ref_offset(path->nodes[0], ref);
2928 btrfs_release_path(extent_root, path);
2929
2930 root_location.objectid = ref_root;
2931 if (ref_gen == 0)
2932 root_location.offset = 0;
2933 else
2934 root_location.offset = (u64)-1;
2935 root_location.type = BTRFS_ROOT_ITEM_KEY;
2936
2937 found_root = btrfs_read_fs_root_no_name(extent_root->fs_info,
2938 &root_location);
2939 BUG_ON(!found_root);
2940 mutex_unlock(&extent_root->fs_info->alloc_mutex);
2941
2942 if (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
2943 found_key.objectid = ref_objectid;
2944 found_key.type = BTRFS_EXTENT_DATA_KEY;
2945 found_key.offset = ref_offset;
2946 level = 0;
2947
2948 if (last_extent == extent_key->objectid &&
2949 *last_file_objectid == ref_objectid &&
2950 *last_file_offset == ref_offset &&
2951 *last_file_root == ref_root)
2952 goto out;
2953
2954 ret = find_root_for_ref(extent_root, path, &found_key,
2955 level, 1, &found_root,
2956 extent_key->objectid);
2957
2958 if (ret)
2959 goto out;
2960
2961 if (last_extent == extent_key->objectid &&
2962 *last_file_objectid == ref_objectid &&
2963 *last_file_offset == ref_offset &&
2964 *last_file_root == ref_root)
2965 goto out;
2966
2967 inode = btrfs_iget_locked(extent_root->fs_info->sb,
2968 ref_objectid, found_root);
if (!inode)	/* editorial fix: iget can fail under memory pressure */
	goto out;
2969 if (inode->i_state & I_NEW) {
2970 /* the inode and parent dir are two different roots */
2971 BTRFS_I(inode)->root = found_root;
2972 BTRFS_I(inode)->location.objectid = ref_objectid;
2973 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
2974 BTRFS_I(inode)->location.offset = 0;
2975 btrfs_read_locked_inode(inode);
2976 unlock_new_inode(inode);
2977
2978 }
2979 /* this can happen if the reference is not against
2980 * the latest version of the tree root
2981 */
2982 if (is_bad_inode(inode))
2983 goto out;
2984
2985 *last_file_objectid = inode->i_ino;
2986 *last_file_root = found_root->root_key.objectid;
2987 *last_file_offset = ref_offset;
2988
2989 relocate_inode_pages(inode, ref_offset, extent_key->offset);
2990 iput(inode);
2991 } else {
2992 struct btrfs_trans_handle *trans;
2993 struct extent_buffer *eb;
2994 int needs_lock = 0;
2995
2996 eb = read_tree_block(found_root, extent_key->objectid,
2997 extent_key->offset, 0);
2998 btrfs_tree_lock(eb);
2999 level = btrfs_header_level(eb);
3000
3001 if (level == 0)
3002 btrfs_item_key_to_cpu(eb, &found_key, 0);
3003 else
3004 btrfs_node_key_to_cpu(eb, &found_key, 0);
3005
3006 btrfs_tree_unlock(eb);
3007 free_extent_buffer(eb);
3008
3009 ret = find_root_for_ref(extent_root, path, &found_key,
3010 level, 0, &found_root,
3011 extent_key->objectid);
3012
3013 if (ret)
3014 goto out;
3015
3016 /*
3017 * right here almost anything could happen to our key,
3018 * but that's ok. The cow below will either relocate it
3019 * or someone else will have relocated it. Either way,
3020 * it is in a different spot than it was before and
3021 * we're happy.
3022 */
3023
3024 trans = btrfs_start_transaction(found_root, 1);
3025
3026 if (found_root == extent_root->fs_info->extent_root ||
3027 found_root == extent_root->fs_info->chunk_root ||
3028 found_root == extent_root->fs_info->dev_root) {
3029 needs_lock = 1;
3030 mutex_lock(&extent_root->fs_info->alloc_mutex);
3031 }
3032
3033 path->lowest_level = level;
3034 path->reada = 2;
3035 ret = btrfs_search_slot(trans, found_root, &found_key, path,
3036 0, 1);
3037 path->lowest_level = 0;
3038 btrfs_release_path(found_root, path);
3039
3040 if (found_root == found_root->fs_info->extent_root)
3041 btrfs_extent_post_op(trans, found_root);
3042 if (needs_lock)
3043 mutex_unlock(&extent_root->fs_info->alloc_mutex);
3044
3045 btrfs_end_transaction(trans, found_root);
3046
3047 }
3048 out:
3049 mutex_lock(&extent_root->fs_info->alloc_mutex);
3050 return 0;
3051 }
3052
3053 static int noinline del_extent_zero(struct btrfs_root *extent_root,
3054 struct btrfs_path *path,
3055 struct btrfs_key *extent_key)
3056 {
3057 int ret;
3058 struct btrfs_trans_handle *trans;
3059
3060 trans = btrfs_start_transaction(extent_root, 1);
3061 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
3062 if (ret > 0) {
3063 ret = -EIO;
3064 goto out;
3065 }
3066 if (ret < 0)
3067 goto out;
3068 ret = btrfs_del_item(trans, extent_root, path);
3069 out:
3070 btrfs_end_transaction(trans, extent_root);
3071 return ret;
3072 }
3073
3074 static int noinline relocate_one_extent(struct btrfs_root *extent_root,
3075 struct btrfs_path *path,
3076 struct btrfs_key *extent_key)
3077 {
3078 struct btrfs_key key;
3079 struct btrfs_key found_key;
3080 struct extent_buffer *leaf;
3081 u64 last_file_objectid = 0;
3082 u64 last_file_root = 0;
3083 u64 last_file_offset = (u64)-1;
3084 u64 last_extent = 0;
3085 u32 nritems;
3086 u32 item_size;
3087 int ret = 0;
3088
3089 if (extent_key->objectid == 0) {
3090 ret = del_extent_zero(extent_root, path, extent_key);
3091 goto out;
3092 }
3093 key.objectid = extent_key->objectid;
3094 key.type = BTRFS_EXTENT_REF_KEY;
3095 key.offset = 0;
3096
3097 while(1) {
3098 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3099
3100 if (ret < 0)
3101 goto out;
3102
3103 ret = 0;
3104 leaf = path->nodes[0];
3105 nritems = btrfs_header_nritems(leaf);
3106 if (path->slots[0] == nritems) {
3107 ret = btrfs_next_leaf(extent_root, path);
3108 if (ret > 0) {
3109 ret = 0;
3110 goto out;
3111 }
3112 if (ret < 0)
3113 goto out;
3114 leaf = path->nodes[0];
3115 }
3116
3117 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3118 if (found_key.objectid != extent_key->objectid) {
3119 break;
3120 }
3121
3122 if (found_key.type != BTRFS_EXTENT_REF_KEY) {
3123 break;
3124 }
3125
3126 key.offset = found_key.offset + 1;
3127 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3128
3129 ret = relocate_one_reference(extent_root, path, extent_key,
3130 &last_file_objectid,
3131 &last_file_offset,
3132 &last_file_root, last_extent);
3133 if (ret)
3134 goto out;
3135 last_extent = extent_key->objectid;
3136 }
3137 ret = 0;
3138 out:
3139 btrfs_release_path(extent_root, path);
3140 return ret;
3141 }
3142
3143 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
3144 {
3145 u64 num_devices;
3146 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
3147 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
3148
3149 num_devices = root->fs_info->fs_devices->num_devices;
3150 if (num_devices == 1) {
3151 stripped |= BTRFS_BLOCK_GROUP_DUP;
3152 stripped = flags & ~stripped;
3153
3154 /* turn raid0 into single device chunks */
3155 if (flags & BTRFS_BLOCK_GROUP_RAID0)
3156 return stripped;
3157
3158 /* turn mirroring into duplication */
3159 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3160 BTRFS_BLOCK_GROUP_RAID10))
3161 return stripped | BTRFS_BLOCK_GROUP_DUP;
3162 return flags;
3163 } else {
3164 /* they already had raid on here, just return */
3165 if (flags & stripped)
3166 return flags;
3167
3168 stripped |= BTRFS_BLOCK_GROUP_DUP;
3169 stripped = flags & ~stripped;
3170
3171 /* switch duplicated blocks with raid1 */
3172 if (flags & BTRFS_BLOCK_GROUP_DUP)
3173 return stripped | BTRFS_BLOCK_GROUP_RAID1;
3174
3175 /* turn single device chunks into raid0 */
3176 return stripped | BTRFS_BLOCK_GROUP_RAID0;
3177 }
3178 return flags;
3179 }
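
/*
 * Worked examples for update_block_group_flags() (editorial note, not
 * in the original).  With one device:
 *	DATA|RAID0  -> DATA		(raid0 becomes single chunks)
 *	DATA|RAID1  -> DATA|DUP		(mirroring becomes duplication)
 * With more than one device:
 *	DATA|DUP    -> DATA|RAID1	(duplication becomes mirroring)
 *	DATA        -> DATA|RAID0	(single chunks become raid0)
 *	DATA|RAID10 -> DATA|RAID10	(already striped, unchanged)
 */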
3180
3181 int __alloc_chunk_for_shrink(struct btrfs_root *root,
3182 struct btrfs_block_group_cache *shrink_block_group,
3183 int force)
3184 {
3185 struct btrfs_trans_handle *trans;
3186 u64 new_alloc_flags;
3187 u64 calc;
3188
3189 spin_lock(&shrink_block_group->lock);
3190 if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
3191 spin_unlock(&shrink_block_group->lock);
3192 mutex_unlock(&root->fs_info->alloc_mutex);
3193
3194 trans = btrfs_start_transaction(root, 1);
3195 mutex_lock(&root->fs_info->alloc_mutex);
3196 spin_lock(&shrink_block_group->lock);
3197
3198 new_alloc_flags = update_block_group_flags(root,
3199 shrink_block_group->flags);
3200 if (new_alloc_flags != shrink_block_group->flags) {
3201 calc =
3202 btrfs_block_group_used(&shrink_block_group->item);
3203 } else {
3204 calc = shrink_block_group->key.offset;
3205 }
3206 spin_unlock(&shrink_block_group->lock);
3207
3208 do_chunk_alloc(trans, root->fs_info->extent_root,
3209 calc + 2 * 1024 * 1024, new_alloc_flags, force);
3210
3211 mutex_unlock(&root->fs_info->alloc_mutex);
3212 btrfs_end_transaction(trans, root);
3213 mutex_lock(&root->fs_info->alloc_mutex);
3214 } else
3215 spin_unlock(&shrink_block_group->lock);
3216 return 0;
3217 }
3218
3219 int btrfs_shrink_extent_tree(struct btrfs_root *root, u64 shrink_start)
3220 {
3221 struct btrfs_trans_handle *trans;
3222 struct btrfs_root *tree_root = root->fs_info->tree_root;
3223 struct btrfs_path *path;
3224 u64 cur_byte;
3225 u64 total_found;
3226 u64 shrink_last_byte;
3227 struct btrfs_block_group_cache *shrink_block_group;
3228 struct btrfs_fs_info *info = root->fs_info;
3229 struct btrfs_key key;
3230 struct btrfs_key found_key;
3231 struct extent_buffer *leaf;
3232 u32 nritems;
3233 int ret;
3234 int progress;
3235
3236 mutex_lock(&root->fs_info->alloc_mutex);
3237 shrink_block_group = btrfs_lookup_block_group(root->fs_info,
3238 shrink_start);
3239 BUG_ON(!shrink_block_group);
3240
3241 shrink_last_byte = shrink_block_group->key.objectid +
3242 shrink_block_group->key.offset;
3243
3244 shrink_block_group->space_info->total_bytes -=
3245 shrink_block_group->key.offset;
3246 path = btrfs_alloc_path();
if (!path) {	/* editorial fix: bail out before the path is used below */
	mutex_unlock(&root->fs_info->alloc_mutex);
	return -ENOMEM;
}
3247 root = root->fs_info->extent_root;
3248 path->reada = 2;
3249
3250 printk("btrfs relocating block group %llu flags %llu\n",
3251 (unsigned long long)shrink_start,
3252 (unsigned long long)shrink_block_group->flags);
3253
3254 __alloc_chunk_for_shrink(root, shrink_block_group, 1);
3255
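/*
 * each pass relocates every extent still inside the block group; the
 * commits and ordered-extent wait below push the copied data to disk
 * before the rescan, so the loop ends once a pass finds nothing left
 * to move.
 */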
3256 again:
3257
3258 shrink_block_group->ro = 1;
3259
3260 total_found = 0;
3261 progress = 0;
3262 key.objectid = shrink_start;
3263 key.offset = 0;
3264 key.type = 0;
3265 cur_byte = key.objectid;
3266
3267 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3268 if (ret < 0)
3269 goto out;
3270
3271 ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
3272 if (ret < 0)
3273 goto out;
3274
3275 if (ret == 0) {
3276 leaf = path->nodes[0];
3277 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3278 if (found_key.objectid + found_key.offset > shrink_start &&
3279 found_key.objectid < shrink_last_byte) {
3280 cur_byte = found_key.objectid;
3281 key.objectid = cur_byte;
3282 }
3283 }
3284 btrfs_release_path(root, path);
3285
3286 while(1) {
3287 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3288 if (ret < 0)
3289 goto out;
3290
3291 next:
3292 leaf = path->nodes[0];
3293 nritems = btrfs_header_nritems(leaf);
3294 if (path->slots[0] >= nritems) {
3295 ret = btrfs_next_leaf(root, path);
3296 if (ret < 0)
3297 goto out;
3298 if (ret == 1) {
3299 ret = 0;
3300 break;
3301 }
3302 leaf = path->nodes[0];
3303 nritems = btrfs_header_nritems(leaf);
3304 }
3305
3306 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3307
3308 if (found_key.objectid >= shrink_last_byte)
3309 break;
3310
3311 if (progress && need_resched()) {
3312 memcpy(&key, &found_key, sizeof(key));
3313 cond_resched();
3314 btrfs_release_path(root, path);
3315 btrfs_search_slot(NULL, root, &key, path, 0, 0);
3316 progress = 0;
3317 goto next;
3318 }
3319 progress = 1;
3320
3321 if (btrfs_key_type(&found_key) != BTRFS_EXTENT_ITEM_KEY ||
3322 found_key.objectid + found_key.offset <= cur_byte) {
3323 memcpy(&key, &found_key, sizeof(key));
3324 key.offset++;
3325 path->slots[0]++;
3326 goto next;
3327 }
3328
3329 total_found++;
3330 cur_byte = found_key.objectid + found_key.offset;
3331 key.objectid = cur_byte;
3332 btrfs_release_path(root, path);
3333 ret = relocate_one_extent(root, path, &found_key);
3334 __alloc_chunk_for_shrink(root, shrink_block_group, 0);
3335 }
3336
3337 btrfs_release_path(root, path);
3338
3339 if (total_found > 0) {
3340 printk("btrfs relocate found %llu last extent was %llu\n",
3341 (unsigned long long)total_found,
3342 (unsigned long long)found_key.objectid);
3343 mutex_unlock(&root->fs_info->alloc_mutex);
3344 trans = btrfs_start_transaction(tree_root, 1);
3345 btrfs_commit_transaction(trans, tree_root);
3346
3347 btrfs_clean_old_snapshots(tree_root);
3348
3349 btrfs_wait_ordered_extents(tree_root);
3350
3351 trans = btrfs_start_transaction(tree_root, 1);
3352 btrfs_commit_transaction(trans, tree_root);
3353 mutex_lock(&root->fs_info->alloc_mutex);
3354 goto again;
3355 }
3356
3357 /*
3358 * we've freed all the extents, now remove the block
3359 * group item from the tree
3360 */
3361 mutex_unlock(&root->fs_info->alloc_mutex);
3362
3363 trans = btrfs_start_transaction(root, 1);
3364
3365 mutex_lock(&root->fs_info->alloc_mutex);
3366 memcpy(&key, &shrink_block_group->key, sizeof(key));
3367
3368 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3369 if (ret > 0)
3370 ret = -EIO;
3371 if (ret < 0) {
3372 btrfs_end_transaction(trans, root);
3373 goto out;
3374 }
3375
3376 clear_extent_bits(&info->block_group_cache, key.objectid,
3377 key.objectid + key.offset - 1,
3378 (unsigned int)-1, GFP_NOFS);
3379
3380
3381 clear_extent_bits(&info->free_space_cache,
3382 key.objectid, key.objectid + key.offset - 1,
3383 (unsigned int)-1, GFP_NOFS);
3384
3385 memset(shrink_block_group, 0, sizeof(*shrink_block_group));
3386 kfree(shrink_block_group);
3387
3388 btrfs_del_item(trans, root, path);
3389 btrfs_release_path(root, path);
3390 mutex_unlock(&root->fs_info->alloc_mutex);
3391 btrfs_commit_transaction(trans, root);
3392
3393 mutex_lock(&root->fs_info->alloc_mutex);
3394
3395 /* the code to unpin extents might set a few bits in the free
3396 * space cache for this range again
3397 */
3398 clear_extent_bits(&info->free_space_cache,
3399 key.objectid, key.objectid + key.offset - 1,
3400 (unsigned int)-1, GFP_NOFS);
3401 out:
3402 btrfs_free_path(path);
3403 mutex_unlock(&root->fs_info->alloc_mutex);
3404 return ret;
3405 }
3406
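/*
 * walk forward from *key and leave the path positioned on the next
 * block group item; returns 0 on success, -ENOENT when none remain.
 */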
3407 int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
3408 struct btrfs_key *key)
3409 {
3410 int ret = 0;
3411 struct btrfs_key found_key;
3412 struct extent_buffer *leaf;
3413 int slot;
3414
3415 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
3416 if (ret < 0)
3417 goto out;
3418
3419 while(1) {
3420 slot = path->slots[0];
3421 leaf = path->nodes[0];
3422 if (slot >= btrfs_header_nritems(leaf)) {
3423 ret = btrfs_next_leaf(root, path);
3424 if (ret == 0)
3425 continue;
3426 if (ret < 0)
3427 goto out;
3428 break;
3429 }
3430 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3431
3432 if (found_key.objectid >= key->objectid &&
3433 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
3434 ret = 0;
3435 goto out;
3436 }
3437 path->slots[0]++;
3438 }
3439 ret = -ENOENT;
3440 out:
3441 return ret;
3442 }
3443
3444 int btrfs_read_block_groups(struct btrfs_root *root)
3445 {
3446 struct btrfs_path *path;
3447 int ret;
3448 int bit;
3449 struct btrfs_block_group_cache *cache;
3450 struct btrfs_fs_info *info = root->fs_info;
3451 struct btrfs_space_info *space_info;
3452 struct extent_io_tree *block_group_cache;
3453 struct btrfs_key key;
3454 struct btrfs_key found_key;
3455 struct extent_buffer *leaf;
3456
3457 block_group_cache = &info->block_group_cache;
3458 root = info->extent_root;
3459 key.objectid = 0;
3460 key.offset = 0;
3461 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
3462 path = btrfs_alloc_path();
3463 if (!path)
3464 return -ENOMEM;
3465
3466 mutex_lock(&root->fs_info->alloc_mutex);
3467 while(1) {
3468 ret = find_first_block_group(root, path, &key);
3469 if (ret > 0) {
3470 ret = 0;
3471 goto error;
3472 }
3473 if (ret != 0)
3474 goto error;
3475
3476 leaf = path->nodes[0];
3477 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3478 cache = kzalloc(sizeof(*cache), GFP_NOFS);
3479 if (!cache) {
3480 ret = -ENOMEM;
3481 goto error;	/* editorial fix: don't let the final ret = 0 clobber -ENOMEM */
3482 }
3483
3484 spin_lock_init(&cache->lock);
3485 read_extent_buffer(leaf, &cache->item,
3486 btrfs_item_ptr_offset(leaf, path->slots[0]),
3487 sizeof(cache->item));
3488 memcpy(&cache->key, &found_key, sizeof(found_key));
3489
3490 key.objectid = found_key.objectid + found_key.offset;
3491 btrfs_release_path(root, path);
3492 cache->flags = btrfs_block_group_flags(&cache->item);
3493 bit = 0;
3494 if (cache->flags & BTRFS_BLOCK_GROUP_DATA) {
3495 bit = BLOCK_GROUP_DATA;
3496 } else if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
3497 bit = BLOCK_GROUP_SYSTEM;
3498 } else if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) {
3499 bit = BLOCK_GROUP_METADATA;
3500 }
3501 set_avail_alloc_bits(info, cache->flags);
3502
3503 ret = update_space_info(info, cache->flags, found_key.offset,
3504 btrfs_block_group_used(&cache->item),
3505 &space_info);
3506 BUG_ON(ret);
3507 cache->space_info = space_info;
3508
3509 /* use EXTENT_LOCKED to prevent merging */
3510 set_extent_bits(block_group_cache, found_key.objectid,
3511 found_key.objectid + found_key.offset - 1,
3512 EXTENT_LOCKED, GFP_NOFS);
3513 set_state_private(block_group_cache, found_key.objectid,
3514 (unsigned long)cache);
3515 set_extent_bits(block_group_cache, found_key.objectid,
3516 found_key.objectid + found_key.offset - 1,
3517 bit | EXTENT_LOCKED, GFP_NOFS);
3518 if (key.objectid >=
3519 btrfs_super_total_bytes(&info->super_copy))
3520 break;
3521 }
3522 ret = 0;
3523 error:
3524 btrfs_free_path(path);
3525 mutex_unlock(&root->fs_info->alloc_mutex);
3526 return ret;
3527 }
3528
3529 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
3530 struct btrfs_root *root, u64 bytes_used,
3531 u64 type, u64 chunk_objectid, u64 chunk_offset,
3532 u64 size)
3533 {
3534 int ret;
3535 int bit = 0;
3536 struct btrfs_root *extent_root;
3537 struct btrfs_block_group_cache *cache;
3538 struct extent_io_tree *block_group_cache;
3539
3540 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
3541 extent_root = root->fs_info->extent_root;
3542 block_group_cache = &root->fs_info->block_group_cache;
3543
3544 cache = kzalloc(sizeof(*cache), GFP_NOFS);
3545 BUG_ON(!cache);
3546 cache->key.objectid = chunk_offset;
3547 cache->key.offset = size;
3548 spin_lock_init(&cache->lock);
3549 btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
3550
3551 btrfs_set_block_group_used(&cache->item, bytes_used);
3552 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
3553 cache->flags = type;
3554 btrfs_set_block_group_flags(&cache->item, type);
3555
3556 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
3557 &cache->space_info);
3558 BUG_ON(ret);
3559
3560 bit = block_group_state_bits(type);
3561 set_extent_bits(block_group_cache, chunk_offset,
3562 chunk_offset + size - 1,
3563 EXTENT_LOCKED, GFP_NOFS);
3564 set_state_private(block_group_cache, chunk_offset,
3565 (unsigned long)cache);
3566 set_extent_bits(block_group_cache, chunk_offset,
3567 chunk_offset + size - 1,
3568 bit | EXTENT_LOCKED, GFP_NOFS);
3569
3570 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
3571 sizeof(cache->item));
3572 BUG_ON(ret);
3573
3574 finish_current_insert(trans, extent_root);
3575 ret = del_pending_extents(trans, extent_root);
3576 BUG_ON(ret);
3577 set_avail_alloc_bits(extent_root->fs_info, type);
3578
3579 return 0;
3580 }