/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include "hash.h"
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "ref-cache.h"

#define BLOCK_GROUP_DATA     EXTENT_WRITEBACK
#define BLOCK_GROUP_METADATA EXTENT_UPTODATE
#define BLOCK_GROUP_SYSTEM   EXTENT_NEW

#define BLOCK_GROUP_DIRTY    EXTENT_DIRTY

static int finish_current_insert(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *extent_root);
static int del_pending_extents(struct btrfs_trans_handle *trans,
                               struct btrfs_root *extent_root);
static struct btrfs_block_group_cache *
__btrfs_find_block_group(struct btrfs_root *root,
                         struct btrfs_block_group_cache *hint,
                         u64 search_start, int data, int owner);

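/*
 * The extent, chunk and device roots do their allocation work with the
 * alloc_mutex already held, so only take it on behalf of other roots.
 */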
void maybe_lock_mutex(struct btrfs_root *root)
{
        if (root != root->fs_info->extent_root &&
            root != root->fs_info->chunk_root &&
            root != root->fs_info->dev_root) {
                mutex_lock(&root->fs_info->alloc_mutex);
        }
}

void maybe_unlock_mutex(struct btrfs_root *root)
{
        if (root != root->fs_info->extent_root &&
            root != root->fs_info->chunk_root &&
            root != root->fs_info->dev_root) {
                mutex_unlock(&root->fs_info->alloc_mutex);
        }
}

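/*
 * Walk the extent items that fall inside a block group and record the
 * gaps between them in the free_space_cache tree, so the allocator can
 * find free space without rescanning the extent tree every time.
 */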
static int cache_block_group(struct btrfs_root *root,
                             struct btrfs_block_group_cache *block_group)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct extent_io_tree *free_space_cache;
        int slot;
        u64 last = 0;
        u64 hole_size;
        u64 first_free;
        int found = 0;

        if (!block_group)
                return 0;

        root = root->fs_info->extent_root;
        free_space_cache = &root->fs_info->free_space_cache;

        if (block_group->cached)
                return 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->reada = 2;
        /*
         * we get into deadlocks with paths held by callers of this function.
         * since the alloc_mutex is protecting things right now, just
         * skip the locking here
         */
        path->skip_locking = 1;
        first_free = block_group->key.objectid;
        key.objectid = block_group->key.objectid;
        key.offset = 0;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0) {
                btrfs_free_path(path);
                return ret;
        }
        ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
        if (ret < 0) {
                btrfs_free_path(path);
                return ret;
        }
        if (ret == 0) {
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid + key.offset > first_free)
                        first_free = key.objectid + key.offset;
        }
        while (1) {
                leaf = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                goto err;
                        if (ret == 0)
                                continue;
                        else
                                break;
                }
                btrfs_item_key_to_cpu(leaf, &key, slot);
                if (key.objectid < block_group->key.objectid)
                        goto next;
                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
                        if (!found) {
                                last = first_free;
                                found = 1;
                        }
                        if (key.objectid > last) {
                                hole_size = key.objectid - last;
                                set_extent_dirty(free_space_cache, last,
                                                 last + hole_size - 1,
                                                 GFP_NOFS);
                        }
                        last = key.objectid + key.offset;
                }
next:
                path->slots[0]++;
        }

        if (!found)
                last = first_free;
        if (block_group->key.objectid +
            block_group->key.offset > last) {
                hole_size = block_group->key.objectid +
                            block_group->key.offset - last;
                set_extent_dirty(free_space_cache, last,
                                 last + hole_size - 1, GFP_NOFS);
        }
        block_group->cached = 1;
err:
        btrfs_free_path(path);
        return 0;
}

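/*
 * btrfs_lookup_first_block_group returns the block group at or after
 * bytenr, while btrfs_lookup_block_group only returns a group that
 * actually contains bytenr.
 */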
struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct extent_io_tree *block_group_cache;
        struct btrfs_block_group_cache *block_group = NULL;
        u64 ptr;
        u64 start;
        u64 end;
        int ret;

        bytenr = max_t(u64, bytenr,
                       BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
        block_group_cache = &info->block_group_cache;
        ret = find_first_extent_bit(block_group_cache,
                                    bytenr, &start, &end,
                                    BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
                                    BLOCK_GROUP_SYSTEM);
        if (ret)
                return NULL;
        ret = get_state_private(block_group_cache, start, &ptr);
        if (ret)
                return NULL;

        block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
        return block_group;
}

struct btrfs_block_group_cache *
btrfs_lookup_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct extent_io_tree *block_group_cache;
        struct btrfs_block_group_cache *block_group = NULL;
        u64 ptr;
        u64 start;
        u64 end;
        int ret;

        bytenr = max_t(u64, bytenr,
                       BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
        block_group_cache = &info->block_group_cache;
        ret = find_first_extent_bit(block_group_cache,
                                    bytenr, &start, &end,
                                    BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
                                    BLOCK_GROUP_SYSTEM);
        if (ret)
                return NULL;
        ret = get_state_private(block_group_cache, start, &ptr);
        if (ret)
                return NULL;

        block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
        if (block_group->key.objectid <= bytenr && bytenr <
            block_group->key.objectid + block_group->key.offset)
                return block_group;
        return NULL;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

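/*
 * Scan the free_space_cache for a run of at least num free bytes inside
 * the current block group, moving on to later block groups (and
 * wrapping back to search_start once) when the current group has
 * nothing usable.
 */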
static int noinline find_search_start(struct btrfs_root *root,
                                      struct btrfs_block_group_cache **cache_ret,
                                      u64 *start_ret, u64 num, int data)
{
        int ret;
        struct btrfs_block_group_cache *cache = *cache_ret;
        struct extent_io_tree *free_space_cache;
        struct extent_state *state;
        u64 last;
        u64 start = 0;
        u64 cache_miss = 0;
        u64 total_fs_bytes;
        u64 search_start = *start_ret;
        int wrapped = 0;

        WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
        total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
        free_space_cache = &root->fs_info->free_space_cache;

        if (!cache)
                goto out;

again:
        ret = cache_block_group(root, cache);
        if (ret)
                goto out;

        last = max(search_start, cache->key.objectid);
        if (!block_group_bits(cache, data) || cache->ro)
                goto new_group;

        spin_lock_irq(&free_space_cache->lock);
        state = find_first_extent_bit_state(free_space_cache, last,
                                            EXTENT_DIRTY);
        while (1) {
                if (!state) {
                        if (!cache_miss)
                                cache_miss = last;
                        spin_unlock_irq(&free_space_cache->lock);
                        goto new_group;
                }

                start = max(last, state->start);
                last = state->end + 1;
                if (last - start < num) {
                        do {
                                state = extent_state_next(state);
                        } while (state && !(state->state & EXTENT_DIRTY));
                        continue;
                }
                spin_unlock_irq(&free_space_cache->lock);
                if (cache->ro)
                        goto new_group;
                if (start + num > cache->key.objectid + cache->key.offset)
                        goto new_group;
                if (!block_group_bits(cache, data)) {
                        printk("block group bits don't match %Lu %d\n",
                               cache->flags, data);
                }
                *start_ret = start;
                return 0;
        }
out:
        cache = btrfs_lookup_block_group(root->fs_info, search_start);
        if (!cache) {
                printk("Unable to find block group for %Lu\n", search_start);
                WARN_ON(1);
        }
        return -ENOSPC;

new_group:
        last = cache->key.objectid + cache->key.offset;
wrapped:
        cache = btrfs_lookup_first_block_group(root->fs_info, last);
        if (!cache || cache->key.objectid >= total_fs_bytes) {
no_cache:
                if (!wrapped) {
                        wrapped = 1;
                        last = search_start;
                        goto wrapped;
                }
                goto out;
        }
        if (cache_miss && !cache->cached) {
                cache_block_group(root, cache);
                last = cache_miss;
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
        }
        cache_miss = 0;
        cache = btrfs_find_block_group(root, cache, last, data, 0);
        if (!cache)
                goto no_cache;
        *cache_ret = cache;
        goto again;
}

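/* return num scaled by factor tenths, i.e. num * factor / 10 */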
static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        do_div(num, 10);
        return num;
}

static int block_group_state_bits(u64 flags)
{
        int bits = 0;
        if (flags & BTRFS_BLOCK_GROUP_DATA)
                bits |= BLOCK_GROUP_DATA;
        if (flags & BTRFS_BLOCK_GROUP_METADATA)
                bits |= BLOCK_GROUP_METADATA;
        if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                bits |= BLOCK_GROUP_SYSTEM;
        return bits;
}

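/*
 * Find a block group of the requested type (data/metadata/system) that
 * is less than factor/10 full, trying the search_start and hint groups
 * first.  Metadata searches start with factor 9 and fall back to a
 * full search with factor 10 before giving up.
 */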
static struct btrfs_block_group_cache *
__btrfs_find_block_group(struct btrfs_root *root,
                         struct btrfs_block_group_cache *hint,
                         u64 search_start, int data, int owner)
{
        struct btrfs_block_group_cache *cache;
        struct extent_io_tree *block_group_cache;
        struct btrfs_block_group_cache *found_group = NULL;
        struct btrfs_fs_info *info = root->fs_info;
        u64 used;
        u64 last = 0;
        u64 start;
        u64 end;
        u64 free_check;
        u64 ptr;
        int bit;
        int ret;
        int full_search = 0;
        int factor = 10;
        int wrapped = 0;

        block_group_cache = &info->block_group_cache;

        if (data & BTRFS_BLOCK_GROUP_METADATA)
                factor = 9;

        bit = block_group_state_bits(data);

        if (search_start) {
                struct btrfs_block_group_cache *shint;
                shint = btrfs_lookup_first_block_group(info, search_start);
                if (shint && block_group_bits(shint, data) && !shint->ro) {
                        spin_lock(&shint->lock);
                        used = btrfs_block_group_used(&shint->item);
                        if (used + shint->pinned <
                            div_factor(shint->key.offset, factor)) {
                                spin_unlock(&shint->lock);
                                return shint;
                        }
                        spin_unlock(&shint->lock);
                }
        }
        if (hint && !hint->ro && block_group_bits(hint, data)) {
                spin_lock(&hint->lock);
                used = btrfs_block_group_used(&hint->item);
                if (used + hint->pinned <
                    div_factor(hint->key.offset, factor)) {
                        spin_unlock(&hint->lock);
                        return hint;
                }
                spin_unlock(&hint->lock);
                last = hint->key.objectid + hint->key.offset;
        } else {
                if (hint)
                        last = max(hint->key.objectid, search_start);
                else
                        last = search_start;
        }
again:
        while (1) {
                ret = find_first_extent_bit(block_group_cache, last,
                                            &start, &end, bit);
                if (ret)
                        break;

                ret = get_state_private(block_group_cache, start, &ptr);
                if (ret) {
                        last = end + 1;
                        continue;
                }

                cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if (!cache->ro && block_group_bits(cache, data)) {
                        free_check = div_factor(cache->key.offset, factor);
                        if (used + cache->pinned < free_check) {
                                found_group = cache;
                                spin_unlock(&cache->lock);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return found_group;
}

struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
                                                struct btrfs_block_group_cache
                                                *hint, u64 search_start,
                                                int data, int owner)
{
        struct btrfs_block_group_cache *ret;
        ret = __btrfs_find_block_group(root, hint, search_start, data, owner);
        return ret;
}
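
/*
 * Hash the four back reference fields into the 64 bit key offset: the
 * root objectid feeds the high 32 bits, while the generation, owner
 * and (for file extents) owner offset feed the low 32 bits.
 */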
static u64 hash_extent_ref(u64 root_objectid, u64 ref_generation,
                           u64 owner, u64 owner_offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(ref_generation);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
        if (owner >= BTRFS_FIRST_FREE_OBJECTID) {
                lenum = cpu_to_le64(owner);
                low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
                lenum = cpu_to_le64(owner_offset);
                low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
        }
        return ((u64)high_crc << 32) | (u64)low_crc;
}

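/*
 * Compare an on-disk back reference with a stack one.  When the cpu
 * ref has no objectid only the root and generation fields are compared.
 */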
static int match_extent_ref(struct extent_buffer *leaf,
                            struct btrfs_extent_ref *disk_ref,
                            struct btrfs_extent_ref *cpu_ref)
{
        int ret;
        int len;

        if (cpu_ref->objectid)
                len = sizeof(*cpu_ref);
        else
                len = 2 * sizeof(u64);
        ret = memcmp_extent_buffer(leaf, cpu_ref, (unsigned long)disk_ref,
                                   len);
        return ret == 0;
}

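/*
 * Find the back reference item for (bytenr, root_objectid,
 * ref_generation, owner, owner_offset).  Hash collisions are resolved
 * by walking forward from the hashed key offset until a matching item
 * is found.  With del set, the path is left ready for deletion.
 */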
static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path, u64 bytenr,
                                          u64 root_objectid,
                                          u64 ref_generation, u64 owner,
                                          u64 owner_offset, int del)
{
        u64 hash;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_extent_ref ref;
        struct extent_buffer *leaf;
        struct btrfs_extent_ref *disk_ref;
        int ret;
        int ret2;

        btrfs_set_stack_ref_root(&ref, root_objectid);
        btrfs_set_stack_ref_generation(&ref, ref_generation);
        btrfs_set_stack_ref_objectid(&ref, owner);
        btrfs_set_stack_ref_offset(&ref, owner_offset);

        hash = hash_extent_ref(root_objectid, ref_generation, owner,
                               owner_offset);
        key.offset = hash;
        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_REF_KEY;

        while (1) {
                ret = btrfs_search_slot(trans, root, &key, path,
                                        del ? -1 : 0, del);
                if (ret < 0)
                        goto out;
                leaf = path->nodes[0];
                if (ret != 0) {
                        u32 nritems = btrfs_header_nritems(leaf);
                        if (path->slots[0] >= nritems) {
                                ret2 = btrfs_next_leaf(root, path);
                                if (ret2)
                                        goto out;
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        if (found_key.objectid != bytenr ||
                            found_key.type != BTRFS_EXTENT_REF_KEY)
                                goto out;
                        key.offset = found_key.offset;
                        if (del) {
                                btrfs_release_path(root, path);
                                continue;
                        }
                }
                disk_ref = btrfs_item_ptr(path->nodes[0],
                                          path->slots[0],
                                          struct btrfs_extent_ref);
                if (match_extent_ref(path->nodes[0], disk_ref, &ref)) {
                        ret = 0;
                        goto out;
                }
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                key.offset = found_key.offset + 1;
                btrfs_release_path(root, path);
        }
out:
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume (in theory, not implemented yet)
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure has fields for:
 *
 * - Objectid of the subvolume root
 * - Generation number of the tree holding the reference
 * - objectid of the file holding the reference
 * - offset in the file corresponding to the key holding the reference
 *
 * When a file extent is allocated the fields are filled in:
 *     (root_key.objectid, trans->transid, inode objectid, offset in file)
 *
 * When a leaf is cow'd new references are added for every file extent found
 * in the leaf.  It looks the same as the create case, but trans->transid
 * will be different when the block is cow'd.
 *
 *     (root_key.objectid, trans->transid, inode objectid, offset in file)
 *
 * When a file extent is removed either during snapshot deletion or file
 * truncation, the corresponding back reference is found by searching for:
 *
 *     (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
 *      inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 * - Different generations of the same subvolume
 *
 * Storing sufficient information for a full reverse mapping of a btree
 * block would require storing the lowest key of the block in the backref,
 * and it would require updating that lowest key either before write out or
 * every time it changed.  Instead, the objectid of the lowest key is stored
 * along with the level of the tree block.  This provides a hint
 * about where in the btree the block can be found.  Searches through the
 * btree only need to look for a pointer to that block, so they stop one
 * level higher than the level recorded in the backref.
 *
 * Some btrees do not do reference counting on their extents.  These
 * include the extent tree and the tree of tree roots.  Backrefs for these
 * trees always have a generation of zero.
 *
 * When a tree block is created, back references are inserted:
 *
 * (root->root_key.objectid, trans->transid or zero, level, lowest_key_objectid)
 *
 * When a tree block is cow'd in a reference counted root, new back
 * references are added for all the blocks it points to.  These are of
 * the form (trans->transid will have increased since creation):
 *
 * (root->root_key.objectid, trans->transid, level, lowest_key_objectid)
 *
 * Because the lowest_key_objectid and the level are just hints they are
 * not used when backrefs are deleted.  When a backref is deleted:
 *
 * if backref was for a tree root:
 *     root_objectid = root->root_key.objectid
 * else
 *     root_objectid = btrfs_header_owner(parent)
 *
 * (root_objectid, btrfs_header_generation(parent) or zero, 0, 0)
 *
 * Back Reference Key hashing:
 *
 * Back references have four fields, each 64 bits long.  Unfortunately,
 * this is hashed into a single 64 bit number and placed into the key offset.
 * The key objectid corresponds to the first byte in the extent, and the
 * key type is set to BTRFS_EXTENT_REF_KEY.
 */
int btrfs_insert_extent_backref(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_path *path, u64 bytenr,
                                u64 root_objectid, u64 ref_generation,
                                u64 owner, u64 owner_offset)
{
        u64 hash;
        struct btrfs_key key;
        struct btrfs_extent_ref ref;
        struct btrfs_extent_ref *disk_ref;
        int ret;

        btrfs_set_stack_ref_root(&ref, root_objectid);
        btrfs_set_stack_ref_generation(&ref, ref_generation);
        btrfs_set_stack_ref_objectid(&ref, owner);
        btrfs_set_stack_ref_offset(&ref, owner_offset);

        hash = hash_extent_ref(root_objectid, ref_generation, owner,
                               owner_offset);
        key.offset = hash;
        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_REF_KEY;

        ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(ref));
        while (ret == -EEXIST) {
                disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                          struct btrfs_extent_ref);
                if (match_extent_ref(path->nodes[0], disk_ref, &ref))
                        goto out;
                key.offset++;
                btrfs_release_path(root, path);
                ret = btrfs_insert_empty_item(trans, root, path, &key,
                                              sizeof(ref));
        }
        if (ret)
                goto out;
        disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                  struct btrfs_extent_ref);
        write_extent_buffer(path->nodes[0], &ref, (unsigned long)disk_ref,
                            sizeof(ref));
        btrfs_mark_buffer_dirty(path->nodes[0]);
out:
        btrfs_release_path(root, path);
        return ret;
}

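/*
 * Bump the reference count in the extent item for bytenr and insert a
 * new back reference.  The caller must already hold the alloc_mutex.
 */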
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  u64 bytenr, u64 num_bytes,
                                  u64 root_objectid, u64 ref_generation,
                                  u64 owner, u64 owner_offset)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_extent_item *item;
        u32 refs;

        WARN_ON(num_bytes < root->sectorsize);
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->reada = 1;
        key.objectid = bytenr;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        key.offset = num_bytes;
        ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
                                0, 1);
        if (ret < 0) {
                btrfs_free_path(path);
                return ret;
        }
        BUG_ON(ret != 0);
        l = path->nodes[0];
        item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(l, item);
        btrfs_set_extent_refs(l, item, refs + 1);
        btrfs_mark_buffer_dirty(path->nodes[0]);

        btrfs_release_path(root->fs_info->extent_root, path);

        path->reada = 1;
        ret = btrfs_insert_extent_backref(trans, root->fs_info->extent_root,
                                          path, bytenr, root_objectid,
                                          ref_generation, owner, owner_offset);
        BUG_ON(ret);
        finish_current_insert(trans, root->fs_info->extent_root);
        del_pending_extents(trans, root->fs_info->extent_root);

        btrfs_free_path(path);
        return 0;
}

int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root,
                         u64 bytenr, u64 num_bytes,
                         u64 root_objectid, u64 ref_generation,
                         u64 owner, u64 owner_offset)
{
        int ret;

        mutex_lock(&root->fs_info->alloc_mutex);
        ret = __btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
                                     root_objectid, ref_generation,
                                     owner, owner_offset);
        mutex_unlock(&root->fs_info->alloc_mutex);
        return ret;
}

int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root)
{
        finish_current_insert(trans, root->fs_info->extent_root);
        del_pending_extents(trans, root->fs_info->extent_root);
        return 0;
}

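/*
 * Read the current reference count from the extent item covering
 * [bytenr, bytenr + num_bytes).
 */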
static int lookup_extent_ref(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 num_bytes, u32 *refs)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_extent_item *item;

        WARN_ON(num_bytes < root->sectorsize);
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->reada = 1;
        key.objectid = bytenr;
        key.offset = num_bytes;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
                                0, 0);
        if (ret < 0)
                goto out;
        if (ret != 0) {
                btrfs_print_leaf(root, path->nodes[0]);
                printk("failed to find block number %Lu\n", bytenr);
                BUG();
        }
        l = path->nodes[0];
        item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
        *refs = btrfs_extent_refs(l, item);
out:
        btrfs_free_path(path);
        return ret;
}

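/*
 * Count the holders of the extent at bytenr from this root's point of
 * view.  *ref_count is set to 1 while only references matching this
 * root (and the given objectid/generation constraints) are seen, and
 * to 2 as soon as any other holder shows up; *min_generation tracks
 * the oldest matching reference.
 */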
static int get_reference_status(struct btrfs_root *root, u64 bytenr,
                                u64 parent_gen, u64 ref_objectid,
                                u64 *min_generation, u32 *ref_count)
{
        struct btrfs_root *extent_root = root->fs_info->extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_extent_ref *ref_item;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u64 root_objectid = root->root_key.objectid;
        u64 ref_generation;
        u32 nritems;
        int ret;

        key.objectid = bytenr;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        mutex_lock(&root->fs_info->alloc_mutex);
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        BUG_ON(ret == 0);

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

        if (found_key.objectid != bytenr ||
            found_key.type != BTRFS_EXTENT_ITEM_KEY) {
                ret = 1;
                goto out;
        }

        *ref_count = 0;
        *min_generation = (u64)-1;

        while (1) {
                leaf = path->nodes[0];
                nritems = btrfs_header_nritems(leaf);
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto out;
                        if (ret == 0)
                                continue;
                        break;
                }
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                if (found_key.objectid != bytenr)
                        break;

                if (found_key.type != BTRFS_EXTENT_REF_KEY) {
                        path->slots[0]++;
                        continue;
                }

                ref_item = btrfs_item_ptr(leaf, path->slots[0],
                                          struct btrfs_extent_ref);
                ref_generation = btrfs_ref_generation(leaf, ref_item);
                /*
                 * For (parent_gen > 0 && parent_gen > ref_gen):
                 *
                 * we reach here through the oldest root, therefore
                 * all other references from the same snapshot should
                 * have a larger generation.
                 */
                if ((root_objectid != btrfs_ref_root(leaf, ref_item)) ||
                    (parent_gen > 0 && parent_gen > ref_generation) ||
                    (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
                     ref_objectid != btrfs_ref_objectid(leaf, ref_item))) {
                        if (ref_count)
                                *ref_count = 2;
                        break;
                }

                *ref_count = 1;
                if (*min_generation > ref_generation)
                        *min_generation = ref_generation;

                path->slots[0]++;
        }
        ret = 0;
out:
        mutex_unlock(&root->fs_info->alloc_mutex);
        btrfs_free_path(path);
        return ret;
}

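/*
 * Check whether the data extent at bytenr, described by the file
 * extent key *key, is also referenced by another snapshot.  Returns 1
 * when a cross reference exists, 0 when this root is the only holder,
 * and a negative errno on failure.
 */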
int btrfs_cross_ref_exists(struct btrfs_root *root,
                           struct btrfs_key *key, u64 bytenr)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *old_root;
        struct btrfs_path *path = NULL;
        struct extent_buffer *eb;
        struct btrfs_file_extent_item *item;
        u64 ref_generation;
        u64 min_generation;
        u64 extent_start;
        u32 ref_count;
        int level;
        int ret;

        BUG_ON(key->type != BTRFS_EXTENT_DATA_KEY);
        ret = get_reference_status(root, bytenr, 0, key->objectid,
                                   &min_generation, &ref_count);
        if (ret)
                return ret;

        if (ref_count != 1)
                return 1;

        trans = btrfs_start_transaction(root, 0);
        old_root = root->dirty_root->root;
        ref_generation = old_root->root_key.offset;

        /* all references are created in the running transaction */
        if (min_generation > ref_generation) {
                ret = 0;
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out;
        }

        path->skip_locking = 1;
        /* if no item is found, the extent is referenced by another snapshot */
        ret = btrfs_search_slot(NULL, old_root, key, path, 0, 0);
        if (ret)
                goto out;

        eb = path->nodes[0];
        item = btrfs_item_ptr(eb, path->slots[0],
                              struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(eb, item) != BTRFS_FILE_EXTENT_REG ||
            btrfs_file_extent_disk_bytenr(eb, item) != bytenr) {
                ret = 1;
                goto out;
        }

        for (level = BTRFS_MAX_LEVEL - 1; level >= -1; level--) {
                if (level >= 0) {
                        eb = path->nodes[level];
                        if (!eb)
                                continue;
                        extent_start = eb->start;
                } else
                        extent_start = bytenr;

                ret = get_reference_status(root, extent_start, ref_generation,
                                           0, &min_generation, &ref_count);
                if (ret)
                        goto out;

                if (ref_count != 1) {
                        ret = 1;
                        goto out;
                }
                if (level >= 0)
                        ref_generation = btrfs_header_generation(eb);
        }
        ret = 0;
out:
        if (path)
                btrfs_free_path(path);
        btrfs_end_transaction(trans, root);
        return ret;
}

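/*
 * Add one reference for every extent pointed to by buf (file extents
 * in a leaf, child blocks in a node), and optionally cache the leaf's
 * file extent references in the leaf ref tree.
 */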
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                  struct extent_buffer *buf, int cache_ref)
{
        u64 bytenr;
        u32 nritems;
        struct btrfs_key key;
        struct btrfs_file_extent_item *fi;
        int i;
        int level;
        int ret;
        int faili;
        int nr_file_extents = 0;

        if (!root->ref_cows)
                return 0;

        level = btrfs_header_level(buf);
        nritems = btrfs_header_nritems(buf);
        for (i = 0; i < nritems; i++) {
                cond_resched();
                if (level == 0) {
                        u64 disk_bytenr;
                        btrfs_item_key_to_cpu(buf, &key, i);
                        if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
                                continue;
                        fi = btrfs_item_ptr(buf, i,
                                            struct btrfs_file_extent_item);
                        if (btrfs_file_extent_type(buf, fi) ==
                            BTRFS_FILE_EXTENT_INLINE)
                                continue;
                        disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
                        if (disk_bytenr == 0)
                                continue;

                        if (buf != root->commit_root)
                                nr_file_extents++;

                        mutex_lock(&root->fs_info->alloc_mutex);
                        ret = __btrfs_inc_extent_ref(trans, root, disk_bytenr,
                                    btrfs_file_extent_disk_num_bytes(buf, fi),
                                    root->root_key.objectid, trans->transid,
                                    key.objectid, key.offset);
                        mutex_unlock(&root->fs_info->alloc_mutex);
                        if (ret) {
                                faili = i;
                                WARN_ON(1);
                                goto fail;
                        }
                } else {
                        bytenr = btrfs_node_blockptr(buf, i);
                        btrfs_node_key_to_cpu(buf, &key, i);

                        mutex_lock(&root->fs_info->alloc_mutex);
                        ret = __btrfs_inc_extent_ref(trans, root, bytenr,
                                           btrfs_level_size(root, level - 1),
                                           root->root_key.objectid,
                                           trans->transid,
                                           level - 1, key.objectid);
                        mutex_unlock(&root->fs_info->alloc_mutex);
                        if (ret) {
                                faili = i;
                                WARN_ON(1);
                                goto fail;
                        }
                }
        }
        /* cache the original leaf block's references */
        if (level == 0 && cache_ref && buf != root->commit_root) {
                struct btrfs_leaf_ref *ref;
                struct btrfs_extent_info *info;

                ref = btrfs_alloc_leaf_ref(root, nr_file_extents);
                if (!ref) {
                        WARN_ON(1);
                        goto out;
                }

                ref->root_gen = root->root_key.offset;
                ref->bytenr = buf->start;
                ref->owner = btrfs_header_owner(buf);
                ref->generation = btrfs_header_generation(buf);
                ref->nritems = nr_file_extents;
                info = ref->extents;

                for (i = 0; nr_file_extents > 0 && i < nritems; i++) {
                        u64 disk_bytenr;
                        btrfs_item_key_to_cpu(buf, &key, i);
                        if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
                                continue;
                        fi = btrfs_item_ptr(buf, i,
                                            struct btrfs_file_extent_item);
                        if (btrfs_file_extent_type(buf, fi) ==
                            BTRFS_FILE_EXTENT_INLINE)
                                continue;
                        disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
                        if (disk_bytenr == 0)
                                continue;

                        info->bytenr = disk_bytenr;
                        info->num_bytes =
                                btrfs_file_extent_disk_num_bytes(buf, fi);
                        info->objectid = key.objectid;
                        info->offset = key.offset;
                        info++;
                }

                BUG_ON(!root->ref_tree);
                ret = btrfs_add_leaf_ref(root, ref);
                WARN_ON(ret);
                btrfs_free_leaf_ref(root, ref);
        }
out:
        return 0;
fail:
        WARN_ON(1);
#if 0
        for (i = 0; i < faili; i++) {
                if (level == 0) {
                        u64 disk_bytenr;
                        btrfs_item_key_to_cpu(buf, &key, i);
                        if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
                                continue;
                        fi = btrfs_item_ptr(buf, i,
                                            struct btrfs_file_extent_item);
                        if (btrfs_file_extent_type(buf, fi) ==
                            BTRFS_FILE_EXTENT_INLINE)
                                continue;
                        disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
                        if (disk_bytenr == 0)
                                continue;
                        err = btrfs_free_extent(trans, root, disk_bytenr,
                                    btrfs_file_extent_disk_num_bytes(buf,
                                                                     fi), 0);
                        BUG_ON(err);
                } else {
                        bytenr = btrfs_node_blockptr(buf, i);
                        err = btrfs_free_extent(trans, root, bytenr,
                                        btrfs_level_size(root, level - 1), 0);
                        BUG_ON(err);
                }
        }
#endif
        return ret;
}

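/*
 * Write a single dirty block group item back into the extent tree.
 */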
static int write_one_cache_group(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_block_group_cache *cache)
{
        int ret;
        int pending_ret;
        struct btrfs_root *extent_root = root->fs_info->extent_root;
        unsigned long bi;
        struct extent_buffer *leaf;

        ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
        if (ret < 0)
                goto fail;
        BUG_ON(ret);

        leaf = path->nodes[0];
        bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
        write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(extent_root, path);
fail:
        finish_current_insert(trans, extent_root);
        pending_ret = del_pending_extents(trans, extent_root);
        if (ret)
                return ret;
        if (pending_ret)
                return pending_ret;
        return 0;
}

int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        struct extent_io_tree *block_group_cache;
        struct btrfs_block_group_cache *cache;
        int ret;
        int err = 0;
        int werr = 0;
        struct btrfs_path *path;
        u64 last = 0;
        u64 start;
        u64 end;
        u64 ptr;

        block_group_cache = &root->fs_info->block_group_cache;
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        mutex_lock(&root->fs_info->alloc_mutex);
        while (1) {
                ret = find_first_extent_bit(block_group_cache, last,
                                            &start, &end, BLOCK_GROUP_DIRTY);
                if (ret)
                        break;

                last = end + 1;
                ret = get_state_private(block_group_cache, start, &ptr);
                if (ret)
                        break;
                cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
                err = write_one_cache_group(trans, root,
                                            path, cache);
                /*
                 * if we fail to write the cache group, we want
                 * to keep it marked dirty in hopes that a later
                 * write will work
                 */
                if (err) {
                        werr = err;
                        continue;
                }
                clear_extent_bits(block_group_cache, start, end,
                                  BLOCK_GROUP_DIRTY, GFP_NOFS);
        }
        btrfs_free_path(path);
        mutex_unlock(&root->fs_info->alloc_mutex);
        return werr;
}

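/*
 * Each allocation profile (data, metadata, system, plus raid flags)
 * gets one btrfs_space_info.  __find_space_info looks one up by flags
 * and update_space_info creates or grows it.
 */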
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct list_head *cur;
        struct btrfs_space_info *found;

        list_for_each(cur, head) {
                found = list_entry(cur, struct btrfs_space_info, list);
                if (found->flags == flags)
                        return found;
        }
        return NULL;
}

static int update_space_info(struct btrfs_fs_info *info, u64 flags,
                             u64 total_bytes, u64 bytes_used,
                             struct btrfs_space_info **space_info)
{
        struct btrfs_space_info *found;

        found = __find_space_info(info, flags);
        if (found) {
                found->total_bytes += total_bytes;
                found->bytes_used += bytes_used;
                found->full = 0;
                WARN_ON(found->total_bytes < found->bytes_used);
                *space_info = found;
                return 0;
        }
        found = kmalloc(sizeof(*found), GFP_NOFS);
        if (!found)
                return -ENOMEM;

        list_add(&found->list, &info->space_info);
        found->flags = flags;
        found->total_bytes = total_bytes;
        found->bytes_used = bytes_used;
        found->bytes_pinned = 0;
        found->full = 0;
        found->force_alloc = 0;
        *space_info = found;
        return 0;
}

static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
        u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
                                   BTRFS_BLOCK_GROUP_RAID1 |
                                   BTRFS_BLOCK_GROUP_RAID10 |
                                   BTRFS_BLOCK_GROUP_DUP);
        if (extra_flags) {
                if (flags & BTRFS_BLOCK_GROUP_DATA)
                        fs_info->avail_data_alloc_bits |= extra_flags;
                if (flags & BTRFS_BLOCK_GROUP_METADATA)
                        fs_info->avail_metadata_alloc_bits |= extra_flags;
                if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                        fs_info->avail_system_alloc_bits |= extra_flags;
        }
}

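/*
 * Strip out raid/dup flags that can't be honored with the current
 * number of devices, and resolve conflicting profiles down to a
 * single one (RAID10 over RAID1, RAID1/RAID10 over DUP, anything
 * over RAID0).
 */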
static u64 reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
        u64 num_devices = root->fs_info->fs_devices->num_devices;

        if (num_devices == 1)
                flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
        if (num_devices < 4)
                flags &= ~BTRFS_BLOCK_GROUP_RAID10;

        if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
            (flags & (BTRFS_BLOCK_GROUP_RAID1 |
                      BTRFS_BLOCK_GROUP_RAID10))) {
                flags &= ~BTRFS_BLOCK_GROUP_DUP;
        }

        if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
            (flags & BTRFS_BLOCK_GROUP_RAID10)) {
                flags &= ~BTRFS_BLOCK_GROUP_RAID1;
        }

        if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
            (flags & (BTRFS_BLOCK_GROUP_RAID1 |
                      BTRFS_BLOCK_GROUP_RAID10 |
                      BTRFS_BLOCK_GROUP_DUP)))
                flags &= ~BTRFS_BLOCK_GROUP_RAID0;
        return flags;
}

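/*
 * Allocate a new chunk for the given profile when the matching space
 * info is more than 60% committed (or when force is set), and create
 * a block group covering it.
 */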
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
                          u64 flags, int force)
{
        struct btrfs_space_info *space_info;
        u64 thresh;
        u64 start;
        u64 num_bytes;
        int ret;

        flags = reduce_alloc_profile(extent_root, flags);

        space_info = __find_space_info(extent_root->fs_info, flags);
        if (!space_info) {
                ret = update_space_info(extent_root->fs_info, flags,
                                        0, 0, &space_info);
                BUG_ON(ret);
        }
        BUG_ON(!space_info);

        if (space_info->force_alloc) {
                force = 1;
                space_info->force_alloc = 0;
        }
        if (space_info->full)
                goto out;

        thresh = div_factor(space_info->total_bytes, 6);
        if (!force &&
            (space_info->bytes_used + space_info->bytes_pinned +
             alloc_bytes) < thresh)
                goto out;

        mutex_lock(&extent_root->fs_info->chunk_mutex);
        ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
        if (ret == -ENOSPC) {
                printk("space info full %Lu\n", flags);
                space_info->full = 1;
                goto out_unlock;
        }
        BUG_ON(ret);

        ret = btrfs_make_block_group(trans, extent_root, 0, flags,
                     BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
        BUG_ON(ret);
out_unlock:
        mutex_unlock(&extent_root->fs_info->chunk_mutex);
out:
        return 0;
}

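/*
 * Adjust the used-bytes accounting of the block group(s) covering
 * [bytenr, bytenr + num_bytes) after an allocation or a free, marking
 * each touched group dirty and optionally returning freed space to
 * the free_space_cache.
 */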
static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc,
                              int mark_free)
{
        struct btrfs_block_group_cache *cache;
        struct btrfs_fs_info *info = root->fs_info;
        u64 total = num_bytes;
        u64 old_val;
        u64 byte_in_group;
        u64 start;
        u64 end;

        WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
        while (total) {
                cache = btrfs_lookup_block_group(info, bytenr);
                if (!cache)
                        return -1;
                byte_in_group = bytenr - cache->key.objectid;
                WARN_ON(byte_in_group > cache->key.offset);
                start = cache->key.objectid;
                end = start + cache->key.offset - 1;
                set_extent_bits(&info->block_group_cache, start, end,
                                BLOCK_GROUP_DIRTY, GFP_NOFS);

                spin_lock(&cache->lock);
                old_val = btrfs_block_group_used(&cache->item);
                num_bytes = min(total, cache->key.offset - byte_in_group);
                if (alloc) {
                        old_val += num_bytes;
                        cache->space_info->bytes_used += num_bytes;
                        btrfs_set_block_group_used(&cache->item, old_val);
                        spin_unlock(&cache->lock);
                } else {
                        old_val -= num_bytes;
                        cache->space_info->bytes_used -= num_bytes;
                        btrfs_set_block_group_used(&cache->item, old_val);
                        spin_unlock(&cache->lock);
                        if (mark_free) {
                                set_extent_dirty(&info->free_space_cache,
                                                 bytenr,
                                                 bytenr + num_bytes - 1,
                                                 GFP_NOFS);
                        }
                }
                total -= num_bytes;
                bytenr += num_bytes;
        }
        return 0;
}

static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
        u64 start;
        u64 end;
        int ret;

        ret = find_first_extent_bit(&root->fs_info->block_group_cache,
                                    search_start, &start, &end,
                                    BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
                                    BLOCK_GROUP_SYSTEM);
        if (ret)
                return 0;
        return start;
}

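/*
 * Pin or unpin [bytenr, bytenr + num) and keep the per-block-group
 * and global pinned byte counters in sync with the pinned_extents tree.
 */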
static int update_pinned_extents(struct btrfs_root *root,
                                 u64 bytenr, u64 num, int pin)
{
        u64 len;
        struct btrfs_block_group_cache *cache;
        struct btrfs_fs_info *fs_info = root->fs_info;

        WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
        if (pin) {
                set_extent_dirty(&fs_info->pinned_extents,
                                 bytenr, bytenr + num - 1, GFP_NOFS);
        } else {
                clear_extent_dirty(&fs_info->pinned_extents,
                                   bytenr, bytenr + num - 1, GFP_NOFS);
        }
        while (num > 0) {
                cache = btrfs_lookup_block_group(fs_info, bytenr);
                if (!cache) {
                        u64 first = first_logical_byte(root, bytenr);
                        WARN_ON(first < bytenr);
                        len = min(first - bytenr, num);
                } else {
                        len = min(num, cache->key.offset -
                                  (bytenr - cache->key.objectid));
                }
                if (pin) {
                        if (cache) {
                                spin_lock(&cache->lock);
                                cache->pinned += len;
                                cache->space_info->bytes_pinned += len;
                                spin_unlock(&cache->lock);
                        }
                        fs_info->total_pinned += len;
                } else {
                        if (cache) {
                                spin_lock(&cache->lock);
                                cache->pinned -= len;
                                cache->space_info->bytes_pinned -= len;
                                spin_unlock(&cache->lock);
                        }
                        fs_info->total_pinned -= len;
                }
                bytenr += len;
                num -= len;
        }
        return 0;
}

int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
{
        u64 last = 0;
        u64 start;
        u64 end;
        struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
        int ret;

        while (1) {
                ret = find_first_extent_bit(pinned_extents, last,
                                            &start, &end, EXTENT_DIRTY);
                if (ret)
                        break;
                set_extent_dirty(copy, start, end, GFP_NOFS);
                last = end + 1;
        }
        return 0;
}

int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               struct extent_io_tree *unpin)
{
        u64 start;
        u64 end;
        int ret;
        struct extent_io_tree *free_space_cache;

        free_space_cache = &root->fs_info->free_space_cache;

        mutex_lock(&root->fs_info->alloc_mutex);
        while (1) {
                ret = find_first_extent_bit(unpin, 0, &start, &end,
                                            EXTENT_DIRTY);
                if (ret)
                        break;
                update_pinned_extents(root, start, end + 1 - start, 0);
                clear_extent_dirty(unpin, start, end, GFP_NOFS);
                set_extent_dirty(free_space_cache, start, end, GFP_NOFS);
                if (need_resched()) {
                        mutex_unlock(&root->fs_info->alloc_mutex);
                        cond_resched();
                        mutex_lock(&root->fs_info->alloc_mutex);
                }
        }
        mutex_unlock(&root->fs_info->alloc_mutex);
        return 0;
}

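/*
 * Turn the ranges recorded in the extent_ins tree into real extent
 * items, inserting an extent item and a back reference for each range
 * that was reserved earlier in this transaction.
 */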
static int finish_current_insert(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *extent_root)
{
        u64 start;
        u64 end;
        struct btrfs_fs_info *info = extent_root->fs_info;
        struct extent_buffer *eb;
        struct btrfs_path *path;
        struct btrfs_key ins;
        struct btrfs_disk_key first;
        struct btrfs_extent_item extent_item;
        int ret;
        int level;
        int err = 0;

        WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
        btrfs_set_stack_extent_refs(&extent_item, 1);
        btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        while (1) {
                ret = find_first_extent_bit(&info->extent_ins, 0, &start,
                                            &end, EXTENT_LOCKED);
                if (ret)
                        break;

                ins.objectid = start;
                ins.offset = end + 1 - start;
                err = btrfs_insert_item(trans, extent_root, &ins,
                                        &extent_item, sizeof(extent_item));
                clear_extent_bits(&info->extent_ins, start, end, EXTENT_LOCKED,
                                  GFP_NOFS);

                eb = btrfs_find_tree_block(extent_root, ins.objectid,
                                           ins.offset);

                if (!btrfs_buffer_uptodate(eb, trans->transid)) {
                        mutex_unlock(&extent_root->fs_info->alloc_mutex);
                        btrfs_read_buffer(eb, trans->transid);
                        mutex_lock(&extent_root->fs_info->alloc_mutex);
                }

                btrfs_tree_lock(eb);
                level = btrfs_header_level(eb);
                if (level == 0)
                        btrfs_item_key(eb, &first, 0);
                else
                        btrfs_node_key(eb, &first, 0);
                btrfs_tree_unlock(eb);
                free_extent_buffer(eb);
                /*
                 * the first key is just a hint, so the race we've created
                 * against reading it is fine
                 */
                err = btrfs_insert_extent_backref(trans, extent_root, path,
                                        start, extent_root->root_key.objectid,
                                        0, level,
                                        btrfs_disk_key_objectid(&first));
                BUG_ON(err);
                if (need_resched()) {
                        mutex_unlock(&extent_root->fs_info->alloc_mutex);
                        cond_resched();
                        mutex_lock(&extent_root->fs_info->alloc_mutex);
                }
        }
        btrfs_free_path(path);
        return 0;
}

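/*
 * Pin a range so it is not reused before the transaction commits.  As
 * a shortcut, a tree block that was both allocated and freed inside
 * the running transaction (and never written) is cleaned and made
 * immediately reusable instead; in that case 1 is returned.  With
 * pending set, the range is only queued in the pending_del tree.
 */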
static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes,
                          int pending)
{
        int err = 0;

        WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
        if (!pending) {
                struct extent_buffer *buf;
                buf = btrfs_find_tree_block(root, bytenr, num_bytes);
                if (buf) {
                        if (btrfs_buffer_uptodate(buf, 0) &&
                            btrfs_try_tree_lock(buf)) {
                                u64 transid =
                                    root->fs_info->running_transaction->transid;
                                u64 header_transid =
                                    btrfs_header_generation(buf);
                                if (header_transid == transid &&
                                    !btrfs_header_flag(buf,
                                               BTRFS_HEADER_FLAG_WRITTEN)) {
                                        clean_tree_block(NULL, root, buf);
                                        btrfs_tree_unlock(buf);
                                        free_extent_buffer(buf);
                                        return 1;
                                }
                                btrfs_tree_unlock(buf);
                        }
                        free_extent_buffer(buf);
                }
                update_pinned_extents(root, bytenr, num_bytes, 1);
        } else {
                set_extent_bits(&root->fs_info->pending_del,
                                bytenr, bytenr + num_bytes - 1,
                                EXTENT_LOCKED, GFP_NOFS);
        }
        BUG_ON(err < 0);
        return 0;
}

/*
 * remove an extent from the root, returns 0 on success
 */
static int __free_extent(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root,
                         u64 bytenr, u64 num_bytes,
                         u64 root_objectid, u64 ref_generation,
                         u64 owner_objectid, u64 owner_offset, int pin,
                         int mark_free)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_root *extent_root = info->extent_root;
        struct extent_buffer *leaf;
        int ret;
        int extent_slot = 0;
        int found_extent = 0;
        int num_to_del = 1;
        struct btrfs_extent_item *ei;
        u32 refs;

        WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
        key.objectid = bytenr;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        key.offset = num_bytes;
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->reada = 1;
        ret = lookup_extent_backref(trans, extent_root, path,
                                    bytenr, root_objectid,
                                    ref_generation,
                                    owner_objectid, owner_offset, 1);
        if (ret == 0) {
                struct btrfs_key found_key;
                extent_slot = path->slots[0];
                while (extent_slot > 0) {
                        extent_slot--;
                        btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                              extent_slot);
                        if (found_key.objectid != bytenr)
                                break;
                        if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
                            found_key.offset == num_bytes) {
                                found_extent = 1;
                                break;
                        }
                        if (path->slots[0] - extent_slot > 5)
                                break;
                }
                if (!found_extent)
                        ret = btrfs_del_item(trans, extent_root, path);
        } else {
                btrfs_print_leaf(extent_root, path->nodes[0]);
                WARN_ON(1);
                printk("Unable to find ref byte nr %Lu root %Lu "
                       "gen %Lu owner %Lu offset %Lu\n", bytenr,
                       root_objectid, ref_generation, owner_objectid,
                       owner_offset);
        }
        if (!found_extent) {
                btrfs_release_path(extent_root, path);
                ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
                if (ret < 0) {
                        btrfs_free_path(path);
                        return ret;
                }
                BUG_ON(ret);
                extent_slot = path->slots[0];
        }

        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, extent_slot,
                            struct btrfs_extent_item);
        refs = btrfs_extent_refs(leaf, ei);
        BUG_ON(refs == 0);
        refs -= 1;
        btrfs_set_extent_refs(leaf, ei, refs);

        btrfs_mark_buffer_dirty(leaf);

        if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
                /* if the back ref and the extent are next to each other
                 * they get deleted below in one shot
                 */
                path->slots[0] = extent_slot;
                num_to_del = 2;
        } else if (found_extent) {
                /* otherwise delete the extent back ref */
                ret = btrfs_del_item(trans, extent_root, path);
                BUG_ON(ret);
                /* if refs are 0, we need to setup the path for deletion */
                if (refs == 0) {
                        btrfs_release_path(extent_root, path);
                        ret = btrfs_search_slot(trans, extent_root, &key, path,
                                                -1, 1);
                        if (ret < 0) {
                                btrfs_free_path(path);
                                return ret;
                        }
                        BUG_ON(ret);
                }
        }

        if (refs == 0) {
                u64 super_used;
                u64 root_used;

                if (pin) {
                        ret = pin_down_bytes(root, bytenr, num_bytes, 0);
                        if (ret > 0)
                                mark_free = 1;
                        BUG_ON(ret < 0);
                }

                /* block accounting for super block */
                spin_lock_irq(&info->delalloc_lock);
                super_used = btrfs_super_bytes_used(&info->super_copy);
                btrfs_set_super_bytes_used(&info->super_copy,
                                           super_used - num_bytes);
                spin_unlock_irq(&info->delalloc_lock);

                /* block accounting for root item */
                root_used = btrfs_root_used(&root->root_item);
                btrfs_set_root_used(&root->root_item,
                                    root_used - num_bytes);
                ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
                                      num_to_del);
                if (ret) {
                        btrfs_free_path(path);
                        return ret;
                }
                ret = update_block_group(trans, root, bytenr, num_bytes, 0,
                                         mark_free);
                BUG_ON(ret);
        }
        btrfs_free_path(path);
        finish_current_insert(trans, extent_root);
        return ret;
}

/*
 * find all the blocks marked as pending in the pending_del tree and
 * remove them from the extent map
 */
static int del_pending_extents(struct btrfs_trans_handle *trans,
                               struct btrfs_root *extent_root)
{
        int ret;
        int err = 0;
        u64 start;
        u64 end;
        struct extent_io_tree *pending_del;
        struct extent_io_tree *pinned_extents;

        WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
        pending_del = &extent_root->fs_info->pending_del;
        pinned_extents = &extent_root->fs_info->pinned_extents;

        while (1) {
                ret = find_first_extent_bit(pending_del, 0, &start, &end,
                                            EXTENT_LOCKED);
                if (ret)
                        break;
                clear_extent_bits(pending_del, start, end, EXTENT_LOCKED,
                                  GFP_NOFS);
                if (!test_range_bit(&extent_root->fs_info->extent_ins,
                                    start, end, EXTENT_LOCKED, 0)) {
                        update_pinned_extents(extent_root, start,
                                              end + 1 - start, 1);
                        ret = __free_extent(trans, extent_root,
                                            start, end + 1 - start,
                                            extent_root->root_key.objectid,
                                            0, 0, 0, 0, 0);
                } else {
                        clear_extent_bits(&extent_root->fs_info->extent_ins,
                                          start, end, EXTENT_LOCKED, GFP_NOFS);
                }
                if (ret)
                        err = ret;

                if (need_resched()) {
                        mutex_unlock(&extent_root->fs_info->alloc_mutex);
                        cond_resched();
                        mutex_lock(&extent_root->fs_info->alloc_mutex);
                }
        }
        return err;
}

/*
 * remove an extent from the root, returns 0 on success
 */
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root, u64 bytenr,
                               u64 num_bytes, u64 root_objectid,
                               u64 ref_generation, u64 owner_objectid,
                               u64 owner_offset, int pin)
{
        struct btrfs_root *extent_root = root->fs_info->extent_root;
        int pending_ret;
        int ret;

        WARN_ON(num_bytes < root->sectorsize);
        if (!root->ref_cows)
                ref_generation = 0;

        if (root == extent_root) {
                pin_down_bytes(root, bytenr, num_bytes, 1);
                return 0;
        }
        ret = __free_extent(trans, root, bytenr, num_bytes, root_objectid,
                            ref_generation, owner_objectid, owner_offset,
                            pin, pin == 0);

        finish_current_insert(trans, root->fs_info->extent_root);
        pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
        return ret ? ret : pending_ret;
}

int btrfs_free_extent(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root, u64 bytenr,
                      u64 num_bytes, u64 root_objectid,
                      u64 ref_generation, u64 owner_objectid,
                      u64 owner_offset, int pin)
{
        int ret;

        maybe_lock_mutex(root);
        ret = __btrfs_free_extent(trans, root, bytenr, num_bytes,
                                  root_objectid, ref_generation,
                                  owner_objectid, owner_offset, pin);
        maybe_unlock_mutex(root);
        return ret;
}

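/* round val up to the next stripesize boundary */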
static u64 stripe_align(struct btrfs_root *root, u64 val)
{
        u64 mask = ((u64)root->stripesize - 1);
        u64 ret = (val + mask) & ~mask;
        return ret;
}

1856/*
1857 * walks the btree of allocated extents and find a hole of a given size.
1858 * The key ins is changed to record the hole:
1859 * ins->objectid == block start
1860 * ins->flags = BTRFS_EXTENT_ITEM_KEY
1861 * ins->offset == number of blocks
1862 * Any available blocks before search_start are skipped.
1863 */
1864static int noinline find_free_extent(struct btrfs_trans_handle *trans,
1865 struct btrfs_root *orig_root,
1866 u64 num_bytes, u64 empty_size,
1867 u64 search_start, u64 search_end,
1868 u64 hint_byte, struct btrfs_key *ins,
1869 u64 exclude_start, u64 exclude_nr,
1870 int data)
1871{
1872 int ret;
1873 u64 orig_search_start;
1874 struct btrfs_root * root = orig_root->fs_info->extent_root;
1875 struct btrfs_fs_info *info = root->fs_info;
1876 u64 total_needed = num_bytes;
1877 u64 *last_ptr = NULL;
1878 struct btrfs_block_group_cache *block_group;
1879 int full_scan = 0;
1880 int wrapped = 0;
1881 int chunk_alloc_done = 0;
1882 int empty_cluster = 2 * 1024 * 1024;
1883 int allowed_chunk_alloc = 0;
1884
1885 WARN_ON(num_bytes < root->sectorsize);
1886 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
1887
1888 if (orig_root->ref_cows || empty_size)
1889 allowed_chunk_alloc = 1;
1890
1891 if (data & BTRFS_BLOCK_GROUP_METADATA) {
1892 last_ptr = &root->fs_info->last_alloc;
1893 empty_cluster = 256 * 1024;
1894 }
1895
1896 if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
1897 last_ptr = &root->fs_info->last_data_alloc;
1898 }
1899
1900 if (last_ptr) {
1901 if (*last_ptr)
1902 hint_byte = *last_ptr;
1903 else {
1904 empty_size += empty_cluster;
1905 }
1906 }
1907
1908 search_start = max(search_start, first_logical_byte(root, 0));
1909 orig_search_start = search_start;
1910
1911 if (search_end == (u64)-1)
1912 search_end = btrfs_super_total_bytes(&info->super_copy);
1913
1914 if (hint_byte) {
1915 block_group = btrfs_lookup_first_block_group(info, hint_byte);
1916 if (!block_group)
1917 hint_byte = search_start;
1918 block_group = btrfs_find_block_group(root, block_group,
1919 hint_byte, data, 1);
1920 if (last_ptr && *last_ptr == 0 && block_group)
1921 hint_byte = block_group->key.objectid;
1922 } else {
1923 block_group = btrfs_find_block_group(root,
1924 trans->block_group,
1925 search_start, data, 1);
1926 }
1927 search_start = max(search_start, hint_byte);
1928
1929 total_needed += empty_size;
1930
1931check_failed:
1932 if (!block_group) {
1933 block_group = btrfs_lookup_first_block_group(info,
1934 search_start);
1935 if (!block_group)
1936 block_group = btrfs_lookup_first_block_group(info,
1937 orig_search_start);
1938 }
1939 if (full_scan && !chunk_alloc_done) {
1940 if (allowed_chunk_alloc) {
1941 do_chunk_alloc(trans, root,
1942 num_bytes + 2 * 1024 * 1024, data, 1);
1943 allowed_chunk_alloc = 0;
1944 } else if (block_group && block_group_bits(block_group, data)) {
1945 block_group->space_info->force_alloc = 1;
1946 }
1947 chunk_alloc_done = 1;
1948 }
1949 ret = find_search_start(root, &block_group, &search_start,
1950 total_needed, data);
1951 if (ret == -ENOSPC && last_ptr && *last_ptr) {
1952 *last_ptr = 0;
1953 block_group = btrfs_lookup_first_block_group(info,
1954 orig_search_start);
1955 search_start = orig_search_start;
1956 ret = find_search_start(root, &block_group, &search_start,
1957 total_needed, data);
1958 }
1959 if (ret == -ENOSPC)
1960 goto enospc;
1961 if (ret)
1962 goto error;
1963
1964 if (last_ptr && *last_ptr && search_start != *last_ptr) {
1965 *last_ptr = 0;
1966 if (!empty_size) {
1967 empty_size += empty_cluster;
1968 total_needed += empty_size;
1969 }
1970 block_group = btrfs_lookup_first_block_group(info,
1971 orig_search_start);
1972 search_start = orig_search_start;
1973 ret = find_search_start(root, &block_group,
1974 &search_start, total_needed, data);
1975 if (ret == -ENOSPC)
1976 goto enospc;
1977 if (ret)
1978 goto error;
1979 }
1980
1981 search_start = stripe_align(root, search_start);
1982 ins->objectid = search_start;
1983 ins->offset = num_bytes;
1984
1985 if (ins->objectid + num_bytes >= search_end)
1986 goto enospc;
1987
1988 if (ins->objectid + num_bytes >
1989 block_group->key.objectid + block_group->key.offset) {
1990 search_start = block_group->key.objectid +
1991 block_group->key.offset;
1992 goto new_group;
1993 }
1994
1995 if (test_range_bit(&info->extent_ins, ins->objectid,
1996 ins->objectid + num_bytes - 1, EXTENT_LOCKED, 0)) {
1997 search_start = ins->objectid + num_bytes;
1998 goto new_group;
1999 }
2000
2001 if (test_range_bit(&info->pinned_extents, ins->objectid,
2002 ins->objectid + num_bytes - 1, EXTENT_DIRTY, 0)) {
2003 search_start = ins->objectid + num_bytes;
2004 goto new_group;
2005 }
2006
2007 if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start &&
2008 ins->objectid < exclude_start + exclude_nr)) {
2009 search_start = exclude_start + exclude_nr;
2010 goto new_group;
2011 }
2012
2013 if (!(data & BTRFS_BLOCK_GROUP_DATA)) {
2014 block_group = btrfs_lookup_block_group(info, ins->objectid);
2015 if (block_group)
2016 trans->block_group = block_group;
2017 }
2018 ins->offset = num_bytes;
2019 if (last_ptr) {
2020 *last_ptr = ins->objectid + ins->offset;
2021 if (*last_ptr ==
2022 btrfs_super_total_bytes(&root->fs_info->super_copy)) {
2023 *last_ptr = 0;
2024 }
2025 }
2026 return 0;
2027
2028new_group:
2029 if (search_start + num_bytes >= search_end) {
2030enospc:
2031 search_start = orig_search_start;
2032 if (full_scan) {
2033 ret = -ENOSPC;
2034 goto error;
2035 }
2036 if (wrapped) {
2037 if (!full_scan)
2038 total_needed -= empty_size;
2039 full_scan = 1;
2040 } else
2041 wrapped = 1;
2042 }
2043 block_group = btrfs_lookup_first_block_group(info, search_start);
2044 cond_resched();
2045 block_group = btrfs_find_block_group(root, block_group,
2046 search_start, data, 0);
2047 goto check_failed;
2048
2049error:
2050 return ret;
2051}
2052
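/*
 * pick the right allocation profile (data, system or metadata) for
 * this request, pre-allocate chunks where the caller is allowed to,
 * and then search for a free extent.  On ENOSPC the request is cut in
 * half, down to min_alloc_size, with a forced chunk allocation before
 * each retry.
 */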
2053static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2054 struct btrfs_root *root,
2055 u64 num_bytes, u64 min_alloc_size,
2056 u64 empty_size, u64 hint_byte,
2057 u64 search_end, struct btrfs_key *ins,
2058 u64 data)
2059{
2060 int ret;
2061 u64 search_start = 0;
2062 u64 alloc_profile;
2063 struct btrfs_fs_info *info = root->fs_info;
2064
2065 if (data) {
2066 alloc_profile = info->avail_data_alloc_bits &
2067 info->data_alloc_profile;
2068 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
2069 } else if (root == root->fs_info->chunk_root) {
2070 alloc_profile = info->avail_system_alloc_bits &
2071 info->system_alloc_profile;
2072 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
2073 } else {
2074 alloc_profile = info->avail_metadata_alloc_bits &
2075 info->metadata_alloc_profile;
2076 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
2077 }
2078again:
2079 data = reduce_alloc_profile(root, data);
2080 /*
2081 * the only place that sets empty_size is btrfs_realloc_node, which
2082 * is not called recursively on allocations
2083 */
2084 if (empty_size || root->ref_cows) {
2085 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
2086 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2087 2 * 1024 * 1024,
2088 BTRFS_BLOCK_GROUP_METADATA |
2089 (info->metadata_alloc_profile &
2090 info->avail_metadata_alloc_bits), 0);
2091 BUG_ON(ret);
2092 }
2093 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2094 num_bytes + 2 * 1024 * 1024, data, 0);
2095 BUG_ON(ret);
2096 }
2097
2098 WARN_ON(num_bytes < root->sectorsize);
2099 ret = find_free_extent(trans, root, num_bytes, empty_size,
2100 search_start, search_end, hint_byte, ins,
2101 trans->alloc_exclude_start,
2102 trans->alloc_exclude_nr, data);
2103
2104 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
2105 num_bytes = num_bytes >> 1;
2106 num_bytes = max(num_bytes, min_alloc_size);
2107 do_chunk_alloc(trans, root->fs_info->extent_root,
2108 num_bytes, data, 1);
2109 goto again;
2110 }
2111 if (ret) {
2112 printk("allocation failed flags %llu\n", (unsigned long long)data);
2113 BUG();
2114 }
2115 clear_extent_dirty(&root->fs_info->free_space_cache,
2116 ins->objectid, ins->objectid + ins->offset - 1,
2117 GFP_NOFS);
2118 return 0;
2119}
2120
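/*
 * the two step interface below splits reserving space from recording
 * who owns it.  A sketch of the intended usage (argument values here
 * are illustrative only):
 *
 *	struct btrfs_key ins;
 *	ret = btrfs_reserve_extent(trans, root, num_bytes,
 *				   root->sectorsize, 0, hint_byte,
 *				   (u64)-1, &ins, 1);
 *	...
 *	ret = btrfs_alloc_reserved_extent(trans, root, root_objectid,
 *					  trans->transid, objectid,
 *					  offset, &ins);
 */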
2121int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2122 struct btrfs_root *root,
2123 u64 num_bytes, u64 min_alloc_size,
2124 u64 empty_size, u64 hint_byte,
2125 u64 search_end, struct btrfs_key *ins,
2126 u64 data)
2127{
2128 int ret;
2129 maybe_lock_mutex(root);
2130 ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
2131 empty_size, hint_byte, search_end, ins,
2132 data);
2133 maybe_unlock_mutex(root);
2134 return ret;
2135}
2136
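/*
 * record an extent that was reserved earlier: update the byte counts
 * in the super block and the root item, insert the extent item and
 * its first backref in one btree insertion, and update the block
 * group.  Allocations done by the extent root itself are only marked
 * in the extent_ins tree here; finish_current_insert picks them up.
 */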
2137static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
2138 struct btrfs_root *root,
2139 u64 root_objectid, u64 ref_generation,
2140 u64 owner, u64 owner_offset,
2141 struct btrfs_key *ins)
2142{
2143 int ret;
2144 int pending_ret;
2145 u64 super_used;
2146 u64 root_used;
2147 u64 num_bytes = ins->offset;
2148 u32 sizes[2];
2149 struct btrfs_fs_info *info = root->fs_info;
2150 struct btrfs_root *extent_root = info->extent_root;
2151 struct btrfs_extent_item *extent_item;
2152 struct btrfs_extent_ref *ref;
2153 struct btrfs_path *path;
2154 struct btrfs_key keys[2];
2155
2156 /* block accounting for super block */
2157 spin_lock_irq(&info->delalloc_lock);
2158 super_used = btrfs_super_bytes_used(&info->super_copy);
2159 btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
2160 spin_unlock_irq(&info->delalloc_lock);
2161
2162 /* block accounting for root item */
2163 root_used = btrfs_root_used(&root->root_item);
2164 btrfs_set_root_used(&root->root_item, root_used + num_bytes);
2165
2166 if (root == extent_root) {
2167 set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
2168 ins->objectid + ins->offset - 1,
2169 EXTENT_LOCKED, GFP_NOFS);
2170 goto update_block;
2171 }
2172
2173 memcpy(&keys[0], ins, sizeof(*ins));
2174 keys[1].offset = hash_extent_ref(root_objectid, ref_generation,
2175 owner, owner_offset);
2176 keys[1].objectid = ins->objectid;
2177 keys[1].type = BTRFS_EXTENT_REF_KEY;
2178 sizes[0] = sizeof(*extent_item);
2179 sizes[1] = sizeof(*ref);
2180
2181 path = btrfs_alloc_path();
2182 BUG_ON(!path);
2183
2184 ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
2185 sizes, 2);
2186
2187 BUG_ON(ret);
2188 extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2189 struct btrfs_extent_item);
2190 btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
2191 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
2192 struct btrfs_extent_ref);
2193
2194 btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
2195 btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
2196 btrfs_set_ref_objectid(path->nodes[0], ref, owner);
2197 btrfs_set_ref_offset(path->nodes[0], ref, owner_offset);
2198
2199 btrfs_mark_buffer_dirty(path->nodes[0]);
2200
2201 trans->alloc_exclude_start = 0;
2202 trans->alloc_exclude_nr = 0;
2203 btrfs_free_path(path);
2204 finish_current_insert(trans, extent_root);
2205 pending_ret = del_pending_extents(trans, extent_root);
2206
2207 if (ret)
2208 goto out;
2209 if (pending_ret) {
2210 ret = pending_ret;
2211 goto out;
2212 }
2213
2214update_block:
2215 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0);
2216 if (ret) {
2217 printk("update block group failed for %llu %llu\n",
2218 (unsigned long long)ins->objectid, (unsigned long long)ins->offset);
2219 BUG();
2220 }
2221out:
2222 return ret;
2223}
2224
2225int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
2226 struct btrfs_root *root,
2227 u64 root_objectid, u64 ref_generation,
2228 u64 owner, u64 owner_offset,
2229 struct btrfs_key *ins)
2230{
2231 int ret;
2232 maybe_lock_mutex(root);
2233 ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
2234 ref_generation, owner,
2235 owner_offset, ins);
2236 maybe_unlock_mutex(root);
2237 return ret;
2238}

2239/*
2240 * finds a free extent and does all the dirty work required for allocation.
2241 * The key describing the start and size of the new extent is returned
2242 * through ins.
2243 *
2244 * returns 0 if everything worked, non-zero otherwise.
2245 */
2246int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
2247 struct btrfs_root *root,
2248 u64 num_bytes, u64 min_alloc_size,
2249 u64 root_objectid, u64 ref_generation,
2250 u64 owner, u64 owner_offset,
2251 u64 empty_size, u64 hint_byte,
2252 u64 search_end, struct btrfs_key *ins, u64 data)
2253{
2254 int ret;
2255
2256 maybe_lock_mutex(root);
2257
2258 ret = __btrfs_reserve_extent(trans, root, num_bytes,
2259 min_alloc_size, empty_size, hint_byte,
2260 search_end, ins, data);
2261 BUG_ON(ret);
2262 ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
2263 ref_generation, owner,
2264 owner_offset, ins);
2265 BUG_ON(ret);
2266
2267 maybe_unlock_mutex(root);
2268 return ret;
2269}

2270/*
2271 * helper function to allocate a block for a given tree.
2272 * returns the tree buffer or an ERR_PTR on error.
2273 */
2274struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
2275 struct btrfs_root *root,
2276 u32 blocksize,
2277 u64 root_objectid,
2278 u64 ref_generation,
2279 u64 first_objectid,
2280 int level,
2281 u64 hint,
2282 u64 empty_size)
2283{
2284 struct btrfs_key ins;
2285 int ret;
2286 struct extent_buffer *buf;
2287
2288 ret = btrfs_alloc_extent(trans, root, blocksize, blocksize,
2289 root_objectid, ref_generation,
2290 level, first_objectid, empty_size, hint,
2291 (u64)-1, &ins, 0);
2292 if (ret) {
2293 BUG_ON(ret > 0);
2294 return ERR_PTR(ret);
2295 }
2296 buf = btrfs_find_create_tree_block(root, ins.objectid, blocksize);
2297 if (!buf) {
2298 btrfs_free_extent(trans, root, ins.objectid, blocksize,
2299 root->root_key.objectid, ref_generation,
2300 0, 0, 0);
2301 return ERR_PTR(-ENOMEM);
2302 }
2303 btrfs_set_header_generation(buf, trans->transid);
2304 btrfs_tree_lock(buf);
2305 clean_tree_block(trans, root, buf);
2306 btrfs_set_buffer_uptodate(buf);
2307
2308 if (PageDirty(buf->first_page)) {
2309 printk("page %lu dirty\n", buf->first_page->index);
2310 WARN_ON(1);
2311 }
2312
2313 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
2314 buf->start + buf->len - 1, GFP_NOFS);
2315 trans->blocks_used++;
2316 return buf;
2317}
2318
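/*
 * free every file extent referenced by a leaf that is going away.
 * Inline extents and holes (disk_bytenr == 0) are skipped, and each
 * ref is dropped with the alloc_mutex held.
 */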
2319static int noinline drop_leaf_ref_no_cache(struct btrfs_trans_handle *trans,
2320 struct btrfs_root *root,
2321 struct extent_buffer *leaf)
2322{
2323 u64 leaf_owner;
2324 u64 leaf_generation;
2325 struct btrfs_key key;
2326 struct btrfs_file_extent_item *fi;
2327 int i;
2328 int nritems;
2329 int ret;
2330
2331 BUG_ON(!btrfs_is_leaf(leaf));
2332 nritems = btrfs_header_nritems(leaf);
2333 leaf_owner = btrfs_header_owner(leaf);
2334 leaf_generation = btrfs_header_generation(leaf);
2335
2336 for (i = 0; i < nritems; i++) {
2337 u64 disk_bytenr;
2338 cond_resched();
2339
2340 btrfs_item_key_to_cpu(leaf, &key, i);
2341 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2342 continue;
2343 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
2344 if (btrfs_file_extent_type(leaf, fi) ==
2345 BTRFS_FILE_EXTENT_INLINE)
2346 continue;
2347 /*
2348 * FIXME make sure to insert a trans record that
2349 * repeats the snapshot del on crash
2350 */
2351 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
2352 if (disk_bytenr == 0)
2353 continue;
2354
2355 mutex_lock(&root->fs_info->alloc_mutex);
2356 ret = __btrfs_free_extent(trans, root, disk_bytenr,
2357 btrfs_file_extent_disk_num_bytes(leaf, fi),
2358 leaf_owner, leaf_generation,
2359 key.objectid, key.offset, 0);
2360 mutex_unlock(&root->fs_info->alloc_mutex);
2361 BUG_ON(ret);
2362 }
2363 return 0;
2364}
2365
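/*
 * same as drop_leaf_ref_no_cache, but works from the btrfs_extent_info
 * array saved in the leaf ref cache instead of reading the leaf.
 */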
2366static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans,
2367 struct btrfs_root *root,
2368 struct btrfs_leaf_ref *ref)
2369{
2370 int i;
2371 int ret;
2372 struct btrfs_extent_info *info = ref->extents;
2373
2374 for (i = 0; i < ref->nritems; i++) {
2375 mutex_lock(&root->fs_info->alloc_mutex);
2376 ret = __btrfs_free_extent(trans, root,
2377 info->bytenr, info->num_bytes,
2378 ref->owner, ref->generation,
2379 info->objectid, info->offset, 0);
2380 mutex_unlock(&root->fs_info->alloc_mutex);
2381 BUG_ON(ret);
2382 info++;
2383 }
2384
2385 return 0;
2386}
2387
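/*
 * readahead helper for snapshot deletion: prefetch tree blocks pointed
 * to by 'node' starting at 'slot'.  Blocks more than 32k away from the
 * last one read, or still shared with another root, are skipped, and
 * the walk gives up after 32 skips.
 */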
2388static void noinline reada_walk_down(struct btrfs_root *root,
2389 struct extent_buffer *node,
2390 int slot)
2391{
2392 u64 bytenr;
2393 u64 last = 0;
2394 u32 nritems;
2395 u32 refs;
2396 u32 blocksize;
2397 int ret;
2398 int i;
2399 int level;
2400 int skipped = 0;
2401
2402 nritems = btrfs_header_nritems(node);
2403 level = btrfs_header_level(node);
2404 if (level)
2405 return;
2406
2407 for (i = slot; i < nritems && skipped < 32; i++) {
2408 bytenr = btrfs_node_blockptr(node, i);
2409 if (last && ((bytenr > last && bytenr - last > 32 * 1024) ||
2410 (last > bytenr && last - bytenr > 32 * 1024))) {
2411 skipped++;
2412 continue;
2413 }
2414 blocksize = btrfs_level_size(root, level - 1);
2415 if (i != slot) {
2416 ret = lookup_extent_ref(NULL, root, bytenr,
2417 blocksize, &refs);
2418 BUG_ON(ret);
2419 if (refs != 1) {
2420 skipped++;
2421 continue;
2422 }
2423 }
2424 ret = readahead_tree_block(root, bytenr, blocksize,
2425 btrfs_node_ptr_generation(node, i));
2426 last = bytenr + blocksize;
2427 cond_resched();
2428 if (ret)
2429 break;
2430 }
2431}
2432
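/*
 * look up the ref count on an extent while deleting a snapshot.  This
 * is lookup_extent_ref plus a cond_resched(), since the tree walk can
 * otherwise hold the cpu for a long time.
 */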
2433int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
2434 u32 *refs)
2435{
2436 int ret;
2437
2438 ret = lookup_extent_ref(NULL, root, start, len, refs);
2439 BUG_ON(ret);
2440
2441#if 0 /* some debugging code in case we see problems here */
2442 /* if the refs count is one, it won't get increased again. But
2443 * if the ref count is > 1, someone may be decreasing it at
2444 * the same time we are.
2445 */
2446 if (*refs != 1) {
2447 struct extent_buffer *eb = NULL;
2448 eb = btrfs_find_create_tree_block(root, start, len);
2449 if (eb)
2450 btrfs_tree_lock(eb);
2451
2452 mutex_lock(&root->fs_info->alloc_mutex);
2453 ret = lookup_extent_ref(NULL, root, start, len, refs);
2454 BUG_ON(ret);
2455 mutex_unlock(&root->fs_info->alloc_mutex);
2456
2457 if (eb) {
2458 btrfs_tree_unlock(eb);
2459 free_extent_buffer(eb);
2460 }
2461 if (*refs == 1) {
2462 printk("block %llu went down to one during drop_snap\n",
2463 (unsigned long long)start);
2464 }
2465
2466 }
2467#endif
2468
2469 cond_resched();
2470 return ret;
2471}
2472
2473/*
2474 * helper function for drop_snapshot, this walks down the tree dropping ref
2475 * counts as it goes.
2476 */
2477static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
2478 struct btrfs_root *root,
2479 struct btrfs_path *path, int *level)
2480{
2481 u64 root_owner;
2482 u64 root_gen;
2483 u64 bytenr;
2484 u64 ptr_gen;
2485 struct extent_buffer *next;
2486 struct extent_buffer *cur;
2487 struct extent_buffer *parent;
2488 struct btrfs_leaf_ref *ref;
2489 u32 blocksize;
2490 int ret;
2491 u32 refs;
2492
2493 WARN_ON(*level < 0);
2494 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2495 ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
2496 path->nodes[*level]->len, &refs);
2497 BUG_ON(ret);
2498 if (refs > 1)
2499 goto out;
2500
2501 /*
2502 * walk down to the last node level and free all the leaves
2503 */
2504 while(*level >= 0) {
2505 WARN_ON(*level < 0);
2506 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2507 cur = path->nodes[*level];
2508
2509 if (btrfs_header_level(cur) != *level)
2510 WARN_ON(1);
2511
2512 if (path->slots[*level] >=
2513 btrfs_header_nritems(cur))
2514 break;
2515 if (*level == 0) {
2516 ret = drop_leaf_ref_no_cache(trans, root, cur);
2517 BUG_ON(ret);
2518 break;
2519 }
2520 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2521 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2522 blocksize = btrfs_level_size(root, *level - 1);
2523
2524 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
2525 BUG_ON(ret);
2526 if (refs != 1) {
2527 parent = path->nodes[*level];
2528 root_owner = btrfs_header_owner(parent);
2529 root_gen = btrfs_header_generation(parent);
2530 path->slots[*level]++;
2531
2532 mutex_lock(&root->fs_info->alloc_mutex);
2533 ret = __btrfs_free_extent(trans, root, bytenr,
2534 blocksize, root_owner,
2535 root_gen, 0, 0, 1);
2536 BUG_ON(ret);
2537 mutex_unlock(&root->fs_info->alloc_mutex);
2538
2539 atomic_inc(&root->fs_info->throttle_gen);
2540 wake_up(&root->fs_info->transaction_throttle);
2541
2542 continue;
2543 }
2544 /*
2545 * at this point, we have a single ref, and since the
2546 * only place referencing this extent is a dead root,
2547 * the reference count should never go higher.
2548 * So, we don't need to check it again.
2549 */
2550 if (*level == 1) {
2551 struct btrfs_key key;
2552 btrfs_node_key_to_cpu(cur, &key, path->slots[*level]);
2553 ref = btrfs_lookup_leaf_ref(root, bytenr);
2554 if (ref) {
2555 ret = drop_leaf_ref(trans, root, ref);
2556 BUG_ON(ret);
2557 btrfs_remove_leaf_ref(root, ref);
2558 btrfs_free_leaf_ref(root, ref);
2559 *level = 0;
2560 break;
2561 }
2562 if (printk_ratelimit())
2563 printk("leaf ref miss for bytenr %llu\n",
2564 (unsigned long long)bytenr);
2565 }
2566 next = btrfs_find_tree_block(root, bytenr, blocksize);
2567 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
2568 free_extent_buffer(next);
2569
2570 if (path->slots[*level] == 0)
2571 reada_walk_down(root, cur, path->slots[*level]);
2572 next = read_tree_block(root, bytenr, blocksize,
2573 ptr_gen);
2574 cond_resched();
2575#if 0
2576 /*
2577 * this is a debugging check and can go away:
2578 * the ref was 1 when we decided to free this
2579 * block, so it should still be 1 here
2580 */
2581 ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
2582 &refs);
2583 BUG_ON(ret);
2584 WARN_ON(refs != 1);
2585#endif
2586 }
2587 WARN_ON(*level <= 0);
2588 if (path->nodes[*level-1])
2589 free_extent_buffer(path->nodes[*level-1]);
2590 path->nodes[*level-1] = next;
2591 *level = btrfs_header_level(next);
2592 path->slots[*level] = 0;
2593 }
2594out:
2595 WARN_ON(*level < 0);
2596 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2597
2598 if (path->nodes[*level] == root->node) {
2599 parent = path->nodes[*level];
2600 bytenr = path->nodes[*level]->start;
2601 } else {
2602 parent = path->nodes[*level + 1];
2603 bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
2604 }
2605
2606 blocksize = btrfs_level_size(root, *level);
2607 root_owner = btrfs_header_owner(parent);
2608 root_gen = btrfs_header_generation(parent);
2609
2610 mutex_lock(&root->fs_info->alloc_mutex);
2611 ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
2612 root_owner, root_gen, 0, 0, 1);
2613 free_extent_buffer(path->nodes[*level]);
2614 path->nodes[*level] = NULL;
2615 *level += 1;
2616 BUG_ON(ret);
2617 mutex_unlock(&root->fs_info->alloc_mutex);
2618
2619 cond_resched();
2620 return 0;
2621}
2622
2623/*
2624 * helper for dropping snapshots. This walks back up the tree in the path
2625 * to find the first node higher up where we haven't yet gone through
2626 * all the slots
2627 */
2628static int noinline walk_up_tree(struct btrfs_trans_handle *trans,
2629 struct btrfs_root *root,
2630 struct btrfs_path *path, int *level)
2631{
2632 u64 root_owner;
2633 u64 root_gen;
2634 struct btrfs_root_item *root_item = &root->root_item;
2635 int i;
2636 int slot;
2637 int ret;
2638
2639 for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2640 slot = path->slots[i];
2641 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
2642 struct extent_buffer *node;
2643 struct btrfs_disk_key disk_key;
2644 node = path->nodes[i];
2645 path->slots[i]++;
2646 *level = i;
2647 WARN_ON(*level == 0);
2648 btrfs_node_key(node, &disk_key, path->slots[i]);
2649 memcpy(&root_item->drop_progress,
2650 &disk_key, sizeof(disk_key));
2651 root_item->drop_level = i;
2652 return 0;
2653 } else {
2654 if (path->nodes[*level] == root->node) {
2655 root_owner = root->root_key.objectid;
2656 root_gen =
2657 btrfs_header_generation(path->nodes[*level]);
2658 } else {
2659 struct extent_buffer *node;
2660 node = path->nodes[*level + 1];
2661 root_owner = btrfs_header_owner(node);
2662 root_gen = btrfs_header_generation(node);
2663 }
2664 ret = btrfs_free_extent(trans, root,
2665 path->nodes[*level]->start,
2666 path->nodes[*level]->len,
2667 root_owner, root_gen, 0, 0, 1);
2668 BUG_ON(ret);
2669 free_extent_buffer(path->nodes[*level]);
2670 path->nodes[*level] = NULL;
2671 *level = i + 1;
2672 }
2673 }
2674 return 1;
2675}
2676
2677/*
2678 * drop the reference count on the tree rooted at 'root'. This traverses
2679 * the tree freeing any blocks that have a ref count of zero after being
2680 * decremented.
2681 */
2682int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
2683 *root)
2684{
2685 int ret = 0;
2686 int wret;
2687 int level;
2688 struct btrfs_path *path;
2689 int i;
2690 int orig_level;
2691 struct btrfs_root_item *root_item = &root->root_item;
2692
2693 WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
2694 path = btrfs_alloc_path();
2695 BUG_ON(!path);
2696
2697 level = btrfs_header_level(root->node);
2698 orig_level = level;
2699 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2700 path->nodes[level] = root->node;
2701 extent_buffer_get(root->node);
2702 path->slots[level] = 0;
2703 } else {
2704 struct btrfs_key key;
2705 struct btrfs_disk_key found_key;
2706 struct extent_buffer *node;
2707
2708 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
2709 level = root_item->drop_level;
2710 path->lowest_level = level;
2711 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2712 if (wret < 0) {
2713 ret = wret;
2714 goto out;
2715 }
2716 node = path->nodes[level];
2717 btrfs_node_key(node, &found_key, path->slots[level]);
2718 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
2719 sizeof(found_key)));
2720 /*
2721 * unlock our path, this is safe because only this
2722 * function is allowed to delete this snapshot
2723 */
2724 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
2725 if (path->nodes[i] && path->locks[i]) {
2726 path->locks[i] = 0;
2727 btrfs_tree_unlock(path->nodes[i]);
2728 }
2729 }
2730 }
2731 while(1) {
2732 wret = walk_down_tree(trans, root, path, &level);
2733 if (wret > 0)
2734 break;
2735 if (wret < 0)
2736 ret = wret;
2737
2738 wret = walk_up_tree(trans, root, path, &level);
2739 if (wret > 0)
2740 break;
2741 if (wret < 0)
2742 ret = wret;
2743 if (trans->transaction->in_commit) {
2744 ret = -EAGAIN;
2745 break;
2746 }
2747 atomic_inc(&root->fs_info->throttle_gen);
2748 wake_up(&root->fs_info->transaction_throttle);
2749 }
2750 for (i = 0; i <= orig_level; i++) {
2751 if (path->nodes[i]) {
2752 free_extent_buffer(path->nodes[i]);
2753 path->nodes[i] = NULL;
2754 }
2755 }
2756out:
2757 btrfs_free_path(path);
2758 return ret;
2759}
2760
2761int btrfs_free_block_groups(struct btrfs_fs_info *info)
2762{
2763 u64 start;
2764 u64 end;
2765 u64 ptr;
2766 int ret;
2767
2768 mutex_lock(&info->alloc_mutex);
2769 while(1) {
2770 ret = find_first_extent_bit(&info->block_group_cache, 0,
2771 &start, &end, (unsigned int)-1);
2772 if (ret)
2773 break;
2774 ret = get_state_private(&info->block_group_cache, start, &ptr);
2775 if (!ret)
2776 kfree((void *)(unsigned long)ptr);
2777 clear_extent_bits(&info->block_group_cache, start,
2778 end, (unsigned int)-1, GFP_NOFS);
2779 }
2780 while(1) {
2781 ret = find_first_extent_bit(&info->free_space_cache, 0,
2782 &start, &end, EXTENT_DIRTY);
2783 if (ret)
2784 break;
2785 clear_extent_dirty(&info->free_space_cache, start,
2786 end, GFP_NOFS);
2787 }
2788 mutex_unlock(&info->alloc_mutex);
2789 return 0;
2790}
2791
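/*
 * clamp a readahead window of 'nr' pages starting at 'start' so that
 * it never runs past 'last'.
 */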
2792static unsigned long calc_ra(unsigned long start, unsigned long last,
2793 unsigned long nr)
2794{
2795 return min(last, start + nr - 1);
2796}
2797
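/*
 * pull the pages for part of a file into the page cache and mark them
 * dirty for delalloc.  When writeback runs, the allocator is expected
 * to place the data outside the read-only block group being emptied,
 * which is what actually relocates file data.
 */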
2798static int noinline relocate_inode_pages(struct inode *inode, u64 start,
2799 u64 len)
2800{
2801 u64 page_start;
2802 u64 page_end;
2803 unsigned long last_index;
2804 unsigned long i;
2805 struct page *page;
2806 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2807 struct file_ra_state *ra;
2808 unsigned long total_read = 0;
2809 unsigned long ra_pages;
2810 struct btrfs_ordered_extent *ordered;
2811 struct btrfs_trans_handle *trans;
2812
2813 ra = kzalloc(sizeof(*ra), GFP_NOFS);
 if (!ra)
 return -ENOMEM;
2814
2815 mutex_lock(&inode->i_mutex);
2816 i = start >> PAGE_CACHE_SHIFT;
2817 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
2818
2819 ra_pages = BTRFS_I(inode)->root->fs_info->bdi.ra_pages;
2820
2821 file_ra_state_init(ra, inode->i_mapping);
2822
2823 for (; i <= last_index; i++) {
2824 if (total_read % ra_pages == 0) {
2825 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
2826 calc_ra(i, last_index, ra_pages));
2827 }
2828 total_read++;
2829again:
2830 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
2831 goto truncate_racing;
2832 page = grab_cache_page(inode->i_mapping, i);
2833 if (!page) {
2834 goto out_unlock;
2835 }
2836 if (!PageUptodate(page)) {
2837 btrfs_readpage(NULL, page);
2838 lock_page(page);
2839 if (!PageUptodate(page)) {
2840 unlock_page(page);
2841 page_cache_release(page);
2842 goto out_unlock;
2843 }
2844 }
2845 wait_on_page_writeback(page);
2846
2847 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2848 page_end = page_start + PAGE_CACHE_SIZE - 1;
2849 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2850
2851 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2852 if (ordered) {
2853 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2854 unlock_page(page);
2855 page_cache_release(page);
2856 btrfs_start_ordered_extent(inode, ordered, 1);
2857 btrfs_put_ordered_extent(ordered);
2858 goto again;
2859 }
2860 set_page_extent_mapped(page);
2861
2862 /*
2863 * make sure page_mkwrite is called for this page if userland
2864 * wants to change it from mmap
2865 */
2866 clear_page_dirty_for_io(page);
2867
2868 set_extent_delalloc(io_tree, page_start,
2869 page_end, GFP_NOFS);
2870 set_page_dirty(page);
2871
2872 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2873 unlock_page(page);
2874 page_cache_release(page);
2875 }
2876
2877out_unlock:
2878 /* we have to start the IO in order to get the ordered extents
2879 * instantiated. This allows the relocation code to wait
2880 * for all the ordered extents to hit the disk.
2881 *
2882 * Otherwise, it would constantly loop over the same extents
2883 * because the old ones don't get deleted until the IO is
2884 * started
2885 */
2886 btrfs_fdatawrite_range(inode->i_mapping, start, start + len - 1,
2887 WB_SYNC_NONE);
2888 kfree(ra);
2889 trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
2890 if (trans) {
2891 btrfs_end_transaction(trans, BTRFS_I(inode)->root);
2892 mark_inode_dirty(inode);
2893 }
2894 mutex_unlock(&inode->i_mutex);
2895 return 0;
2896
2897truncate_racing:
2898 vmtruncate(inode, inode->i_size);
2899 balance_dirty_pages_ratelimited_nr(inode->i_mapping,
2900 total_read);
2901 goto out_unlock;
2902}
2903
2904/*
2905 * The back references tell us which tree holds a ref on a block,
2906 * but it is possible for the tree root field in the reference to
2907 * reflect the original root before a snapshot was made. In this
2908 * case we should search through all the children of a given root
2909 * to find potential holders of references on a block.
2910 *
2911 * Instead, we do something a little less fancy and just search
2912 * all the roots for a given key/block combination.
2913 */
2914static int find_root_for_ref(struct btrfs_root *root,
2915 struct btrfs_path *path,
2916 struct btrfs_key *key0,
2917 int level,
2918 int file_key,
2919 struct btrfs_root **found_root,
2920 u64 bytenr)
2921{
2922 struct btrfs_key root_location;
2923 struct btrfs_root *cur_root = *found_root;
2924 struct btrfs_file_extent_item *file_extent;
2925 u64 root_search_start = BTRFS_FS_TREE_OBJECTID;
2926 u64 found_bytenr;
2927 int ret;
2928
2929 root_location.offset = (u64)-1;
2930 root_location.type = BTRFS_ROOT_ITEM_KEY;
2931 path->lowest_level = level;
2932 path->reada = 0;
2933 while(1) {
2934 ret = btrfs_search_slot(NULL, cur_root, key0, path, 0, 0);
2935 found_bytenr = 0;
2936 if (ret == 0 && file_key) {
2937 struct extent_buffer *leaf = path->nodes[0];
2938 file_extent = btrfs_item_ptr(leaf, path->slots[0],
2939 struct btrfs_file_extent_item);
2940 if (btrfs_file_extent_type(leaf, file_extent) ==
2941 BTRFS_FILE_EXTENT_REG) {
2942 found_bytenr =
2943 btrfs_file_extent_disk_bytenr(leaf,
2944 file_extent);
2945 }
2946 } else if (!file_key) {
2947 if (path->nodes[level])
2948 found_bytenr = path->nodes[level]->start;
2949 }
2950
2951 btrfs_release_path(cur_root, path);
2952
2953 if (found_bytenr == bytenr) {
2954 *found_root = cur_root;
2955 ret = 0;
2956 goto out;
2957 }
2958 ret = btrfs_search_root(root->fs_info->tree_root,
2959 root_search_start, &root_search_start);
2960 if (ret)
2961 break;
2962
2963 root_location.objectid = root_search_start;
2964 cur_root = btrfs_read_fs_root_no_name(root->fs_info,
2965 &root_location);
2966 if (!cur_root) {
2967 ret = 1;
2968 break;
2969 }
2970 }
2971out:
2972 path->lowest_level = 0;
2973 return ret;
2974}
2975
2976/*
 * relocate a single backref: look up the root that still references
 * the extent, then either redirty the referencing file pages or cow
 * the referencing btree block so the data moves somewhere else.
 *
2977 * note, this releases the path passed in
2978 */
2979static int noinline relocate_one_reference(struct btrfs_root *extent_root,
2980 struct btrfs_path *path,
2981 struct btrfs_key *extent_key,
2982 u64 *last_file_objectid,
2983 u64 *last_file_offset,
2984 u64 *last_file_root,
2985 u64 last_extent)
2986{
2987 struct inode *inode;
2988 struct btrfs_root *found_root;
2989 struct btrfs_key root_location;
2990 struct btrfs_key found_key;
2991 struct btrfs_extent_ref *ref;
2992 u64 ref_root;
2993 u64 ref_gen;
2994 u64 ref_objectid;
2995 u64 ref_offset;
2996 int ret;
2997 int level;
2998
2999 WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
3000
3001 ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
3002 struct btrfs_extent_ref);
3003 ref_root = btrfs_ref_root(path->nodes[0], ref);
3004 ref_gen = btrfs_ref_generation(path->nodes[0], ref);
3005 ref_objectid = btrfs_ref_objectid(path->nodes[0], ref);
3006 ref_offset = btrfs_ref_offset(path->nodes[0], ref);
3007 btrfs_release_path(extent_root, path);
3008
3009 root_location.objectid = ref_root;
3010 if (ref_gen == 0)
3011 root_location.offset = 0;
3012 else
3013 root_location.offset = (u64)-1;
3014 root_location.type = BTRFS_ROOT_ITEM_KEY;
3015
3016 found_root = btrfs_read_fs_root_no_name(extent_root->fs_info,
3017 &root_location);
3018 BUG_ON(!found_root);
3019 mutex_unlock(&extent_root->fs_info->alloc_mutex);
3020
3021 if (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
3022 found_key.objectid = ref_objectid;
3023 found_key.type = BTRFS_EXTENT_DATA_KEY;
3024 found_key.offset = ref_offset;
3025 level = 0;
3026
3027 if (last_extent == extent_key->objectid &&
3028 *last_file_objectid == ref_objectid &&
3029 *last_file_offset == ref_offset &&
3030 *last_file_root == ref_root)
3031 goto out;
3032
3033 ret = find_root_for_ref(extent_root, path, &found_key,
3034 level, 1, &found_root,
3035 extent_key->objectid);
3036
3037 if (ret)
3038 goto out;
3039
3040 if (last_extent == extent_key->objectid &&
3041 *last_file_objectid == ref_objectid &&
3042 *last_file_offset == ref_offset &&
3043 *last_file_root == ref_root)
3044 goto out;
3045
3046 inode = btrfs_iget_locked(extent_root->fs_info->sb,
3047 ref_objectid, found_root);
3048 if (inode->i_state & I_NEW) {
3049 /* the inode and parent dir are two different roots */
3050 BTRFS_I(inode)->root = found_root;
3051 BTRFS_I(inode)->location.objectid = ref_objectid;
3052 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
3053 BTRFS_I(inode)->location.offset = 0;
3054 btrfs_read_locked_inode(inode);
3055 unlock_new_inode(inode);
3056
3057 }
3058 /* this can happen if the reference is not against
3059 * the latest version of the tree root
3060 */
3061 if (is_bad_inode(inode))
3062 goto out;
3063
3064 *last_file_objectid = inode->i_ino;
3065 *last_file_root = found_root->root_key.objectid;
3066 *last_file_offset = ref_offset;
3067
3068 relocate_inode_pages(inode, ref_offset, extent_key->offset);
3069 iput(inode);
3070 } else {
3071 struct btrfs_trans_handle *trans;
3072 struct extent_buffer *eb;
3073 int needs_lock = 0;
3074
3075 eb = read_tree_block(found_root, extent_key->objectid,
3076 extent_key->offset, 0);
3077 btrfs_tree_lock(eb);
3078 level = btrfs_header_level(eb);
3079
3080 if (level == 0)
3081 btrfs_item_key_to_cpu(eb, &found_key, 0);
3082 else
3083 btrfs_node_key_to_cpu(eb, &found_key, 0);
3084
3085 btrfs_tree_unlock(eb);
3086 free_extent_buffer(eb);
3087
3088 ret = find_root_for_ref(extent_root, path, &found_key,
3089 level, 0, &found_root,
3090 extent_key->objectid);
3091
3092 if (ret)
3093 goto out;
3094
3095 /*
3096 * right here almost anything could happen to our key,
3097 * but that's ok. The cow below will either relocate it
3098 * or someone else will have relocated it. Either way,
3099 * it is in a different spot than it was before and
3100 * we're happy.
3101 */
3102
3103 trans = btrfs_start_transaction(found_root, 1);
3104
3105 if (found_root == extent_root->fs_info->extent_root ||
3106 found_root == extent_root->fs_info->chunk_root ||
3107 found_root == extent_root->fs_info->dev_root) {
3108 needs_lock = 1;
3109 mutex_lock(&extent_root->fs_info->alloc_mutex);
3110 }
3111
3112 path->lowest_level = level;
3113 path->reada = 2;
3114 ret = btrfs_search_slot(trans, found_root, &found_key, path,
3115 0, 1);
3116 path->lowest_level = 0;
3117 btrfs_release_path(found_root, path);
3118
3119 if (found_root == found_root->fs_info->extent_root)
3120 btrfs_extent_post_op(trans, found_root);
3121 if (needs_lock)
3122 mutex_unlock(&extent_root->fs_info->alloc_mutex);
3123
3124 btrfs_end_transaction(trans, found_root);
3125
3126 }
3127out:
3128 mutex_lock(&extent_root->fs_info->alloc_mutex);
3129 return 0;
3130}
3131
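/*
 * helper for relocate_one_extent: an extent record at objectid zero
 * cannot be relocated (nothing should reference byte zero), so just
 * delete the item from the extent tree.
 */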
3132static int noinline del_extent_zero(struct btrfs_root *extent_root,
3133 struct btrfs_path *path,
3134 struct btrfs_key *extent_key)
3135{
3136 int ret;
3137 struct btrfs_trans_handle *trans;
3138
3139 trans = btrfs_start_transaction(extent_root, 1);
3140 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
3141 if (ret > 0) {
3142 ret = -EIO;
3143 goto out;
3144 }
3145 if (ret < 0)
3146 goto out;
3147 ret = btrfs_del_item(trans, extent_root, path);
3148out:
3149 btrfs_end_transaction(trans, extent_root);
3150 return ret;
3151}
3152
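/*
 * relocate all the references to a single extent.  Each backref item
 * recorded for extent_key is looked up in turn and handed to
 * relocate_one_reference, which moves the file data or btree block
 * holding the ref.
 */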
3153static int noinline relocate_one_extent(struct btrfs_root *extent_root,
3154 struct btrfs_path *path,
3155 struct btrfs_key *extent_key)
3156{
3157 struct btrfs_key key;
3158 struct btrfs_key found_key;
3159 struct extent_buffer *leaf;
3160 u64 last_file_objectid = 0;
3161 u64 last_file_root = 0;
3162 u64 last_file_offset = (u64)-1;
3163 u64 last_extent = 0;
3164 u32 nritems;
3165 u32 item_size;
3166 int ret = 0;
3167
3168 if (extent_key->objectid == 0) {
3169 ret = del_extent_zero(extent_root, path, extent_key);
3170 goto out;
3171 }
3172 key.objectid = extent_key->objectid;
3173 key.type = BTRFS_EXTENT_REF_KEY;
3174 key.offset = 0;
3175
3176 while(1) {
3177 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3178
3179 if (ret < 0)
3180 goto out;
3181
3182 ret = 0;
3183 leaf = path->nodes[0];
3184 nritems = btrfs_header_nritems(leaf);
3185 if (path->slots[0] == nritems) {
3186 ret = btrfs_next_leaf(extent_root, path);
3187 if (ret > 0) {
3188 ret = 0;
3189 goto out;
3190 }
3191 if (ret < 0)
3192 goto out;
3193 leaf = path->nodes[0];
3194 }
3195
3196 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3197 if (found_key.objectid != extent_key->objectid) {
3198 break;
3199 }
3200
3201 if (found_key.type != BTRFS_EXTENT_REF_KEY) {
3202 break;
3203 }
3204
3205 key.offset = found_key.offset + 1;
3206 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3207
3208 ret = relocate_one_reference(extent_root, path, extent_key,
3209 &last_file_objectid,
3210 &last_file_offset,
3211 &last_file_root, last_extent);
3212 if (ret)
3213 goto out;
3214 last_extent = extent_key->objectid;
3215 }
3216 ret = 0;
3217out:
3218 btrfs_release_path(extent_root, path);
3219 return ret;
3220}
3221
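/*
 * figure out the profile new chunks should use now that the number of
 * devices may have changed: with a single device left, mirroring
 * degrades to duplication and raid0 to single chunks; with several
 * devices, duplication becomes raid1 and single chunks become raid0.
 */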
3222static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
3223{
3224 u64 num_devices;
3225 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
3226 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
3227
3228 num_devices = root->fs_info->fs_devices->num_devices;
3229 if (num_devices == 1) {
3230 stripped |= BTRFS_BLOCK_GROUP_DUP;
3231 stripped = flags & ~stripped;
3232
3233 /* turn raid0 into single device chunks */
3234 if (flags & BTRFS_BLOCK_GROUP_RAID0)
3235 return stripped;
3236
3237 /* turn mirroring into duplication */
3238 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3239 BTRFS_BLOCK_GROUP_RAID10))
3240 return stripped | BTRFS_BLOCK_GROUP_DUP;
3241 return flags;
3242 } else {
3243 /* they already had raid on here, just return */
3244 if (flags & stripped)
3245 return flags;
3246
3247 stripped |= BTRFS_BLOCK_GROUP_DUP;
3248 stripped = flags & ~stripped;
3249
3250 /* switch duplicated blocks with raid1 */
3251 if (flags & BTRFS_BLOCK_GROUP_DUP)
3252 return stripped | BTRFS_BLOCK_GROUP_RAID1;
3253
3254 /* turn single device chunks into raid0 */
3255 return stripped | BTRFS_BLOCK_GROUP_RAID0;
3256 }
3257 return flags;
3258}
3259
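/*
 * make sure there is room to copy everything out of the block group
 * being shrunk.  If the group still has bytes in use, start a
 * transaction and allocate a new chunk with an updated profile; the
 * alloc_mutex is dropped around the transaction start to avoid
 * deadlocks.
 */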
3260int __alloc_chunk_for_shrink(struct btrfs_root *root,
3261 struct btrfs_block_group_cache *shrink_block_group,
3262 int force)
3263{
3264 struct btrfs_trans_handle *trans;
3265 u64 new_alloc_flags;
3266 u64 calc;
3267
3268 spin_lock(&shrink_block_group->lock);
3269 if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
3270 spin_unlock(&shrink_block_group->lock);
3271 mutex_unlock(&root->fs_info->alloc_mutex);
3272
3273 trans = btrfs_start_transaction(root, 1);
3274 mutex_lock(&root->fs_info->alloc_mutex);
3275 spin_lock(&shrink_block_group->lock);
3276
3277 new_alloc_flags = update_block_group_flags(root,
3278 shrink_block_group->flags);
3279 if (new_alloc_flags != shrink_block_group->flags) {
3280 calc =
3281 btrfs_block_group_used(&shrink_block_group->item);
3282 } else {
3283 calc = shrink_block_group->key.offset;
3284 }
3285 spin_unlock(&shrink_block_group->lock);
3286
3287 do_chunk_alloc(trans, root->fs_info->extent_root,
3288 calc + 2 * 1024 * 1024, new_alloc_flags, force);
3289
3290 mutex_unlock(&root->fs_info->alloc_mutex);
3291 btrfs_end_transaction(trans, root);
3292 mutex_lock(&root->fs_info->alloc_mutex);
3293 } else
3294 spin_unlock(&shrink_block_group->lock);
3295 return 0;
3296}
3297
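/*
 * move every extent in the block group that starts at shrink_start
 * somewhere else, then delete the block group item.  Each pass
 * relocates whatever it finds and commits the transaction; we loop
 * until a full scan of the group's range comes up empty.
 */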
3298int btrfs_shrink_extent_tree(struct btrfs_root *root, u64 shrink_start)
3299{
3300 struct btrfs_trans_handle *trans;
3301 struct btrfs_root *tree_root = root->fs_info->tree_root;
3302 struct btrfs_path *path;
3303 u64 cur_byte;
3304 u64 total_found;
3305 u64 shrink_last_byte;
3306 struct btrfs_block_group_cache *shrink_block_group;
3307 struct btrfs_fs_info *info = root->fs_info;
3308 struct btrfs_key key;
3309 struct btrfs_key found_key;
3310 struct extent_buffer *leaf;
3311 u32 nritems;
3312 int ret;
3313 int progress;
3314
3315 mutex_lock(&root->fs_info->alloc_mutex);
3316 shrink_block_group = btrfs_lookup_block_group(root->fs_info,
3317 shrink_start);
3318 BUG_ON(!shrink_block_group);
3319
3320 shrink_last_byte = shrink_block_group->key.objectid +
3321 shrink_block_group->key.offset;
3322
3323 shrink_block_group->space_info->total_bytes -=
3324 shrink_block_group->key.offset;
3325 path = btrfs_alloc_path();
3326 root = root->fs_info->extent_root;
3327 path->reada = 2;
3328
3329 printk("btrfs relocating block group %llu flags %llu\n",
3330 (unsigned long long)shrink_start,
3331 (unsigned long long)shrink_block_group->flags);
3332
3333 __alloc_chunk_for_shrink(root, shrink_block_group, 1);
3334
3335again:
3336
3337 shrink_block_group->ro = 1;
3338
3339 total_found = 0;
3340 progress = 0;
3341 key.objectid = shrink_start;
3342 key.offset = 0;
3343 key.type = 0;
3344 cur_byte = key.objectid;
3345
3346 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3347 if (ret < 0)
3348 goto out;
3349
3350 ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
3351 if (ret < 0)
3352 goto out;
3353
3354 if (ret == 0) {
3355 leaf = path->nodes[0];
3356 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3357 if (found_key.objectid + found_key.offset > shrink_start &&
3358 found_key.objectid < shrink_last_byte) {
3359 cur_byte = found_key.objectid;
3360 key.objectid = cur_byte;
3361 }
3362 }
3363 btrfs_release_path(root, path);
3364
3365 while(1) {
3366 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3367 if (ret < 0)
3368 goto out;
3369
3370next:
3371 leaf = path->nodes[0];
3372 nritems = btrfs_header_nritems(leaf);
3373 if (path->slots[0] >= nritems) {
3374 ret = btrfs_next_leaf(root, path);
3375 if (ret < 0)
3376 goto out;
3377 if (ret == 1) {
3378 ret = 0;
3379 break;
3380 }
3381 leaf = path->nodes[0];
3382 nritems = btrfs_header_nritems(leaf);
3383 }
3384
3385 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3386
3387 if (found_key.objectid >= shrink_last_byte)
3388 break;
3389
3390 if (progress && need_resched()) {
3391 memcpy(&key, &found_key, sizeof(key));
3392 cond_resched();
3393 btrfs_release_path(root, path);
3394 btrfs_search_slot(NULL, root, &key, path, 0, 0);
3395 progress = 0;
3396 goto next;
3397 }
3398 progress = 1;
3399
3400 if (btrfs_key_type(&found_key) != BTRFS_EXTENT_ITEM_KEY ||
3401 found_key.objectid + found_key.offset <= cur_byte) {
3402 memcpy(&key, &found_key, sizeof(key));
3403 key.offset++;
3404 path->slots[0]++;
3405 goto next;
3406 }
3407
3408 total_found++;
3409 cur_byte = found_key.objectid + found_key.offset;
3410 key.objectid = cur_byte;
3411 btrfs_release_path(root, path);
3412 ret = relocate_one_extent(root, path, &found_key);
3413 __alloc_chunk_for_shrink(root, shrink_block_group, 0);
3414 }
3415
3416 btrfs_release_path(root, path);
3417
3418 if (total_found > 0) {
3419 printk("btrfs relocate found %llu last extent was %llu\n",
3420 (unsigned long long)total_found,
3421 (unsigned long long)found_key.objectid);
3422 mutex_unlock(&root->fs_info->alloc_mutex);
3423 trans = btrfs_start_transaction(tree_root, 1);
3424 btrfs_commit_transaction(trans, tree_root);
3425
3426 btrfs_clean_old_snapshots(tree_root);
3427
3428 btrfs_wait_ordered_extents(tree_root);
3429
3430 trans = btrfs_start_transaction(tree_root, 1);
3431 btrfs_commit_transaction(trans, tree_root);
3432 mutex_lock(&root->fs_info->alloc_mutex);
3433 goto again;
3434 }
3435
3436 /*
3437 * we've freed all the extents; now remove the block
3438 * group item from the tree
3439 */
3440 mutex_unlock(&root->fs_info->alloc_mutex);
3441
3442 trans = btrfs_start_transaction(root, 1);
3443
3444 mutex_lock(&root->fs_info->alloc_mutex);
3445 memcpy(&key, &shrink_block_group->key, sizeof(key));
3446
3447 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3448 if (ret > 0)
3449 ret = -EIO;
3450 if (ret < 0) {
3451 btrfs_end_transaction(trans, root);
3452 goto out;
3453 }
3454
3455 clear_extent_bits(&info->block_group_cache, key.objectid,
3456 key.objectid + key.offset - 1,
3457 (unsigned int)-1, GFP_NOFS);
3458
3459
3460 clear_extent_bits(&info->free_space_cache,
3461 key.objectid, key.objectid + key.offset - 1,
3462 (unsigned int)-1, GFP_NOFS);
3463
3464 memset(shrink_block_group, 0, sizeof(*shrink_block_group));
3465 kfree(shrink_block_group);
3466
3467 btrfs_del_item(trans, root, path);
3468 btrfs_release_path(root, path);
3469 mutex_unlock(&root->fs_info->alloc_mutex);
3470 btrfs_commit_transaction(trans, root);
3471
3472 mutex_lock(&root->fs_info->alloc_mutex);
3473
3474 /* the code to unpin extents might set a few bits in the free
3475 * space cache for this range again
3476 */
3477 clear_extent_bits(&info->free_space_cache,
3478 key.objectid, key.objectid + key.offset - 1,
3479 (unsigned int)-1, GFP_NOFS);
3480out:
3481 btrfs_free_path(path);
3482 mutex_unlock(&root->fs_info->alloc_mutex);
3483 return ret;
3484}
3485
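/*
 * find the first block group item with an objectid at or past the one
 * in 'key'.  Returns 0 with the path pointing at the item, -ENOENT if
 * there are no more block groups, or an error from the tree search.
 */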
3486int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
3487 struct btrfs_key *key)
3488{
3489 int ret = 0;
3490 struct btrfs_key found_key;
3491 struct extent_buffer *leaf;
3492 int slot;
3493
3494 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
3495 if (ret < 0)
3496 goto out;
3497
3498 while(1) {
3499 slot = path->slots[0];
3500 leaf = path->nodes[0];
3501 if (slot >= btrfs_header_nritems(leaf)) {
3502 ret = btrfs_next_leaf(root, path);
3503 if (ret == 0)
3504 continue;
3505 if (ret < 0)
3506 goto out;
3507 break;
3508 }
3509 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3510
3511 if (found_key.objectid >= key->objectid &&
3512 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
3513 ret = 0;
3514 goto out;
3515 }
3516 path->slots[0]++;
3517 }
3518 ret = -ENOENT;
3519out:
3520 return ret;
3521}
3522
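/*
 * called at mount time: read every block group item out of the extent
 * tree and build the in-memory cache, the per-type space_info
 * accounting and the available allocation bits.
 */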
3523int btrfs_read_block_groups(struct btrfs_root *root)
3524{
3525 struct btrfs_path *path;
3526 int ret;
3527 int bit;
3528 struct btrfs_block_group_cache *cache;
3529 struct btrfs_fs_info *info = root->fs_info;
3530 struct btrfs_space_info *space_info;
3531 struct extent_io_tree *block_group_cache;
3532 struct btrfs_key key;
3533 struct btrfs_key found_key;
3534 struct extent_buffer *leaf;
3535
3536 block_group_cache = &info->block_group_cache;
3537 root = info->extent_root;
3538 key.objectid = 0;
3539 key.offset = 0;
3540 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
3541 path = btrfs_alloc_path();
3542 if (!path)
3543 return -ENOMEM;
3544
3545 mutex_lock(&root->fs_info->alloc_mutex);
3546 while(1) {
3547 ret = find_first_block_group(root, path, &key);
3548 if (ret > 0) {
3549 ret = 0;
3550 goto error;
3551 }
3552 if (ret != 0)
3553 goto error;
3554
3555 leaf = path->nodes[0];
3556 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3557 cache = kzalloc(sizeof(*cache), GFP_NOFS);
3558 if (!cache) {
3559 ret = -ENOMEM;
3560 break;
3561 }
3562
3563 spin_lock_init(&cache->lock);
3564 read_extent_buffer(leaf, &cache->item,
3565 btrfs_item_ptr_offset(leaf, path->slots[0]),
3566 sizeof(cache->item));
3567 memcpy(&cache->key, &found_key, sizeof(found_key));
3568
3569 key.objectid = found_key.objectid + found_key.offset;
3570 btrfs_release_path(root, path);
3571 cache->flags = btrfs_block_group_flags(&cache->item);
3572 bit = 0;
3573 if (cache->flags & BTRFS_BLOCK_GROUP_DATA) {
3574 bit = BLOCK_GROUP_DATA;
3575 } else if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
3576 bit = BLOCK_GROUP_SYSTEM;
3577 } else if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) {
3578 bit = BLOCK_GROUP_METADATA;
3579 }
3580 set_avail_alloc_bits(info, cache->flags);
3581
3582 ret = update_space_info(info, cache->flags, found_key.offset,
3583 btrfs_block_group_used(&cache->item),
3584 &space_info);
3585 BUG_ON(ret);
3586 cache->space_info = space_info;
3587
3588 /* use EXTENT_LOCKED to prevent merging */
3589 set_extent_bits(block_group_cache, found_key.objectid,
3590 found_key.objectid + found_key.offset - 1,
3591 EXTENT_LOCKED, GFP_NOFS);
3592 set_state_private(block_group_cache, found_key.objectid,
3593 (unsigned long)cache);
3594 set_extent_bits(block_group_cache, found_key.objectid,
3595 found_key.objectid + found_key.offset - 1,
3596 bit | EXTENT_LOCKED, GFP_NOFS);
3597 if (key.objectid >=
3598 btrfs_super_total_bytes(&info->super_copy))
3599 break;
3600 }
3601 ret = 0;
3602error:
3603 btrfs_free_path(path);
3604 mutex_unlock(&root->fs_info->alloc_mutex);
3605 return ret;
3606}
3607
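/*
 * create the block group for a freshly allocated chunk: set up the
 * in-memory cache and space accounting, then insert the block group
 * item into the extent tree.
 */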
3608int btrfs_make_block_group(struct btrfs_trans_handle *trans,
3609 struct btrfs_root *root, u64 bytes_used,
3610 u64 type, u64 chunk_objectid, u64 chunk_offset,
3611 u64 size)
3612{
3613 int ret;
3614 int bit = 0;
3615 struct btrfs_root *extent_root;
3616 struct btrfs_block_group_cache *cache;
3617 struct extent_io_tree *block_group_cache;
3618
3619 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
3620 extent_root = root->fs_info->extent_root;
3621 block_group_cache = &root->fs_info->block_group_cache;
3622
3623 cache = kzalloc(sizeof(*cache), GFP_NOFS);
3624 BUG_ON(!cache);
3625 cache->key.objectid = chunk_offset;
3626 cache->key.offset = size;
3627 spin_lock_init(&cache->lock);
3628 btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
3629
3630 btrfs_set_block_group_used(&cache->item, bytes_used);
3631 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
3632 cache->flags = type;
3633 btrfs_set_block_group_flags(&cache->item, type);
3634
3635 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
3636 &cache->space_info);
3637 BUG_ON(ret);
3638
3639 bit = block_group_state_bits(type);
3640 set_extent_bits(block_group_cache, chunk_offset,
3641 chunk_offset + size - 1,
3642 EXTENT_LOCKED, GFP_NOFS);
3643 set_state_private(block_group_cache, chunk_offset,
3644 (unsigned long)cache);
3645 set_extent_bits(block_group_cache, chunk_offset,
3646 chunk_offset + size - 1,
3647 bit | EXTENT_LOCKED, GFP_NOFS);
3648
3649 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
3650 sizeof(cache->item));
3651 BUG_ON(ret);
3652
3653 finish_current_insert(trans, extent_root);
3654 ret = del_pending_extents(trans, extent_root);
3655 BUG_ON(ret);
3656 set_avail_alloc_bits(extent_root->fs_info, type);
3657
3658 return 0;
3659}