/*
 * Copyright (C) 2008 Red Hat. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"

#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)

static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize,
					  u64 offset)
{
	BUG_ON(offset < bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div64_u64(offset, sectorsize));
}

static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize)
{
	return (unsigned long)(div64_u64(bytes, sectorsize));
}
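
/*
 * e.g. with a 4K sectorsize, offset_to_bit(0, 4096, 8192) == 2 and
 * bytes_to_bits(16384, 4096) == 4; both helpers just scale byte
 * quantities down to bitmap-bit granularity.
 */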

static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize;
	bitmap_start = offset - block_group->key.objectid;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += block_group->key.objectid;

	return bitmap_start;
}
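
/*
 * e.g. assuming 4K pages and a 4K sectorsize, one bitmap covers
 * BITS_PER_BITMAP * sectorsize == 32768 * 4096 bytes == 128MB, so an
 * offset 200MB into the block group rounds down to the bitmap starting
 * at key.objectid + 128MB.
 */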

static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset. If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap. So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically. If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				WARN_ON(info->bitmap);
				p = &(*p)->rb_right;
			} else {
				WARN_ON(!info->bitmap);
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}

/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_block_group_cache *block_group,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = block_group->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = &entry->offset_index;
			while (1) {
				n = rb_prev(n);
				if (!n)
					break;
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap) {
					if (prev->offset + prev->bytes > offset)
						entry = prev;
					break;
				}
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			BUG_ON(entry->offset > offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = &entry->offset_index;
		while (1) {
			n = rb_prev(n);
			if (!n)
				break;
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap) {
				if (prev->offset + prev->bytes > offset)
					return prev;
				break;
			}
		}
		if (entry->offset + BITS_PER_BITMAP *
		    block_group->sectorsize > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    block_group->sectorsize > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}
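
/*
 * usage note: with bitmap_only set, only a bitmap entry at exactly
 * 'offset' is returned, skipping past an extent entry that shares the
 * offset.  Without fuzzy a non-NULL return always contains 'offset';
 * with fuzzy we keep walking forward and may return an entry that
 * starts beyond it, which is what the allocation paths want.
 */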

static void unlink_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &block_group->free_space_offset);
	block_group->free_extents--;
}

static int link_free_space(struct btrfs_block_group_cache *block_group,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	BUG_ON(!info->bitmap && !info->bytes);
	ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	block_group->free_extents++;
	return ret;
}

static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
{
	u64 max_bytes, possible_bytes;

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	max_bytes = MAX_CACHE_BYTES_PER_GIG *
		(div64_u64(block_group->key.offset, 1024 * 1024 * 1024));

	possible_bytes = (block_group->total_bitmaps * PAGE_CACHE_SIZE) +
		(sizeof(struct btrfs_free_space) *
		 block_group->extents_thresh);

	if (possible_bytes > max_bytes) {
		int extent_bytes = max_bytes -
			(block_group->total_bitmaps * PAGE_CACHE_SIZE);

		if (extent_bytes <= 0) {
			block_group->extents_thresh = 0;
			return;
		}

		block_group->extents_thresh = extent_bytes /
			(sizeof(struct btrfs_free_space));
	}
}
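
/*
 * e.g. a 1GB block group gets a 32K budget.  Assuming 4K pages, two
 * bitmaps already cost 8K of that, and the remaining 24K divided by
 * sizeof(struct btrfs_free_space) (a few dozen bytes on 64-bit) caps
 * extents_thresh at a few hundred extent entries.  Note we only ever
 * lower the threshold here, never raise it back.
 */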

static void bitmap_clear_bits(struct btrfs_free_space *info, u64 offset,
			      u64 bytes, u64 sectorsize)
{
	unsigned long start, end;
	unsigned long i;

	start = offset_to_bit(info->offset, sectorsize, offset);
	end = start + bytes_to_bits(bytes, sectorsize);
	BUG_ON(end > BITS_PER_BITMAP);

	for (i = start; i < end; i++)
		clear_bit(i, info->bitmap);

	info->bytes -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space *info, u64 offset,
			    u64 bytes, u64 sectorsize)
{
	unsigned long start, end;
	unsigned long i;

	start = offset_to_bit(info->offset, sectorsize, offset);
	end = start + bytes_to_bits(bytes, sectorsize);
	BUG_ON(end > BITS_PER_BITMAP);

	for (i = start; i < end; i++)
		set_bit(i, info->bitmap);

	info->bytes += bytes;
}

static int search_bitmap(struct btrfs_block_group_cache *block_group,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes)
{
	unsigned long found_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;

	i = offset_to_bit(bitmap_info->offset, block_group->sectorsize,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, block_group->sectorsize);

	for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		if ((next_zero - i) >= bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * block_group->sectorsize) +
			bitmap_info->offset;
		*bytes = (u64)(found_bits) * block_group->sectorsize;
		return 0;
	}

	return -1;
}
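
/*
 * on success *offset and *bytes are updated in place to describe the
 * run that was found, so a caller asking for 8K at *offset may get back
 * a larger run starting at the first sufficiently long run of set bits
 * at or after *offset; on failure -1 is returned and the outputs are
 * left untouched.
 */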

static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
						*block_group, u64 *offset,
						u64 *bytes, int debug)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret;

	if (!block_group->free_space_offset.rb_node)
		return NULL;

	entry = tree_search_offset(block_group,
				   offset_to_bitmap(block_group, *offset),
				   0, 1);
	if (!entry)
		return NULL;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes)
			continue;

		if (entry->bitmap) {
			ret = search_bitmap(block_group, entry, offset, bytes);
			if (!ret)
				return entry;
			continue;
		}

		*offset = entry->offset;
		*bytes = entry->bytes;
		return entry;
	}

	return NULL;
}

static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
			   struct btrfs_free_space *info, u64 offset)
{
	u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
	int max_bitmaps = (int)div64_u64(block_group->key.offset +
					 bytes_per_bg - 1, bytes_per_bg);

	BUG_ON(block_group->total_bitmaps >= max_bitmaps);

	info->offset = offset_to_bitmap(block_group, offset);

	link_free_space(block_group, info);
	block_group->total_bitmaps++;

	recalculate_thresholds(block_group);
}

static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
				       struct btrfs_free_space *bitmap_info,
				       u64 *offset, u64 *bytes)
{
	u64 end;

again:
	end = bitmap_info->offset +
		(u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1;

	if (*offset > bitmap_info->offset && *offset + *bytes > end) {
		bitmap_clear_bits(bitmap_info, *offset,
				  end - *offset + 1, block_group->sectorsize);
		*bytes -= end - *offset + 1;
		*offset = end + 1;
	} else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
		bitmap_clear_bits(bitmap_info, *offset,
				  *bytes, block_group->sectorsize);
		*bytes = 0;
	}

	if (*bytes) {
		if (!bitmap_info->bytes) {
			unlink_free_space(block_group, bitmap_info);
			kfree(bitmap_info->bitmap);
			kfree(bitmap_info);
			block_group->total_bitmaps--;
			recalculate_thresholds(block_group);
		}

		bitmap_info = tree_search_offset(block_group,
						 offset_to_bitmap(block_group,
								  *offset),
						 1, 0);
		if (!bitmap_info)
			return -EINVAL;

		if (!bitmap_info->bitmap)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes) {
		unlink_free_space(block_group, bitmap_info);
		kfree(bitmap_info->bitmap);
		kfree(bitmap_info);
		block_group->total_bitmaps--;
		recalculate_thresholds(block_group);
	}

	return 0;
}
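
/*
 * the 'again' loop above handles a range that spills past the end of
 * one bitmap: we clear up to 'end', carry the remainder forward in
 * *offset/*bytes, and look up the next bitmap.  -EAGAIN means the
 * remainder is covered by a regular extent entry and the caller has to
 * restart its search from scratch.
 */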

static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	int added = 0;
	u64 bytes, offset, end;
	int ret;

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (block_group->free_extents < block_group->extents_thresh &&
	    info->bytes > block_group->sectorsize * 4)
		return 0;

	/*
	 * some block groups are so tiny they can't be enveloped by a bitmap, so
	 * don't even bother to create a bitmap for this
	 */
	if (BITS_PER_BITMAP * block_group->sectorsize >
	    block_group->key.offset)
		return 0;

	bytes = info->bytes;
	offset = info->offset;

again:
	bitmap_info = tree_search_offset(block_group,
					 offset_to_bitmap(block_group, offset),
					 1, 0);
	if (!bitmap_info) {
		BUG_ON(added);
		goto new_bitmap;
	}

	end = bitmap_info->offset +
		(u64)(BITS_PER_BITMAP * block_group->sectorsize);

	if (offset >= bitmap_info->offset && offset + bytes > end) {
		bitmap_set_bits(bitmap_info, offset, end - offset,
				block_group->sectorsize);
		bytes -= end - offset;
		offset = end;
		added = 0;
	} else if (offset >= bitmap_info->offset && offset + bytes <= end) {
		bitmap_set_bits(bitmap_info, offset, bytes,
				block_group->sectorsize);
		bytes = 0;
	} else {
		BUG();
	}

	if (!bytes) {
		ret = 1;
		goto out;
	} else
		goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(block_group, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&block_group->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kzalloc(sizeof(struct btrfs_free_space),
				       GFP_NOFS);
			if (!info) {
				spin_lock(&block_group->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
		spin_lock(&block_group->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		if (info->bitmap)
			kfree(info->bitmap);
		kfree(info);
	}

	return ret;
}
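
/*
 * to illustrate the threshold check at the top: while the group has
 * fewer than extents_thresh extent entries, any range larger than four
 * sectors (16K with a 4K sectorsize) stays an extent entry and we
 * return 0 so the caller links it normally; a return of 1 means the
 * range was absorbed into a bitmap instead.
 */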

int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
			 u64 offset, u64 bytes)
{
	struct btrfs_free_space *right_info = NULL;
	struct btrfs_free_space *left_info = NULL;
	struct btrfs_free_space *info = NULL;
	int ret = 0;

	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;

	spin_lock(&block_group->tree_lock);

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(block_group, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(block_group, offset - 1, 0, 0);

	/*
	 * If there was no extent directly to the left or right of this new
	 * extent then we know we're going to have to allocate a new extent, so
	 * before we do that see if we need to drop this into a bitmap
	 */
	if ((!left_info || left_info->bitmap) &&
	    (!right_info || right_info->bitmap)) {
		ret = insert_into_bitmap(block_group, info);

		if (ret < 0) {
			goto out;
		} else if (ret) {
			ret = 0;
			goto out;
		}
	}

	if (right_info && !right_info->bitmap) {
		unlink_free_space(block_group, right_info);
		info->bytes += right_info->bytes;
		kfree(right_info);
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		unlink_free_space(block_group, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kfree(left_info);
	}

	ret = link_free_space(block_group, info);
	if (ret)
		kfree(info);
out:
	spin_unlock(&block_group->tree_lock);

	if (ret) {
		printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
		BUG_ON(ret == -EEXIST);
	}

	return ret;
}
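
/*
 * e.g. freeing [8M, 8M+64K) when extent entries already cover [7M, 8M)
 * and [8M+64K, 9M) collapses all three into one [7M, 9M) entry; only
 * when both neighbours are missing (or are bitmaps) do we consider
 * routing the new range into a bitmap via insert_into_bitmap().
 */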

int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	struct btrfs_free_space *next_info = NULL;
	int ret = 0;

	spin_lock(&block_group->tree_lock);

again:
	info = tree_search_offset(block_group, offset, 0, 0);
	if (!info) {
		WARN_ON(1);
		goto out_lock;
	}

	if (info->bytes < bytes && rb_next(&info->offset_index)) {
		u64 end;
		next_info = rb_entry(rb_next(&info->offset_index),
				     struct btrfs_free_space,
				     offset_index);

		if (next_info->bitmap)
			end = next_info->offset + BITS_PER_BITMAP *
				block_group->sectorsize - 1;
		else
			end = next_info->offset + next_info->bytes;

		if (next_info->bytes < bytes ||
		    next_info->offset > offset || offset > end) {
			printk(KERN_CRIT "Found free space at %llu, size %llu,"
			       " trying to use %llu\n",
			       (unsigned long long)info->offset,
			       (unsigned long long)info->bytes,
			       (unsigned long long)bytes);
			WARN_ON(1);
			ret = -EINVAL;
			goto out_lock;
		}

		info = next_info;
	}

	if (info->bytes == bytes) {
		unlink_free_space(block_group, info);
		if (info->bitmap) {
			kfree(info->bitmap);
			block_group->total_bitmaps--;
		}
		kfree(info);
		goto out_lock;
	}

	if (!info->bitmap && info->offset == offset) {
		unlink_free_space(block_group, info);
		info->offset += bytes;
		info->bytes -= bytes;
		link_free_space(block_group, info);
		goto out_lock;
	}

	if (!info->bitmap && info->offset <= offset &&
	    info->offset + info->bytes >= offset + bytes) {
		u64 old_start = info->offset;
		/*
		 * we're freeing space in the middle of the info,
		 * this can happen during tree log replay
		 *
		 * first unlink the old info and then
		 * insert it again after the hole we're creating
		 */
		unlink_free_space(block_group, info);
		if (offset + bytes < info->offset + info->bytes) {
			u64 old_end = info->offset + info->bytes;

			info->offset = offset + bytes;
			info->bytes = old_end - info->offset;
			ret = link_free_space(block_group, info);
			WARN_ON(ret);
			if (ret)
				goto out_lock;
		} else {
			/* the hole we're creating ends at the end
			 * of the info struct, just free the info
			 */
			kfree(info);
		}
		spin_unlock(&block_group->tree_lock);

		/* step two, insert a new info struct to cover
		 * anything before the hole
		 */
		ret = btrfs_add_free_space(block_group, old_start,
					   offset - old_start);
		WARN_ON(ret);
		goto out;
	}

	ret = remove_from_bitmap(block_group, info, &offset, &bytes);
	if (ret == -EAGAIN)
		goto again;
	BUG_ON(ret);
out_lock:
	spin_unlock(&block_group->tree_lock);
out:
	return ret;
}

void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes)
			count++;
		printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
		       (unsigned long long)info->offset,
		       (unsigned long long)info->bytes,
		       (info->bitmap) ? "yes" : "no");
	}
	printk(KERN_INFO "block group has cluster?: %s\n",
	       list_empty(&block_group->cluster_list) ? "no" : "yes");
	printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
	       "\n", count);
}

u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space *info;
	struct rb_node *n;
	u64 ret = 0;

	for (n = rb_first(&block_group->free_space_offset); n;
	     n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		ret += info->bytes;
	}

	return ret;
}

/*
 * for a given cluster, put all of its extents back into the free
 * space cache. If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already. In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	bool bitmap;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	bitmap = cluster->points_to_bitmap;
	cluster->block_group = NULL;
	cluster->window_start = 0;
	list_del_init(&cluster->block_group_list);
	cluster->points_to_bitmap = false;

	if (bitmap)
		goto out;

	node = rb_first(&cluster->root);
	while (node) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);
		BUG_ON(entry->bitmap);
		tree_insert_offset(&block_group->free_space_offset,
				   entry->offset, &entry->offset_index, 0);
	}
	cluster->root.rb_node = NULL;

out:
	spin_unlock(&cluster->lock);
	btrfs_put_block_group(block_group);
	return 0;
}

void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space *info;
	struct rb_node *node;
	struct btrfs_free_cluster *cluster;
	struct list_head *head;

	spin_lock(&block_group->tree_lock);
	while ((head = block_group->cluster_list.next) !=
	       &block_group->cluster_list) {
		cluster = list_entry(head, struct btrfs_free_cluster,
				     block_group_list);

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);
		if (need_resched()) {
			spin_unlock(&block_group->tree_lock);
			cond_resched();
			spin_lock(&block_group->tree_lock);
		}
	}

	while ((node = rb_last(&block_group->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		unlink_free_space(block_group, info);
		if (info->bitmap)
			kfree(info->bitmap);
		kfree(info);
		if (need_resched()) {
			spin_unlock(&block_group->tree_lock);
			cond_resched();
			spin_lock(&block_group->tree_lock);
		}
	}

	spin_unlock(&block_group->tree_lock);
}

u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
			       u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 ret = 0;

	spin_lock(&block_group->tree_lock);
	entry = find_free_space(block_group, &offset, &bytes_search, 0);
	if (!entry)
		goto out;

	ret = offset;
	if (entry->bitmap) {
		bitmap_clear_bits(entry, offset, bytes,
				  block_group->sectorsize);
		if (!entry->bytes) {
			unlink_free_space(block_group, entry);
			kfree(entry->bitmap);
			kfree(entry);
			block_group->total_bitmaps--;
			recalculate_thresholds(block_group);
		}
	} else {
		unlink_free_space(block_group, entry);
		entry->offset += bytes;
		entry->bytes -= bytes;
		if (!entry->bytes)
			kfree(entry);
		else
			link_free_space(block_group, entry);
	}

out:
	spin_unlock(&block_group->tree_lock);
	return ret;
}
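
/*
 * a return of 0 means nothing suitable was found; otherwise the
 * returned logical offset has already been removed from the free space
 * rbtree (or cleared in the owning bitmap), so the caller owns
 * [ret, ret + bytes).
 */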

/*
 * given a cluster, put all of its extents back into the free space
 * cache. If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group_cache *block_group,
			       struct btrfs_free_cluster *cluster)
{
	int ret;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	/* now return any extents the cluster had on it */
	spin_lock(&block_group->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&block_group->tree_lock);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}

static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   u64 bytes, u64 min_start)
{
	struct btrfs_free_space *entry;
	int err;
	u64 search_start = cluster->window_start;
	u64 search_bytes = bytes;
	u64 ret = 0;

	spin_lock(&block_group->tree_lock);
	spin_lock(&cluster->lock);

	if (!cluster->points_to_bitmap)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	entry = tree_search_offset(block_group, search_start, 0, 0);

	if (!entry || !entry->bitmap)
		goto out;

	search_start = min_start;
	search_bytes = bytes;

	err = search_bitmap(block_group, entry, &search_start,
			    &search_bytes);
	if (err)
		goto out;

	ret = search_start;
	bitmap_clear_bits(entry, ret, bytes, block_group->sectorsize);
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&block_group->tree_lock);

	return ret;
}

/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start)
{
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	if (cluster->points_to_bitmap)
		return btrfs_alloc_from_bitmap(block_group, cluster, bytes,
					       min_start);

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);

	while (1) {
		if (entry->bytes < bytes || entry->offset < min_start) {
			struct rb_node *node;

			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}
		ret = entry->offset;

		entry->offset += bytes;
		entry->bytes -= bytes;

		if (entry->bytes == 0) {
			rb_erase(&entry->offset_index, &cluster->root);
			kfree(entry);
		}
		break;
	}
out:
	spin_unlock(&cluster->lock);

	return ret;
}

static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes, u64 min_bytes)
{
	unsigned long next_zero;
	unsigned long i;
	unsigned long search_bits;
	unsigned long total_bits;
	unsigned long found_bits;
	unsigned long start = 0;
	unsigned long total_found = 0;
	bool found = false;

	i = offset_to_bit(entry->offset, block_group->sectorsize,
			  max_t(u64, offset, entry->offset));
	search_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
	total_bits = bytes_to_bits(bytes, block_group->sectorsize);

again:
	found_bits = 0;
	for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= search_bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (!found_bits)
		return -1;

	if (!found) {
		start = i;
		found = true;
	}

	total_found += found_bits;

	if (cluster->max_size < found_bits * block_group->sectorsize)
		cluster->max_size = found_bits * block_group->sectorsize;

	if (total_found < total_bits) {
		i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero);
		if (i - start > total_bits * 2) {
			total_found = 0;
			cluster->max_size = 0;
			found = false;
		}
		goto again;
	}

	cluster->window_start = start * block_group->sectorsize +
		entry->offset;
	cluster->points_to_bitmap = true;

	return 0;
}
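
/*
 * in other words: gather runs of at least min_bytes worth of set bits
 * near 'offset' until they add up to 'bytes'.  If the window from the
 * first run to the current bit grows past twice the target we assume
 * the free space is too fragmented and restart the accumulation.
 */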

/*
 * here we try to find a cluster of blocks in a block group. The goal
 * is to find at least bytes free and up to empty_size + bytes free.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -enospc
 */
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	struct btrfs_free_space *next;
	struct btrfs_free_space *last = NULL;
	u64 min_bytes;
	u64 window_start;
	u64 window_free;
	u64 max_extent = 0;
	bool found_bitmap = false;
	int ret;

	/* for metadata, allow allocations with more holes */
	if (btrfs_test_opt(root, SSD_SPREAD)) {
		min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		/*
		 * we want to do larger allocations when we are
		 * flushing out the delayed refs, it helps prevent
		 * making more work as we go along.
		 */
		if (trans->transaction->delayed_refs.flushing)
			min_bytes = max(bytes, (bytes + empty_size) >> 1);
		else
			min_bytes = max(bytes, (bytes + empty_size) >> 4);
	} else
		min_bytes = max(bytes, (bytes + empty_size) >> 2);

	spin_lock(&block_group->tree_lock);
	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}
again:
	entry = tree_search_offset(block_group, offset, found_bitmap, 1);
	if (!entry) {
		ret = -ENOSPC;
		goto out;
	}

	/*
	 * If found_bitmap is true, we exhausted our search for extent entries,
	 * and we just want to search all of the bitmaps that we can find, and
	 * ignore any extent entries we find.
	 */
	while (entry->bitmap || found_bitmap ||
	       (!entry->bitmap && entry->bytes < min_bytes)) {
		struct rb_node *node = rb_next(&entry->offset_index);

		if (entry->bitmap && entry->bytes > bytes + empty_size) {
			ret = btrfs_bitmap_cluster(block_group, entry, cluster,
						   offset, bytes + empty_size,
						   min_bytes);
			if (!ret)
				goto got_it;
		}

		if (!node) {
			ret = -ENOSPC;
			goto out;
		}
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	/*
	 * We already searched all the extent entries from the passed in offset
	 * to the end and didn't find enough space for the cluster, and we also
	 * didn't find any bitmaps that met our criteria, just go ahead and exit
	 */
	if (found_bitmap) {
		ret = -ENOSPC;
		goto out;
	}

	cluster->points_to_bitmap = false;
	window_start = entry->offset;
	window_free = entry->bytes;
	last = entry;
	max_extent = entry->bytes;

	while (1) {
		/* our window is just right, let's fill it */
		if (window_free >= bytes + empty_size)
			break;

		node = rb_next(&last->offset_index);
		if (!node) {
			if (found_bitmap)
				goto again;
			ret = -ENOSPC;
			goto out;
		}
		next = rb_entry(node, struct btrfs_free_space, offset_index);

		/*
		 * we found a bitmap, so if this search doesn't result in a
		 * cluster, we know to go and search again for the bitmaps and
		 * start looking for space there
		 */
		if (next->bitmap) {
			if (!found_bitmap)
				offset = next->offset;
			found_bitmap = true;
			last = next;
			continue;
		}

		/*
		 * we haven't filled the empty size and the window is
		 * very large. reset and try again
		 */
		if (next->offset - (last->offset + last->bytes) > 128 * 1024 ||
		    next->offset - window_start > (bytes + empty_size) * 2) {
			entry = next;
			window_start = entry->offset;
			window_free = entry->bytes;
			last = entry;
			max_extent = 0;
		} else {
			last = next;
			window_free += next->bytes;
			if (entry->bytes > max_extent)
				max_extent = entry->bytes;
		}
	}

	cluster->window_start = entry->offset;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 *
	 * The cluster includes an rbtree, but only uses the offset index
	 * of each free space cache entry.
	 */
	while (1) {
		node = rb_next(&entry->offset_index);
		if (entry->bitmap && node) {
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		} else if (entry->bitmap && !node) {
			break;
		}

		rb_erase(&entry->offset_index, &block_group->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		BUG_ON(ret);

		if (!node || entry == last)
			break;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	cluster->max_size = max_extent;
got_it:
	ret = 0;
	atomic_inc(&block_group->count);
	list_add_tail(&cluster->block_group_list, &block_group->cluster_list);
	cluster->block_group = block_group;
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&block_group->tree_lock);

	return ret;
}

/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root.rb_node = NULL;
	cluster->max_size = 0;
	cluster->points_to_bitmap = false;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}