Commit | Line | Data
0f9dd46c JB |
1 | /* |
2 | * Copyright (C) 2008 Red Hat. All rights reserved. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or | |
5 | * modify it under the terms of the GNU General Public | |
6 | * License v2 as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, | |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
11 | * General Public License for more details. | |
12 | * | |
13 | * You should have received a copy of the GNU General Public | |
14 | * License along with this program; if not, write to the | |
15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | |
16 | * Boston, MA 02111-1307, USA. |
17 | */ | |
18 | ||
96303081 | 19 | #include <linux/pagemap.h> |
0f9dd46c | 20 | #include <linux/sched.h> |
5a0e3ad6 | 21 | #include <linux/slab.h> |
96303081 | 22 | #include <linux/math64.h> |
6ab60601 | 23 | #include <linux/ratelimit.h> |
0f9dd46c | 24 | #include "ctree.h" |
fa9c0d79 CM |
25 | #include "free-space-cache.h" |
26 | #include "transaction.h" | |
0af3d00b | 27 | #include "disk-io.h" |
43be2146 | 28 | #include "extent_io.h" |
581bb050 | 29 | #include "inode-map.h" |
04216820 | 30 | #include "volumes.h" |
fa9c0d79 | 31 | |
96303081 JB |
32 | #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) |
33 | #define MAX_CACHE_BYTES_PER_GIG (32 * 1024) | |
0f9dd46c | 34 | |
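A worked example of what these constants imply, assuming the common 4 KiB page size and a 4 KiB ctl->unit (the usual case for block groups; both values are assumptions here, not guaranteed by this header):

/*
 * BITS_PER_BITMAP          = 4096 * 8      = 32768 bits per bitmap entry
 * bytes covered per bitmap = 32768 * 4096  = 128 MiB of block group space
 * MAX_CACHE_BYTES_PER_GIG  = 32 KiB, used elsewhere in this file to cap
 *                            bitmap memory per GiB of space
 */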
55507ce3 FM |
35 | struct btrfs_trim_range { |
36 | u64 start; | |
37 | u64 bytes; | |
38 | struct list_head list; | |
39 | }; | |
40 | ||
34d52cb6 | 41 | static int link_free_space(struct btrfs_free_space_ctl *ctl, |
0cb59c99 | 42 | struct btrfs_free_space *info); |
cd023e7b JB |
43 | static void unlink_free_space(struct btrfs_free_space_ctl *ctl, |
44 | struct btrfs_free_space *info); | |
0cb59c99 | 45 | |
0414efae LZ |
46 | static struct inode *__lookup_free_space_inode(struct btrfs_root *root, |
47 | struct btrfs_path *path, | |
48 | u64 offset) | |
0af3d00b JB |
49 | { |
50 | struct btrfs_key key; | |
51 | struct btrfs_key location; | |
52 | struct btrfs_disk_key disk_key; | |
53 | struct btrfs_free_space_header *header; | |
54 | struct extent_buffer *leaf; | |
55 | struct inode *inode = NULL; | |
56 | int ret; | |
57 | ||
0af3d00b | 58 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; |
0414efae | 59 | key.offset = offset; |
0af3d00b JB |
60 | key.type = 0; |
61 | ||
62 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
63 | if (ret < 0) | |
64 | return ERR_PTR(ret); | |
65 | if (ret > 0) { | |
b3b4aa74 | 66 | btrfs_release_path(path); |
0af3d00b JB |
67 | return ERR_PTR(-ENOENT); |
68 | } | |
69 | ||
70 | leaf = path->nodes[0]; | |
71 | header = btrfs_item_ptr(leaf, path->slots[0], | |
72 | struct btrfs_free_space_header); | |
73 | btrfs_free_space_key(leaf, header, &disk_key); | |
74 | btrfs_disk_key_to_cpu(&location, &disk_key); | |
b3b4aa74 | 75 | btrfs_release_path(path); |
0af3d00b JB |
76 | |
77 | inode = btrfs_iget(root->fs_info->sb, &location, root, NULL); | |
78 | if (!inode) | |
79 | return ERR_PTR(-ENOENT); | |
80 | if (IS_ERR(inode)) | |
81 | return inode; | |
82 | if (is_bad_inode(inode)) { | |
83 | iput(inode); | |
84 | return ERR_PTR(-ENOENT); | |
85 | } | |
86 | ||
528c0327 | 87 | mapping_set_gfp_mask(inode->i_mapping, |
2b108268 CM |
88 | mapping_gfp_mask(inode->i_mapping) & |
89 | ~(GFP_NOFS & ~__GFP_HIGHMEM)); | |
adae52b9 | 90 | |
0414efae LZ |
91 | return inode; |
92 | } | |
93 | ||
94 | struct inode *lookup_free_space_inode(struct btrfs_root *root, | |
95 | struct btrfs_block_group_cache | |
96 | *block_group, struct btrfs_path *path) | |
97 | { | |
98 | struct inode *inode = NULL; | |
5b0e95bf | 99 | u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW; |
0414efae LZ |
100 | |
101 | spin_lock(&block_group->lock); | |
102 | if (block_group->inode) | |
103 | inode = igrab(block_group->inode); | |
104 | spin_unlock(&block_group->lock); | |
105 | if (inode) | |
106 | return inode; | |
107 | ||
108 | inode = __lookup_free_space_inode(root, path, | |
109 | block_group->key.objectid); | |
110 | if (IS_ERR(inode)) | |
111 | return inode; | |
112 | ||
0af3d00b | 113 | spin_lock(&block_group->lock); |
5b0e95bf | 114 | if (!((BTRFS_I(inode)->flags & flags) == flags)) { |
c2cf52eb SK |
115 | btrfs_info(root->fs_info, |
116 | "Old style space inode found, converting."); | |
5b0e95bf JB |
117 | BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM | |
118 | BTRFS_INODE_NODATACOW; | |
2f356126 JB |
119 | block_group->disk_cache_state = BTRFS_DC_CLEAR; |
120 | } | |
121 | ||
300e4f8a | 122 | if (!block_group->iref) { |
0af3d00b JB |
123 | block_group->inode = igrab(inode); |
124 | block_group->iref = 1; | |
125 | } | |
126 | spin_unlock(&block_group->lock); | |
127 | ||
128 | return inode; | |
129 | } | |
130 | ||
48a3b636 ES |
131 | static int __create_free_space_inode(struct btrfs_root *root, |
132 | struct btrfs_trans_handle *trans, | |
133 | struct btrfs_path *path, | |
134 | u64 ino, u64 offset) | |
0af3d00b JB |
135 | { |
136 | struct btrfs_key key; | |
137 | struct btrfs_disk_key disk_key; | |
138 | struct btrfs_free_space_header *header; | |
139 | struct btrfs_inode_item *inode_item; | |
140 | struct extent_buffer *leaf; | |
5b0e95bf | 141 | u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC; |
0af3d00b JB |
142 | int ret; |
143 | ||
0414efae | 144 | ret = btrfs_insert_empty_inode(trans, root, path, ino); |
0af3d00b JB |
145 | if (ret) |
146 | return ret; | |
147 | ||
5b0e95bf JB |
148 | /* We inline crc's for the free disk space cache */ |
149 | if (ino != BTRFS_FREE_INO_OBJECTID) | |
150 | flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW; | |
151 | ||
0af3d00b JB |
152 | leaf = path->nodes[0]; |
153 | inode_item = btrfs_item_ptr(leaf, path->slots[0], | |
154 | struct btrfs_inode_item); | |
155 | btrfs_item_key(leaf, &disk_key, path->slots[0]); | |
156 | memset_extent_buffer(leaf, 0, (unsigned long)inode_item, | |
157 | sizeof(*inode_item)); | |
158 | btrfs_set_inode_generation(leaf, inode_item, trans->transid); | |
159 | btrfs_set_inode_size(leaf, inode_item, 0); | |
160 | btrfs_set_inode_nbytes(leaf, inode_item, 0); | |
161 | btrfs_set_inode_uid(leaf, inode_item, 0); | |
162 | btrfs_set_inode_gid(leaf, inode_item, 0); | |
163 | btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600); | |
5b0e95bf | 164 | btrfs_set_inode_flags(leaf, inode_item, flags); |
0af3d00b JB |
165 | btrfs_set_inode_nlink(leaf, inode_item, 1); |
166 | btrfs_set_inode_transid(leaf, inode_item, trans->transid); | |
0414efae | 167 | btrfs_set_inode_block_group(leaf, inode_item, offset); |
0af3d00b | 168 | btrfs_mark_buffer_dirty(leaf); |
b3b4aa74 | 169 | btrfs_release_path(path); |
0af3d00b JB |
170 | |
171 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; | |
0414efae | 172 | key.offset = offset; |
0af3d00b | 173 | key.type = 0; |
0af3d00b JB |
174 | ret = btrfs_insert_empty_item(trans, root, path, &key, |
175 | sizeof(struct btrfs_free_space_header)); | |
176 | if (ret < 0) { | |
b3b4aa74 | 177 | btrfs_release_path(path); |
0af3d00b JB |
178 | return ret; |
179 | } | |
c9dc4c65 | 180 | |
0af3d00b JB |
181 | leaf = path->nodes[0]; |
182 | header = btrfs_item_ptr(leaf, path->slots[0], | |
183 | struct btrfs_free_space_header); | |
184 | memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header)); | |
185 | btrfs_set_free_space_key(leaf, header, &disk_key); | |
186 | btrfs_mark_buffer_dirty(leaf); | |
b3b4aa74 | 187 | btrfs_release_path(path); |
0af3d00b JB |
188 | |
189 | return 0; | |
190 | } | |
191 | ||
0414efae LZ |
192 | int create_free_space_inode(struct btrfs_root *root, |
193 | struct btrfs_trans_handle *trans, | |
194 | struct btrfs_block_group_cache *block_group, | |
195 | struct btrfs_path *path) | |
196 | { | |
197 | int ret; | |
198 | u64 ino; | |
199 | ||
200 | ret = btrfs_find_free_objectid(root, &ino); | |
201 | if (ret < 0) | |
202 | return ret; | |
203 | ||
204 | return __create_free_space_inode(root, trans, path, ino, | |
205 | block_group->key.objectid); | |
206 | } | |
207 | ||
7b61cd92 MX |
208 | int btrfs_check_trunc_cache_free_space(struct btrfs_root *root, |
209 | struct btrfs_block_rsv *rsv) | |
0af3d00b | 210 | { |
c8174313 | 211 | u64 needed_bytes; |
7b61cd92 | 212 | int ret; |
c8174313 JB |
213 | |
214 | /* 1 for slack space, 1 for updating the inode */ | |
215 | needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) + | |
216 | btrfs_calc_trans_metadata_size(root, 1); | |
217 | ||
7b61cd92 MX |
218 | spin_lock(&rsv->lock); |
219 | if (rsv->reserved < needed_bytes) | |
220 | ret = -ENOSPC; | |
221 | else | |
222 | ret = 0; | |
223 | spin_unlock(&rsv->lock); | |
4b286cd1 | 224 | return ret; |
7b61cd92 MX |
225 | } |
226 | ||
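A minimal, hypothetical usage sketch of the helper above (the caller and the choice of the global reservation are assumptions, not taken from this file):

/* bail out early if the reservation cannot cover the truncation */
if (btrfs_check_trunc_cache_free_space(root, &root->fs_info->global_block_rsv))
        return;         /* -ENOSPC: not enough metadata space reserved */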
227 | int btrfs_truncate_free_space_cache(struct btrfs_root *root, | |
228 | struct btrfs_trans_handle *trans, | |
1bbc621e | 229 | struct btrfs_block_group_cache *block_group, |
7b61cd92 MX |
230 | struct inode *inode) |
231 | { | |
7b61cd92 | 232 | int ret = 0; |
1bbc621e CM |
233 | struct btrfs_path *path = btrfs_alloc_path(); |
234 | ||
235 | if (!path) { | |
236 | ret = -ENOMEM; | |
237 | goto fail; | |
238 | } | |
239 | ||
240 | if (block_group) { | |
241 | mutex_lock(&trans->transaction->cache_write_mutex); | |
242 | if (!list_empty(&block_group->io_list)) { | |
243 | list_del_init(&block_group->io_list); | |
244 | ||
245 | btrfs_wait_cache_io(root, trans, block_group, | |
246 | &block_group->io_ctl, path, | |
247 | block_group->key.objectid); | |
248 | btrfs_put_block_group(block_group); | |
249 | } | |
250 | ||
251 | /* | |
252 | * now that we've truncated the cache away, it's no longer |
253 | * set up or written |
254 | */ | |
255 | spin_lock(&block_group->lock); | |
256 | block_group->disk_cache_state = BTRFS_DC_CLEAR; | |
257 | spin_unlock(&block_group->lock); | |
258 | } | |
259 | btrfs_free_path(path); | |
0af3d00b | 260 | |
0af3d00b | 261 | btrfs_i_size_write(inode, 0); |
7caef267 | 262 | truncate_pagecache(inode, 0); |
0af3d00b JB |
263 | |
264 | /* | |
265 | * We don't need an orphan item because truncating the free space cache | |
266 | * will never be split across transactions. | |
28ed1345 CM |
267 | * We don't need to check for -EAGAIN because we're a free space |
268 | * cache inode | |
0af3d00b JB |
269 | */ |
270 | ret = btrfs_truncate_inode_items(trans, root, inode, | |
271 | 0, BTRFS_EXTENT_DATA_KEY); | |
272 | if (ret) { | |
1bbc621e | 273 | mutex_unlock(&trans->transaction->cache_write_mutex); |
79787eaa | 274 | btrfs_abort_transaction(trans, root, ret); |
0af3d00b JB |
275 | return ret; |
276 | } | |
277 | ||
82d5902d | 278 | ret = btrfs_update_inode(trans, root, inode); |
1bbc621e CM |
279 | |
280 | if (block_group) | |
281 | mutex_unlock(&trans->transaction->cache_write_mutex); | |
282 | ||
283 | fail: | |
79787eaa JM |
284 | if (ret) |
285 | btrfs_abort_transaction(trans, root, ret); | |
c8174313 | 286 | |
82d5902d | 287 | return ret; |
0af3d00b JB |
288 | } |
289 | ||
9d66e233 JB |
290 | static int readahead_cache(struct inode *inode) |
291 | { | |
292 | struct file_ra_state *ra; | |
293 | unsigned long last_index; | |
294 | ||
295 | ra = kzalloc(sizeof(*ra), GFP_NOFS); | |
296 | if (!ra) | |
297 | return -ENOMEM; | |
298 | ||
299 | file_ra_state_init(ra, inode->i_mapping); | |
300 | last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; | |
301 | ||
302 | page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index); | |
303 | ||
304 | kfree(ra); | |
305 | ||
306 | return 0; | |
307 | } | |
308 | ||
4c6d1d85 | 309 | static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode, |
5349d6c3 | 310 | struct btrfs_root *root, int write) |
a67509c3 | 311 | { |
5349d6c3 MX |
312 | int num_pages; |
313 | int check_crcs = 0; | |
314 | ||
ed6078f7 | 315 | num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE); |
5349d6c3 MX |
316 | |
317 | if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID) | |
318 | check_crcs = 1; | |
319 | ||
320 | /* Make sure we can fit our crcs into the first page */ | |
321 | if (write && check_crcs && | |
322 | (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) | |
323 | return -ENOSPC; | |
324 | ||
4c6d1d85 | 325 | memset(io_ctl, 0, sizeof(struct btrfs_io_ctl)); |
5349d6c3 | 326 | |
31e818fe | 327 | io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS); |
a67509c3 JB |
328 | if (!io_ctl->pages) |
329 | return -ENOMEM; | |
5349d6c3 MX |
330 | |
331 | io_ctl->num_pages = num_pages; | |
a67509c3 | 332 | io_ctl->root = root; |
5349d6c3 | 333 | io_ctl->check_crcs = check_crcs; |
c9dc4c65 | 334 | io_ctl->inode = inode; |
5349d6c3 | 335 | |
a67509c3 JB |
336 | return 0; |
337 | } | |
338 | ||
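One consequence of the crc check in io_ctl_init() above, assuming 4 KiB pages (PAGE_CACHE_SIZE is architecture dependent, so this is illustrative only):

/*
 * num_pages * sizeof(u32) must stay below PAGE_CACHE_SIZE, so with 4 KiB
 * pages at most 1023 pages can carry inline crcs: a cache file of roughly
 * 4 MiB. Anything larger fails the write setup with -ENOSPC.
 */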
4c6d1d85 | 339 | static void io_ctl_free(struct btrfs_io_ctl *io_ctl) |
a67509c3 JB |
340 | { |
341 | kfree(io_ctl->pages); | |
c9dc4c65 | 342 | io_ctl->pages = NULL; |
a67509c3 JB |
343 | } |
344 | ||
4c6d1d85 | 345 | static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl) |
a67509c3 JB |
346 | { |
347 | if (io_ctl->cur) { | |
a67509c3 JB |
348 | io_ctl->cur = NULL; |
349 | io_ctl->orig = NULL; | |
350 | } | |
351 | } | |
352 | ||
4c6d1d85 | 353 | static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear) |
a67509c3 | 354 | { |
b12d6869 | 355 | ASSERT(io_ctl->index < io_ctl->num_pages); |
a67509c3 | 356 | io_ctl->page = io_ctl->pages[io_ctl->index++]; |
2b108268 | 357 | io_ctl->cur = page_address(io_ctl->page); |
a67509c3 JB |
358 | io_ctl->orig = io_ctl->cur; |
359 | io_ctl->size = PAGE_CACHE_SIZE; | |
360 | if (clear) | |
361 | memset(io_ctl->cur, 0, PAGE_CACHE_SIZE); | |
362 | } | |
363 | ||
4c6d1d85 | 364 | static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl) |
a67509c3 JB |
365 | { |
366 | int i; | |
367 | ||
368 | io_ctl_unmap_page(io_ctl); | |
369 | ||
370 | for (i = 0; i < io_ctl->num_pages; i++) { | |
a1ee5a45 LZ |
371 | if (io_ctl->pages[i]) { |
372 | ClearPageChecked(io_ctl->pages[i]); | |
373 | unlock_page(io_ctl->pages[i]); | |
374 | page_cache_release(io_ctl->pages[i]); | |
375 | } | |
a67509c3 JB |
376 | } |
377 | } | |
378 | ||
4c6d1d85 | 379 | static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode, |
a67509c3 JB |
380 | int uptodate) |
381 | { | |
382 | struct page *page; | |
383 | gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); | |
384 | int i; | |
385 | ||
386 | for (i = 0; i < io_ctl->num_pages; i++) { | |
387 | page = find_or_create_page(inode->i_mapping, i, mask); | |
388 | if (!page) { | |
389 | io_ctl_drop_pages(io_ctl); | |
390 | return -ENOMEM; | |
391 | } | |
392 | io_ctl->pages[i] = page; | |
393 | if (uptodate && !PageUptodate(page)) { | |
394 | btrfs_readpage(NULL, page); | |
395 | lock_page(page); | |
396 | if (!PageUptodate(page)) { | |
efe120a0 FH |
397 | btrfs_err(BTRFS_I(inode)->root->fs_info, |
398 | "error reading free space cache"); | |
a67509c3 JB |
399 | io_ctl_drop_pages(io_ctl); |
400 | return -EIO; | |
401 | } | |
402 | } | |
403 | } | |
404 | ||
f7d61dcd JB |
405 | for (i = 0; i < io_ctl->num_pages; i++) { |
406 | clear_page_dirty_for_io(io_ctl->pages[i]); | |
407 | set_page_extent_mapped(io_ctl->pages[i]); | |
408 | } | |
409 | ||
a67509c3 JB |
410 | return 0; |
411 | } | |
412 | ||
4c6d1d85 | 413 | static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation) |
a67509c3 | 414 | { |
528c0327 | 415 | __le64 *val; |
a67509c3 JB |
416 | |
417 | io_ctl_map_page(io_ctl, 1); | |
418 | ||
419 | /* | |
5b0e95bf JB |
420 | * Skip the csum areas. If we don't check crcs then we just have a |
421 | * 64bit chunk at the front of the first page. | |
a67509c3 | 422 | */ |
5b0e95bf JB |
423 | if (io_ctl->check_crcs) { |
424 | io_ctl->cur += (sizeof(u32) * io_ctl->num_pages); | |
425 | io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages); | |
426 | } else { | |
427 | io_ctl->cur += sizeof(u64); | |
428 | io_ctl->size -= sizeof(u64) * 2; | |
429 | } | |
a67509c3 JB |
430 | |
431 | val = io_ctl->cur; | |
432 | *val = cpu_to_le64(generation); | |
433 | io_ctl->cur += sizeof(u64); | |
a67509c3 JB |
434 | } |
435 | ||
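To make the header layout concrete, a sketch of how the first cache page ends up organized by the routines above and below (a reading aid, not code from the file):

/*
 * check_crcs == 1 (any inode except the free-ino cache):
 *   [ u32 crc[num_pages] ][ __le64 generation ][ entries ... ]
 * check_crcs == 0:
 *   [ __le64 generation ][ entries ... ]
 */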
4c6d1d85 | 436 | static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation) |
a67509c3 | 437 | { |
528c0327 | 438 | __le64 *gen; |
a67509c3 | 439 | |
5b0e95bf JB |
440 | /* |
441 | * Skip the crc area. If we don't check crcs then we just have a 64bit | |
442 | * chunk at the front of the first page. | |
443 | */ | |
444 | if (io_ctl->check_crcs) { | |
445 | io_ctl->cur += sizeof(u32) * io_ctl->num_pages; | |
446 | io_ctl->size -= sizeof(u64) + | |
447 | (sizeof(u32) * io_ctl->num_pages); | |
448 | } else { | |
449 | io_ctl->cur += sizeof(u64); | |
450 | io_ctl->size -= sizeof(u64) * 2; | |
451 | } | |
a67509c3 | 452 | |
a67509c3 JB |
453 | gen = io_ctl->cur; |
454 | if (le64_to_cpu(*gen) != generation) { | |
efe120a0 | 455 | printk_ratelimited(KERN_ERR "BTRFS: space cache generation " |
a67509c3 JB |
456 | "(%Lu) does not match inode (%Lu)\n", *gen, |
457 | generation); | |
458 | io_ctl_unmap_page(io_ctl); | |
459 | return -EIO; | |
460 | } | |
461 | io_ctl->cur += sizeof(u64); | |
5b0e95bf JB |
462 | return 0; |
463 | } | |
464 | ||
4c6d1d85 | 465 | static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index) |
5b0e95bf JB |
466 | { |
467 | u32 *tmp; | |
468 | u32 crc = ~(u32)0; | |
469 | unsigned offset = 0; | |
470 | ||
471 | if (!io_ctl->check_crcs) { | |
472 | io_ctl_unmap_page(io_ctl); | |
473 | return; | |
474 | } | |
475 | ||
476 | if (index == 0) | |
cb54f257 | 477 | offset = sizeof(u32) * io_ctl->num_pages; |
5b0e95bf | 478 | |
b0496686 | 479 | crc = btrfs_csum_data(io_ctl->orig + offset, crc, |
5b0e95bf JB |
480 | PAGE_CACHE_SIZE - offset); |
481 | btrfs_csum_final(crc, (char *)&crc); | |
482 | io_ctl_unmap_page(io_ctl); | |
2b108268 | 483 | tmp = page_address(io_ctl->pages[0]); |
5b0e95bf JB |
484 | tmp += index; |
485 | *tmp = crc; | |
5b0e95bf JB |
486 | } |
487 | ||
4c6d1d85 | 488 | static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index) |
5b0e95bf JB |
489 | { |
490 | u32 *tmp, val; | |
491 | u32 crc = ~(u32)0; | |
492 | unsigned offset = 0; | |
493 | ||
494 | if (!io_ctl->check_crcs) { | |
495 | io_ctl_map_page(io_ctl, 0); | |
496 | return 0; | |
497 | } | |
498 | ||
499 | if (index == 0) | |
500 | offset = sizeof(u32) * io_ctl->num_pages; | |
501 | ||
2b108268 | 502 | tmp = page_address(io_ctl->pages[0]); |
5b0e95bf JB |
503 | tmp += index; |
504 | val = *tmp; | |
5b0e95bf JB |
505 | |
506 | io_ctl_map_page(io_ctl, 0); | |
b0496686 | 507 | crc = btrfs_csum_data(io_ctl->orig + offset, crc, |
5b0e95bf JB |
508 | PAGE_CACHE_SIZE - offset); |
509 | btrfs_csum_final(crc, (char *)&crc); | |
510 | if (val != crc) { | |
efe120a0 | 511 | printk_ratelimited(KERN_ERR "BTRFS: csum mismatch on free " |
5b0e95bf JB |
512 | "space cache\n"); |
513 | io_ctl_unmap_page(io_ctl); | |
514 | return -EIO; | |
515 | } | |
516 | ||
a67509c3 JB |
517 | return 0; |
518 | } | |
519 | ||
4c6d1d85 | 520 | static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes, |
a67509c3 JB |
521 | void *bitmap) |
522 | { | |
523 | struct btrfs_free_space_entry *entry; | |
524 | ||
525 | if (!io_ctl->cur) | |
526 | return -ENOSPC; | |
527 | ||
528 | entry = io_ctl->cur; | |
529 | entry->offset = cpu_to_le64(offset); | |
530 | entry->bytes = cpu_to_le64(bytes); | |
531 | entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP : | |
532 | BTRFS_FREE_SPACE_EXTENT; | |
533 | io_ctl->cur += sizeof(struct btrfs_free_space_entry); | |
534 | io_ctl->size -= sizeof(struct btrfs_free_space_entry); | |
535 | ||
536 | if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) | |
537 | return 0; | |
538 | ||
5b0e95bf | 539 | io_ctl_set_crc(io_ctl, io_ctl->index - 1); |
a67509c3 JB |
540 | |
541 | /* No more pages to map */ | |
542 | if (io_ctl->index >= io_ctl->num_pages) | |
543 | return 0; | |
544 | ||
545 | /* map the next page */ | |
546 | io_ctl_map_page(io_ctl, 1); | |
547 | return 0; | |
548 | } | |
549 | ||
4c6d1d85 | 550 | static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap) |
a67509c3 JB |
551 | { |
552 | if (!io_ctl->cur) | |
553 | return -ENOSPC; | |
554 | ||
555 | /* | |
556 | * If we aren't at the start of the current page, unmap this one and | |
557 | * map the next one if there is any left. | |
558 | */ | |
559 | if (io_ctl->cur != io_ctl->orig) { | |
5b0e95bf | 560 | io_ctl_set_crc(io_ctl, io_ctl->index - 1); |
a67509c3 JB |
561 | if (io_ctl->index >= io_ctl->num_pages) |
562 | return -ENOSPC; | |
563 | io_ctl_map_page(io_ctl, 0); | |
564 | } | |
565 | ||
566 | memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE); | |
5b0e95bf | 567 | io_ctl_set_crc(io_ctl, io_ctl->index - 1); |
a67509c3 JB |
568 | if (io_ctl->index < io_ctl->num_pages) |
569 | io_ctl_map_page(io_ctl, 0); | |
570 | return 0; | |
571 | } | |
572 | ||
4c6d1d85 | 573 | static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl) |
a67509c3 | 574 | { |
5b0e95bf JB |
575 | /* |
576 | * If we're not on the boundary we know we've modified the page and we | |
577 | * need to crc the page. | |
578 | */ | |
579 | if (io_ctl->cur != io_ctl->orig) | |
580 | io_ctl_set_crc(io_ctl, io_ctl->index - 1); | |
581 | else | |
582 | io_ctl_unmap_page(io_ctl); | |
a67509c3 JB |
583 | |
584 | while (io_ctl->index < io_ctl->num_pages) { | |
585 | io_ctl_map_page(io_ctl, 1); | |
5b0e95bf | 586 | io_ctl_set_crc(io_ctl, io_ctl->index - 1); |
a67509c3 JB |
587 | } |
588 | } | |
589 | ||
4c6d1d85 | 590 | static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl, |
5b0e95bf | 591 | struct btrfs_free_space *entry, u8 *type) |
a67509c3 JB |
592 | { |
593 | struct btrfs_free_space_entry *e; | |
2f120c05 JB |
594 | int ret; |
595 | ||
596 | if (!io_ctl->cur) { | |
597 | ret = io_ctl_check_crc(io_ctl, io_ctl->index); | |
598 | if (ret) | |
599 | return ret; | |
600 | } | |
a67509c3 JB |
601 | |
602 | e = io_ctl->cur; | |
603 | entry->offset = le64_to_cpu(e->offset); | |
604 | entry->bytes = le64_to_cpu(e->bytes); | |
5b0e95bf | 605 | *type = e->type; |
a67509c3 JB |
606 | io_ctl->cur += sizeof(struct btrfs_free_space_entry); |
607 | io_ctl->size -= sizeof(struct btrfs_free_space_entry); | |
608 | ||
609 | if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) | |
5b0e95bf | 610 | return 0; |
a67509c3 JB |
611 | |
612 | io_ctl_unmap_page(io_ctl); | |
613 | ||
2f120c05 | 614 | return 0; |
a67509c3 JB |
615 | } |
616 | ||
4c6d1d85 | 617 | static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl, |
5b0e95bf | 618 | struct btrfs_free_space *entry) |
a67509c3 | 619 | { |
5b0e95bf JB |
620 | int ret; |
621 | ||
5b0e95bf JB |
622 | ret = io_ctl_check_crc(io_ctl, io_ctl->index); |
623 | if (ret) | |
624 | return ret; | |
625 | ||
a67509c3 JB |
626 | memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE); |
627 | io_ctl_unmap_page(io_ctl); | |
5b0e95bf JB |
628 | |
629 | return 0; | |
a67509c3 JB |
630 | } |
631 | ||
cd023e7b JB |
632 | /* |
633 | * Since we attach pinned extents after the fact we can have contiguous sections | |
634 | * of free space that are split up in entries. This poses a problem with the | |
635 | * tree logging stuff since it could have allocated across what appears to be 2 | |
636 | * entries since we would have merged the entries when adding the pinned extents | |
637 | * back to the free space cache. So run through the space cache that we just | |
638 | * loaded and merge contiguous entries. This will make the log replay stuff not | |
639 | * blow up and it will make for nicer allocator behavior. | |
640 | */ | |
641 | static void merge_space_tree(struct btrfs_free_space_ctl *ctl) | |
642 | { | |
643 | struct btrfs_free_space *e, *prev = NULL; | |
644 | struct rb_node *n; | |
645 | ||
646 | again: | |
647 | spin_lock(&ctl->tree_lock); | |
648 | for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { | |
649 | e = rb_entry(n, struct btrfs_free_space, offset_index); | |
650 | if (!prev) | |
651 | goto next; | |
652 | if (e->bitmap || prev->bitmap) | |
653 | goto next; | |
654 | if (prev->offset + prev->bytes == e->offset) { | |
655 | unlink_free_space(ctl, prev); | |
656 | unlink_free_space(ctl, e); | |
657 | prev->bytes += e->bytes; | |
658 | kmem_cache_free(btrfs_free_space_cachep, e); | |
659 | link_free_space(ctl, prev); | |
660 | prev = NULL; | |
661 | spin_unlock(&ctl->tree_lock); | |
662 | goto again; | |
663 | } | |
664 | next: | |
665 | prev = e; | |
666 | } | |
667 | spin_unlock(&ctl->tree_lock); | |
668 | } | |
669 | ||
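A made-up example of what the merge pass above does (offsets are illustrative):

/*
 * After load, two plain extent entries [0, 8K) and [8K, 24K) sit next to
 * each other. Since prev->offset + prev->bytes == e->offset, both are
 * unlinked, folded into a single [0, 24K) entry, relinked, and the scan
 * restarts from the head of the tree.
 */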
48a3b636 ES |
670 | static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, |
671 | struct btrfs_free_space_ctl *ctl, | |
672 | struct btrfs_path *path, u64 offset) | |
9d66e233 | 673 | { |
9d66e233 JB |
674 | struct btrfs_free_space_header *header; |
675 | struct extent_buffer *leaf; | |
4c6d1d85 | 676 | struct btrfs_io_ctl io_ctl; |
9d66e233 | 677 | struct btrfs_key key; |
a67509c3 | 678 | struct btrfs_free_space *e, *n; |
b76808fc | 679 | LIST_HEAD(bitmaps); |
9d66e233 JB |
680 | u64 num_entries; |
681 | u64 num_bitmaps; | |
682 | u64 generation; | |
a67509c3 | 683 | u8 type; |
f6a39829 | 684 | int ret = 0; |
9d66e233 | 685 | |
9d66e233 | 686 | /* Nothing in the space cache, goodbye */ |
0414efae | 687 | if (!i_size_read(inode)) |
a67509c3 | 688 | return 0; |
9d66e233 JB |
689 | |
690 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; | |
0414efae | 691 | key.offset = offset; |
9d66e233 JB |
692 | key.type = 0; |
693 | ||
694 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
0414efae | 695 | if (ret < 0) |
a67509c3 | 696 | return 0; |
0414efae | 697 | else if (ret > 0) { |
945d8962 | 698 | btrfs_release_path(path); |
a67509c3 | 699 | return 0; |
9d66e233 JB |
700 | } |
701 | ||
0414efae LZ |
702 | ret = -1; |
703 | ||
9d66e233 JB |
704 | leaf = path->nodes[0]; |
705 | header = btrfs_item_ptr(leaf, path->slots[0], | |
706 | struct btrfs_free_space_header); | |
707 | num_entries = btrfs_free_space_entries(leaf, header); | |
708 | num_bitmaps = btrfs_free_space_bitmaps(leaf, header); | |
709 | generation = btrfs_free_space_generation(leaf, header); | |
945d8962 | 710 | btrfs_release_path(path); |
9d66e233 | 711 | |
e570fd27 MX |
712 | if (!BTRFS_I(inode)->generation) { |
713 | btrfs_info(root->fs_info, | |
714 | "The free space cache file (%llu) is invalid. skip it\n", | |
715 | offset); | |
716 | return 0; | |
717 | } | |
718 | ||
9d66e233 | 719 | if (BTRFS_I(inode)->generation != generation) { |
c2cf52eb SK |
720 | btrfs_err(root->fs_info, |
721 | "free space inode generation (%llu) " | |
722 | "did not match free space cache generation (%llu)", | |
c1c9ff7c | 723 | BTRFS_I(inode)->generation, generation); |
a67509c3 | 724 | return 0; |
9d66e233 JB |
725 | } |
726 | ||
727 | if (!num_entries) | |
a67509c3 | 728 | return 0; |
9d66e233 | 729 | |
5349d6c3 | 730 | ret = io_ctl_init(&io_ctl, inode, root, 0); |
706efc66 LZ |
731 | if (ret) |
732 | return ret; | |
733 | ||
9d66e233 | 734 | ret = readahead_cache(inode); |
0414efae | 735 | if (ret) |
9d66e233 | 736 | goto out; |
9d66e233 | 737 | |
a67509c3 JB |
738 | ret = io_ctl_prepare_pages(&io_ctl, inode, 1); |
739 | if (ret) | |
740 | goto out; | |
9d66e233 | 741 | |
5b0e95bf JB |
742 | ret = io_ctl_check_crc(&io_ctl, 0); |
743 | if (ret) | |
744 | goto free_cache; | |
745 | ||
a67509c3 JB |
746 | ret = io_ctl_check_generation(&io_ctl, generation); |
747 | if (ret) | |
748 | goto free_cache; | |
9d66e233 | 749 | |
a67509c3 JB |
750 | while (num_entries) { |
751 | e = kmem_cache_zalloc(btrfs_free_space_cachep, | |
752 | GFP_NOFS); | |
753 | if (!e) | |
9d66e233 | 754 | goto free_cache; |
9d66e233 | 755 | |
5b0e95bf JB |
756 | ret = io_ctl_read_entry(&io_ctl, e, &type); |
757 | if (ret) { | |
758 | kmem_cache_free(btrfs_free_space_cachep, e); | |
759 | goto free_cache; | |
760 | } | |
761 | ||
a67509c3 JB |
762 | if (!e->bytes) { |
763 | kmem_cache_free(btrfs_free_space_cachep, e); | |
764 | goto free_cache; | |
9d66e233 | 765 | } |
a67509c3 JB |
766 | |
767 | if (type == BTRFS_FREE_SPACE_EXTENT) { | |
768 | spin_lock(&ctl->tree_lock); | |
769 | ret = link_free_space(ctl, e); | |
770 | spin_unlock(&ctl->tree_lock); | |
771 | if (ret) { | |
c2cf52eb SK |
772 | btrfs_err(root->fs_info, |
773 | "Duplicate entries in free space cache, dumping"); | |
a67509c3 | 774 | kmem_cache_free(btrfs_free_space_cachep, e); |
9d66e233 JB |
775 | goto free_cache; |
776 | } | |
a67509c3 | 777 | } else { |
b12d6869 | 778 | ASSERT(num_bitmaps); |
a67509c3 JB |
779 | num_bitmaps--; |
780 | e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); | |
781 | if (!e->bitmap) { | |
782 | kmem_cache_free( | |
783 | btrfs_free_space_cachep, e); | |
9d66e233 JB |
784 | goto free_cache; |
785 | } | |
a67509c3 JB |
786 | spin_lock(&ctl->tree_lock); |
787 | ret = link_free_space(ctl, e); | |
788 | ctl->total_bitmaps++; | |
789 | ctl->op->recalc_thresholds(ctl); | |
790 | spin_unlock(&ctl->tree_lock); | |
791 | if (ret) { | |
c2cf52eb SK |
792 | btrfs_err(root->fs_info, |
793 | "Duplicate entries in free space cache, dumping"); | |
dc89e982 | 794 | kmem_cache_free(btrfs_free_space_cachep, e); |
9d66e233 JB |
795 | goto free_cache; |
796 | } | |
a67509c3 | 797 | list_add_tail(&e->list, &bitmaps); |
9d66e233 JB |
798 | } |
799 | ||
a67509c3 JB |
800 | num_entries--; |
801 | } | |
9d66e233 | 802 | |
2f120c05 JB |
803 | io_ctl_unmap_page(&io_ctl); |
804 | ||
a67509c3 JB |
805 | /* |
806 | * We add the bitmaps at the end of the entries in order that | |
807 | * the bitmap entries are added to the cache. | |
808 | */ | |
809 | list_for_each_entry_safe(e, n, &bitmaps, list) { | |
9d66e233 | 810 | list_del_init(&e->list); |
5b0e95bf JB |
811 | ret = io_ctl_read_bitmap(&io_ctl, e); |
812 | if (ret) | |
813 | goto free_cache; | |
9d66e233 JB |
814 | } |
815 | ||
a67509c3 | 816 | io_ctl_drop_pages(&io_ctl); |
cd023e7b | 817 | merge_space_tree(ctl); |
9d66e233 JB |
818 | ret = 1; |
819 | out: | |
a67509c3 | 820 | io_ctl_free(&io_ctl); |
9d66e233 | 821 | return ret; |
9d66e233 | 822 | free_cache: |
a67509c3 | 823 | io_ctl_drop_pages(&io_ctl); |
0414efae | 824 | __btrfs_remove_free_space_cache(ctl); |
9d66e233 JB |
825 | goto out; |
826 | } | |
827 | ||
0414efae LZ |
828 | int load_free_space_cache(struct btrfs_fs_info *fs_info, |
829 | struct btrfs_block_group_cache *block_group) | |
0cb59c99 | 830 | { |
34d52cb6 | 831 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
0414efae LZ |
832 | struct btrfs_root *root = fs_info->tree_root; |
833 | struct inode *inode; | |
834 | struct btrfs_path *path; | |
5b0e95bf | 835 | int ret = 0; |
0414efae LZ |
836 | bool matched; |
837 | u64 used = btrfs_block_group_used(&block_group->item); | |
838 | ||
0414efae LZ |
839 | /* |
840 | * If this block group has been marked to be cleared for one reason or | |
841 | * another then we can't trust the on disk cache, so just return. | |
842 | */ | |
9d66e233 | 843 | spin_lock(&block_group->lock); |
0414efae LZ |
844 | if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { |
845 | spin_unlock(&block_group->lock); | |
846 | return 0; | |
847 | } | |
9d66e233 | 848 | spin_unlock(&block_group->lock); |
0414efae LZ |
849 | |
850 | path = btrfs_alloc_path(); | |
851 | if (!path) | |
852 | return 0; | |
d53ba474 JB |
853 | path->search_commit_root = 1; |
854 | path->skip_locking = 1; | |
0414efae LZ |
855 | |
856 | inode = lookup_free_space_inode(root, block_group, path); | |
857 | if (IS_ERR(inode)) { | |
858 | btrfs_free_path(path); | |
859 | return 0; | |
860 | } | |
861 | ||
5b0e95bf JB |
862 | /* We may have converted the inode and made the cache invalid. */ |
863 | spin_lock(&block_group->lock); | |
864 | if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { | |
865 | spin_unlock(&block_group->lock); | |
a7e221e9 | 866 | btrfs_free_path(path); |
5b0e95bf JB |
867 | goto out; |
868 | } | |
869 | spin_unlock(&block_group->lock); | |
870 | ||
0414efae LZ |
871 | ret = __load_free_space_cache(fs_info->tree_root, inode, ctl, |
872 | path, block_group->key.objectid); | |
873 | btrfs_free_path(path); | |
874 | if (ret <= 0) | |
875 | goto out; | |
876 | ||
877 | spin_lock(&ctl->tree_lock); | |
878 | matched = (ctl->free_space == (block_group->key.offset - used - | |
879 | block_group->bytes_super)); | |
880 | spin_unlock(&ctl->tree_lock); | |
881 | ||
882 | if (!matched) { | |
883 | __btrfs_remove_free_space_cache(ctl); | |
32d6b47f | 884 | btrfs_warn(fs_info, "block group %llu has wrong amount of free space", |
c2cf52eb | 885 | block_group->key.objectid); |
0414efae LZ |
886 | ret = -1; |
887 | } | |
888 | out: | |
889 | if (ret < 0) { | |
890 | /* This cache is bogus, make sure it gets cleared */ | |
891 | spin_lock(&block_group->lock); | |
892 | block_group->disk_cache_state = BTRFS_DC_CLEAR; | |
893 | spin_unlock(&block_group->lock); | |
82d5902d | 894 | ret = 0; |
0414efae | 895 | |
32d6b47f | 896 | btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuild it now", |
c2cf52eb | 897 | block_group->key.objectid); |
0414efae LZ |
898 | } |
899 | ||
900 | iput(inode); | |
901 | return ret; | |
9d66e233 JB |
902 | } |
903 | ||
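To make the 'matched' consistency check above concrete, a made-up example (the numbers are illustrative, not from any real filesystem):

/*
 * key.offset (block group size) = 1 GiB, used = 300 MiB, bytes_super = 4 MiB
 * expected ctl->free_space      = 1024 - 300 - 4 = 720 MiB
 * Any other value means the cache is stale: it is dropped and
 * disk_cache_state goes back to BTRFS_DC_CLEAR so the group is rescanned.
 */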
d4452bc5 | 904 | static noinline_for_stack |
4c6d1d85 | 905 | int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl, |
d4452bc5 CM |
906 | struct btrfs_free_space_ctl *ctl, |
907 | struct btrfs_block_group_cache *block_group, | |
908 | int *entries, int *bitmaps, | |
909 | struct list_head *bitmap_list) | |
0cb59c99 | 910 | { |
c09544e0 | 911 | int ret; |
d4452bc5 | 912 | struct btrfs_free_cluster *cluster = NULL; |
1bbc621e | 913 | struct btrfs_free_cluster *cluster_locked = NULL; |
d4452bc5 | 914 | struct rb_node *node = rb_first(&ctl->free_space_offset); |
55507ce3 | 915 | struct btrfs_trim_range *trim_entry; |
be1a12a0 | 916 | |
43be2146 | 917 | /* Get the cluster for this block_group if it exists */ |
d4452bc5 | 918 | if (block_group && !list_empty(&block_group->cluster_list)) { |
43be2146 JB |
919 | cluster = list_entry(block_group->cluster_list.next, |
920 | struct btrfs_free_cluster, | |
921 | block_group_list); | |
d4452bc5 | 922 | } |
43be2146 | 923 | |
f75b130e | 924 | if (!node && cluster) { |
1bbc621e CM |
925 | cluster_locked = cluster; |
926 | spin_lock(&cluster_locked->lock); | |
f75b130e JB |
927 | node = rb_first(&cluster->root); |
928 | cluster = NULL; | |
929 | } | |
930 | ||
a67509c3 JB |
931 | /* Write out the extent entries */ |
932 | while (node) { | |
933 | struct btrfs_free_space *e; | |
0cb59c99 | 934 | |
a67509c3 | 935 | e = rb_entry(node, struct btrfs_free_space, offset_index); |
d4452bc5 | 936 | *entries += 1; |
0cb59c99 | 937 | |
d4452bc5 | 938 | ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes, |
a67509c3 JB |
939 | e->bitmap); |
940 | if (ret) | |
d4452bc5 | 941 | goto fail; |
2f356126 | 942 | |
a67509c3 | 943 | if (e->bitmap) { |
d4452bc5 CM |
944 | list_add_tail(&e->list, bitmap_list); |
945 | *bitmaps += 1; | |
2f356126 | 946 | } |
a67509c3 JB |
947 | node = rb_next(node); |
948 | if (!node && cluster) { | |
949 | node = rb_first(&cluster->root); | |
1bbc621e CM |
950 | cluster_locked = cluster; |
951 | spin_lock(&cluster_locked->lock); | |
a67509c3 | 952 | cluster = NULL; |
43be2146 | 953 | } |
a67509c3 | 954 | } |
1bbc621e CM |
955 | if (cluster_locked) { |
956 | spin_unlock(&cluster_locked->lock); | |
957 | cluster_locked = NULL; | |
958 | } | |
55507ce3 FM |
959 | |
960 | /* | |
961 | * Make sure we don't miss any range that was removed from our rbtree | |
962 | * because trimming is running. Otherwise after a umount+mount (or crash | |
963 | * after committing the transaction) we would leak free space and get | |
964 | * an inconsistent free space cache report from fsck. | |
965 | */ | |
966 | list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) { | |
967 | ret = io_ctl_add_entry(io_ctl, trim_entry->start, | |
968 | trim_entry->bytes, NULL); | |
969 | if (ret) | |
970 | goto fail; | |
971 | *entries += 1; | |
972 | } | |
973 | ||
d4452bc5 CM |
974 | return 0; |
975 | fail: | |
1bbc621e CM |
976 | if (cluster_locked) |
977 | spin_unlock(&cluster_locked->lock); | |
d4452bc5 CM |
978 | return -ENOSPC; |
979 | } | |
980 | ||
981 | static noinline_for_stack int | |
982 | update_cache_item(struct btrfs_trans_handle *trans, | |
983 | struct btrfs_root *root, | |
984 | struct inode *inode, | |
985 | struct btrfs_path *path, u64 offset, | |
986 | int entries, int bitmaps) | |
987 | { | |
988 | struct btrfs_key key; | |
989 | struct btrfs_free_space_header *header; | |
990 | struct extent_buffer *leaf; | |
991 | int ret; | |
992 | ||
993 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; | |
994 | key.offset = offset; | |
995 | key.type = 0; | |
996 | ||
997 | ret = btrfs_search_slot(trans, root, &key, path, 0, 1); | |
998 | if (ret < 0) { | |
999 | clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, | |
1000 | EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL, | |
1001 | GFP_NOFS); | |
1002 | goto fail; | |
1003 | } | |
1004 | leaf = path->nodes[0]; | |
1005 | if (ret > 0) { | |
1006 | struct btrfs_key found_key; | |
1007 | ASSERT(path->slots[0]); | |
1008 | path->slots[0]--; | |
1009 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | |
1010 | if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || | |
1011 | found_key.offset != offset) { | |
1012 | clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, | |
1013 | inode->i_size - 1, | |
1014 | EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, | |
1015 | NULL, GFP_NOFS); | |
1016 | btrfs_release_path(path); | |
1017 | goto fail; | |
1018 | } | |
1019 | } | |
1020 | ||
1021 | BTRFS_I(inode)->generation = trans->transid; | |
1022 | header = btrfs_item_ptr(leaf, path->slots[0], | |
1023 | struct btrfs_free_space_header); | |
1024 | btrfs_set_free_space_entries(leaf, header, entries); | |
1025 | btrfs_set_free_space_bitmaps(leaf, header, bitmaps); | |
1026 | btrfs_set_free_space_generation(leaf, header, trans->transid); | |
1027 | btrfs_mark_buffer_dirty(leaf); | |
1028 | btrfs_release_path(path); | |
1029 | ||
1030 | return 0; | |
1031 | ||
1032 | fail: | |
1033 | return -1; | |
1034 | } | |
1035 | ||
1036 | static noinline_for_stack int | |
5349d6c3 MX |
1037 | write_pinned_extent_entries(struct btrfs_root *root, |
1038 | struct btrfs_block_group_cache *block_group, | |
4c6d1d85 | 1039 | struct btrfs_io_ctl *io_ctl, |
5349d6c3 | 1040 | int *entries) |
d4452bc5 CM |
1041 | { |
1042 | u64 start, extent_start, extent_end, len; | |
d4452bc5 CM |
1043 | struct extent_io_tree *unpin = NULL; |
1044 | int ret; | |
43be2146 | 1045 | |
5349d6c3 MX |
1046 | if (!block_group) |
1047 | return 0; | |
1048 | ||
a67509c3 JB |
1049 | /* |
1050 | * We want to add any pinned extents to our free space cache | |
1051 | * so we don't leak the space | |
d4452bc5 | 1052 | * |
db804f23 LZ |
1053 | * We shouldn't have switched the pinned extents yet so this is the |
1054 | * right one | |
1055 | */ | |
1056 | unpin = root->fs_info->pinned_extents; | |
1057 | ||
5349d6c3 | 1058 | start = block_group->key.objectid; |
db804f23 | 1059 | |
5349d6c3 | 1060 | while (start < block_group->key.objectid + block_group->key.offset) { |
db804f23 LZ |
1061 | ret = find_first_extent_bit(unpin, start, |
1062 | &extent_start, &extent_end, | |
e6138876 | 1063 | EXTENT_DIRTY, NULL); |
5349d6c3 MX |
1064 | if (ret) |
1065 | return 0; | |
0cb59c99 | 1066 | |
a67509c3 | 1067 | /* This pinned extent is out of our range */ |
db804f23 | 1068 | if (extent_start >= block_group->key.objectid + |
a67509c3 | 1069 | block_group->key.offset) |
5349d6c3 | 1070 | return 0; |
2f356126 | 1071 | |
db804f23 LZ |
1072 | extent_start = max(extent_start, start); |
1073 | extent_end = min(block_group->key.objectid + | |
1074 | block_group->key.offset, extent_end + 1); | |
1075 | len = extent_end - extent_start; | |
0cb59c99 | 1076 | |
d4452bc5 CM |
1077 | *entries += 1; |
1078 | ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL); | |
a67509c3 | 1079 | if (ret) |
5349d6c3 | 1080 | return -ENOSPC; |
0cb59c99 | 1081 | |
db804f23 | 1082 | start = extent_end; |
a67509c3 | 1083 | } |
0cb59c99 | 1084 | |
5349d6c3 MX |
1085 | return 0; |
1086 | } | |
1087 | ||
1088 | static noinline_for_stack int | |
4c6d1d85 | 1089 | write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list) |
5349d6c3 MX |
1090 | { |
1091 | struct list_head *pos, *n; | |
1092 | int ret; | |
1093 | ||
0cb59c99 | 1094 | /* Write out the bitmaps */ |
d4452bc5 | 1095 | list_for_each_safe(pos, n, bitmap_list) { |
0cb59c99 JB |
1096 | struct btrfs_free_space *entry = |
1097 | list_entry(pos, struct btrfs_free_space, list); | |
1098 | ||
d4452bc5 | 1099 | ret = io_ctl_add_bitmap(io_ctl, entry->bitmap); |
a67509c3 | 1100 | if (ret) |
5349d6c3 | 1101 | return -ENOSPC; |
0cb59c99 | 1102 | list_del_init(&entry->list); |
be1a12a0 JB |
1103 | } |
1104 | ||
5349d6c3 MX |
1105 | return 0; |
1106 | } | |
0cb59c99 | 1107 | |
5349d6c3 MX |
1108 | static int flush_dirty_cache(struct inode *inode) |
1109 | { | |
1110 | int ret; | |
be1a12a0 | 1111 | |
0ef8b726 | 1112 | ret = btrfs_wait_ordered_range(inode, 0, (u64)-1); |
5349d6c3 | 1113 | if (ret) |
0ef8b726 JB |
1114 | clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, |
1115 | EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL, | |
1116 | GFP_NOFS); | |
0cb59c99 | 1117 | |
5349d6c3 | 1118 | return ret; |
d4452bc5 CM |
1119 | } |
1120 | ||
1121 | static void noinline_for_stack | |
1122 | cleanup_write_cache_enospc(struct inode *inode, | |
4c6d1d85 | 1123 | struct btrfs_io_ctl *io_ctl, |
d4452bc5 CM |
1124 | struct extent_state **cached_state, |
1125 | struct list_head *bitmap_list) | |
1126 | { | |
1127 | struct list_head *pos, *n; | |
5349d6c3 | 1128 | |
d4452bc5 CM |
1129 | list_for_each_safe(pos, n, bitmap_list) { |
1130 | struct btrfs_free_space *entry = | |
1131 | list_entry(pos, struct btrfs_free_space, list); | |
1132 | list_del_init(&entry->list); | |
0cb59c99 | 1133 | } |
d4452bc5 CM |
1134 | io_ctl_drop_pages(io_ctl); |
1135 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, | |
1136 | i_size_read(inode) - 1, cached_state, | |
1137 | GFP_NOFS); | |
1138 | } | |
549b4fdb | 1139 | |
c9dc4c65 CM |
1140 | int btrfs_wait_cache_io(struct btrfs_root *root, |
1141 | struct btrfs_trans_handle *trans, | |
1142 | struct btrfs_block_group_cache *block_group, | |
1143 | struct btrfs_io_ctl *io_ctl, | |
1144 | struct btrfs_path *path, u64 offset) | |
1145 | { | |
1146 | int ret; | |
1147 | struct inode *inode = io_ctl->inode; | |
1148 | ||
1bbc621e CM |
1149 | if (!inode) |
1150 | return 0; | |
1151 | ||
85db36cf CM |
1152 | if (block_group) |
1153 | root = root->fs_info->tree_root; | |
c9dc4c65 CM |
1154 | |
1155 | /* Flush the dirty pages in the cache file. */ | |
1156 | ret = flush_dirty_cache(inode); | |
1157 | if (ret) | |
1158 | goto out; | |
1159 | ||
1160 | /* Update the cache item to tell everyone this cache file is valid. */ | |
1161 | ret = update_cache_item(trans, root, inode, path, offset, | |
1162 | io_ctl->entries, io_ctl->bitmaps); | |
1163 | out: | |
1164 | io_ctl_free(io_ctl); | |
1165 | if (ret) { | |
1166 | invalidate_inode_pages2(inode->i_mapping); | |
1167 | BTRFS_I(inode)->generation = 0; | |
1168 | if (block_group) { | |
1169 | #ifdef DEBUG | |
1170 | btrfs_err(root->fs_info, | |
1171 | "failed to write free space cache for block group %llu", | |
1172 | block_group->key.objectid); | |
1173 | #endif | |
1174 | } | |
1175 | } | |
1176 | btrfs_update_inode(trans, root, inode); | |
1177 | ||
1178 | if (block_group) { | |
1bbc621e CM |
1179 | /* the dirty list is protected by the dirty_bgs_lock */ |
1180 | spin_lock(&trans->transaction->dirty_bgs_lock); | |
1181 | ||
1182 | /* the disk_cache_state is protected by the block group lock */ | |
c9dc4c65 CM |
1183 | spin_lock(&block_group->lock); |
1184 | ||
1185 | /* | |
1186 | * only mark this as written if we didn't get put back on | |
1bbc621e CM |
1187 | * the dirty list while waiting for IO. Otherwise our |
1188 | * cache state won't be right, and we won't get written again | |
c9dc4c65 CM |
1189 | */ |
1190 | if (!ret && list_empty(&block_group->dirty_list)) | |
1191 | block_group->disk_cache_state = BTRFS_DC_WRITTEN; | |
1192 | else if (ret) | |
1193 | block_group->disk_cache_state = BTRFS_DC_ERROR; | |
1194 | ||
1195 | spin_unlock(&block_group->lock); | |
1bbc621e | 1196 | spin_unlock(&trans->transaction->dirty_bgs_lock); |
c9dc4c65 CM |
1197 | io_ctl->inode = NULL; |
1198 | iput(inode); | |
1199 | } | |
1200 | ||
1201 | return ret; | |
1202 | ||
1203 | } | |
1204 | ||
d4452bc5 CM |
1205 | /** |
1206 | * __btrfs_write_out_cache - write out cached info to an inode | |
1207 | * @root - the root the inode belongs to | |
1208 | * @ctl - the free space cache we are going to write out | |
1209 | * @block_group - the block_group for this cache if it belongs to a block_group | |
1210 | * @trans - the trans handle | |
1211 | * @path - the path to use | |
1212 | * @offset - the offset for the key we'll insert | |
1213 | * | |
1214 | * This function writes out a free space cache struct to disk for quick recovery | |
1215 | * on mount. This will return 0 if it was successful in writing the cache out, |
1216 | * and -1 if it was not. | |
1217 | */ | |
1218 | static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, | |
1219 | struct btrfs_free_space_ctl *ctl, | |
1220 | struct btrfs_block_group_cache *block_group, | |
c9dc4c65 | 1221 | struct btrfs_io_ctl *io_ctl, |
d4452bc5 CM |
1222 | struct btrfs_trans_handle *trans, |
1223 | struct btrfs_path *path, u64 offset) | |
1224 | { | |
1225 | struct extent_state *cached_state = NULL; | |
5349d6c3 | 1226 | LIST_HEAD(bitmap_list); |
d4452bc5 CM |
1227 | int entries = 0; |
1228 | int bitmaps = 0; | |
1229 | int ret; | |
c9dc4c65 | 1230 | int must_iput = 0; |
d4452bc5 CM |
1231 | |
1232 | if (!i_size_read(inode)) | |
1233 | return -1; | |
1234 | ||
c9dc4c65 CM |
1235 | WARN_ON(io_ctl->pages); |
1236 | ret = io_ctl_init(io_ctl, inode, root, 1); | |
d4452bc5 CM |
1237 | if (ret) |
1238 | return -1; | |
1239 | ||
e570fd27 MX |
1240 | if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) { |
1241 | down_write(&block_group->data_rwsem); | |
1242 | spin_lock(&block_group->lock); | |
1243 | if (block_group->delalloc_bytes) { | |
1244 | block_group->disk_cache_state = BTRFS_DC_WRITTEN; | |
1245 | spin_unlock(&block_group->lock); | |
1246 | up_write(&block_group->data_rwsem); | |
1247 | BTRFS_I(inode)->generation = 0; | |
1248 | ret = 0; | |
c9dc4c65 | 1249 | must_iput = 1; |
e570fd27 MX |
1250 | goto out; |
1251 | } | |
1252 | spin_unlock(&block_group->lock); | |
1253 | } | |
1254 | ||
d4452bc5 | 1255 | /* Lock all pages first so we can lock the extent safely. */ |
c9dc4c65 | 1256 | io_ctl_prepare_pages(io_ctl, inode, 0); |
d4452bc5 CM |
1257 | |
1258 | lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, | |
1259 | 0, &cached_state); | |
1260 | ||
c9dc4c65 | 1261 | io_ctl_set_generation(io_ctl, trans->transid); |
d4452bc5 | 1262 | |
55507ce3 | 1263 | mutex_lock(&ctl->cache_writeout_mutex); |
5349d6c3 | 1264 | /* Write out the extent entries in the free space cache */ |
1bbc621e | 1265 | spin_lock(&ctl->tree_lock); |
c9dc4c65 | 1266 | ret = write_cache_extent_entries(io_ctl, ctl, |
d4452bc5 CM |
1267 | block_group, &entries, &bitmaps, |
1268 | &bitmap_list); | |
1bbc621e | 1269 | spin_unlock(&ctl->tree_lock); |
55507ce3 FM |
1270 | if (ret) { |
1271 | mutex_unlock(&ctl->cache_writeout_mutex); | |
d4452bc5 | 1272 | goto out_nospc; |
55507ce3 | 1273 | } |
d4452bc5 | 1274 | |
5349d6c3 MX |
1275 | /* |
1276 | * Some spaces that are freed in the current transaction are pinned, | |
1277 | * they will be added into free space cache after the transaction is | |
1278 | * committed, we shouldn't lose them. | |
1bbc621e CM |
1279 | * |
1280 | * If this changes while we are working we'll get added back to | |
1281 | * the dirty list and redo it. No locking needed | |
5349d6c3 | 1282 | */ |
c9dc4c65 | 1283 | ret = write_pinned_extent_entries(root, block_group, io_ctl, &entries); |
55507ce3 FM |
1284 | if (ret) { |
1285 | mutex_unlock(&ctl->cache_writeout_mutex); | |
5349d6c3 | 1286 | goto out_nospc; |
55507ce3 | 1287 | } |
5349d6c3 | 1288 | |
55507ce3 FM |
1289 | /* |
1290 | * At last, we write out all the bitmaps and keep cache_writeout_mutex | |
1291 | * locked while doing it because a concurrent trim can be manipulating | |
1292 | * or freeing the bitmap. | |
1293 | */ | |
1bbc621e | 1294 | spin_lock(&ctl->tree_lock); |
c9dc4c65 | 1295 | ret = write_bitmap_entries(io_ctl, &bitmap_list); |
1bbc621e | 1296 | spin_unlock(&ctl->tree_lock); |
55507ce3 | 1297 | mutex_unlock(&ctl->cache_writeout_mutex); |
5349d6c3 MX |
1298 | if (ret) |
1299 | goto out_nospc; | |
1300 | ||
1301 | /* Zero out the rest of the pages just to make sure */ | |
c9dc4c65 | 1302 | io_ctl_zero_remaining_pages(io_ctl); |
d4452bc5 | 1303 | |
5349d6c3 | 1304 | /* Everything is written out, now we dirty the pages in the file. */ |
c9dc4c65 | 1305 | ret = btrfs_dirty_pages(root, inode, io_ctl->pages, io_ctl->num_pages, |
5349d6c3 MX |
1306 | 0, i_size_read(inode), &cached_state); |
1307 | if (ret) | |
d4452bc5 | 1308 | goto out_nospc; |
5349d6c3 | 1309 | |
e570fd27 MX |
1310 | if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) |
1311 | up_write(&block_group->data_rwsem); | |
5349d6c3 MX |
1312 | /* |
1313 | * Release the pages and unlock the extent, we will flush | |
1314 | * them out later | |
1315 | */ | |
c9dc4c65 | 1316 | io_ctl_drop_pages(io_ctl); |
5349d6c3 MX |
1317 | |
1318 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, | |
1319 | i_size_read(inode) - 1, &cached_state, GFP_NOFS); | |
1320 | ||
c9dc4c65 CM |
1321 | /* |
1322 | * at this point the pages are under IO and we're happy, | |
1323 | * The caller is responsible for waiting on them and updating |
1324 | * the cache and the inode |
1325 | */ | |
1326 | io_ctl->entries = entries; | |
1327 | io_ctl->bitmaps = bitmaps; | |
1328 | ||
1329 | ret = btrfs_fdatawrite_range(inode, 0, (u64)-1); | |
5349d6c3 | 1330 | if (ret) |
d4452bc5 CM |
1331 | goto out; |
1332 | ||
c9dc4c65 CM |
1333 | return 0; |
1334 | ||
2f356126 | 1335 | out: |
c9dc4c65 CM |
1336 | io_ctl->inode = NULL; |
1337 | io_ctl_free(io_ctl); | |
5349d6c3 | 1338 | if (ret) { |
a67509c3 | 1339 | invalidate_inode_pages2(inode->i_mapping); |
0cb59c99 JB |
1340 | BTRFS_I(inode)->generation = 0; |
1341 | } | |
0cb59c99 | 1342 | btrfs_update_inode(trans, root, inode); |
c9dc4c65 CM |
1343 | if (must_iput) |
1344 | iput(inode); | |
5349d6c3 | 1345 | return ret; |
a67509c3 JB |
1346 | |
1347 | out_nospc: | |
c9dc4c65 | 1348 | cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list); |
e570fd27 MX |
1349 | |
1350 | if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) | |
1351 | up_write(&block_group->data_rwsem); | |
1352 | ||
a67509c3 | 1353 | goto out; |
0414efae LZ |
1354 | } |
1355 | ||
1356 | int btrfs_write_out_cache(struct btrfs_root *root, | |
1357 | struct btrfs_trans_handle *trans, | |
1358 | struct btrfs_block_group_cache *block_group, | |
1359 | struct btrfs_path *path) | |
1360 | { | |
1361 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | |
1362 | struct inode *inode; | |
1363 | int ret = 0; | |
1364 | ||
1365 | root = root->fs_info->tree_root; | |
1366 | ||
1367 | spin_lock(&block_group->lock); | |
1368 | if (block_group->disk_cache_state < BTRFS_DC_SETUP) { | |
1369 | spin_unlock(&block_group->lock); | |
e570fd27 MX |
1370 | return 0; |
1371 | } | |
0414efae LZ |
1372 | spin_unlock(&block_group->lock); |
1373 | ||
1374 | inode = lookup_free_space_inode(root, block_group, path); | |
1375 | if (IS_ERR(inode)) | |
1376 | return 0; | |
1377 | ||
c9dc4c65 CM |
1378 | ret = __btrfs_write_out_cache(root, inode, ctl, block_group, |
1379 | &block_group->io_ctl, trans, | |
0414efae | 1380 | path, block_group->key.objectid); |
c09544e0 | 1381 | if (ret) { |
c09544e0 | 1382 | #ifdef DEBUG |
c2cf52eb SK |
1383 | btrfs_err(root->fs_info, |
1384 | "failed to write free space cache for block group %llu", | |
1385 | block_group->key.objectid); | |
c09544e0 | 1386 | #endif |
c9dc4c65 CM |
1387 | spin_lock(&block_group->lock); |
1388 | block_group->disk_cache_state = BTRFS_DC_ERROR; | |
1389 | spin_unlock(&block_group->lock); | |
1390 | ||
1391 | block_group->io_ctl.inode = NULL; | |
1392 | iput(inode); | |
0414efae LZ |
1393 | } |
1394 | ||
c9dc4c65 CM |
1395 | /* |
1396 | * if ret == 0 the caller is expected to call btrfs_wait_cache_io | |
1397 | * to wait for IO and put the inode | |
1398 | */ | |
1399 | ||
0cb59c99 JB |
1400 | return ret; |
1401 | } | |
1402 | ||
34d52cb6 | 1403 | static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit, |
96303081 | 1404 | u64 offset) |
0f9dd46c | 1405 | { |
b12d6869 | 1406 | ASSERT(offset >= bitmap_start); |
96303081 | 1407 | offset -= bitmap_start; |
34d52cb6 | 1408 | return (unsigned long)(div_u64(offset, unit)); |
96303081 | 1409 | } |
0f9dd46c | 1410 | |
34d52cb6 | 1411 | static inline unsigned long bytes_to_bits(u64 bytes, u32 unit) |
96303081 | 1412 | { |
34d52cb6 | 1413 | return (unsigned long)(div_u64(bytes, unit)); |
96303081 | 1414 | } |
0f9dd46c | 1415 | |
34d52cb6 | 1416 | static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl, |
96303081 JB |
1417 | u64 offset) |
1418 | { | |
1419 | u64 bitmap_start; | |
b8b93add | 1420 | u32 bytes_per_bitmap; |
0f9dd46c | 1421 | |
34d52cb6 LZ |
1422 | bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit; |
1423 | bitmap_start = offset - ctl->start; | |
b8b93add | 1424 | bitmap_start = div_u64(bitmap_start, bytes_per_bitmap); |
96303081 | 1425 | bitmap_start *= bytes_per_bitmap; |
34d52cb6 | 1426 | bitmap_start += ctl->start; |
0f9dd46c | 1427 | |
96303081 | 1428 | return bitmap_start; |
0f9dd46c JB |
1429 | } |
1430 | ||
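A worked example of the three helpers above with assumed numbers (ctl->start = 1 GiB, ctl->unit = 4096, 4 KiB pages; all assumptions for illustration):

/*
 * bytes_per_bitmap                       = 32768 * 4096 = 128 MiB
 * offset_to_bitmap(ctl, 1 GiB + 200 MiB) = 1 GiB + 128 MiB
 * offset_to_bit(1 GiB + 128 MiB, 4096, 1 GiB + 200 MiB)
 *                                        = 72 MiB / 4096 = 18432
 * i.e. the offset falls in the group's second bitmap, at bit 18432.
 */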
96303081 JB |
1431 | static int tree_insert_offset(struct rb_root *root, u64 offset, |
1432 | struct rb_node *node, int bitmap) | |
0f9dd46c JB |
1433 | { |
1434 | struct rb_node **p = &root->rb_node; | |
1435 | struct rb_node *parent = NULL; | |
1436 | struct btrfs_free_space *info; | |
1437 | ||
1438 | while (*p) { | |
1439 | parent = *p; | |
96303081 | 1440 | info = rb_entry(parent, struct btrfs_free_space, offset_index); |
0f9dd46c | 1441 | |
96303081 | 1442 | if (offset < info->offset) { |
0f9dd46c | 1443 | p = &(*p)->rb_left; |
96303081 | 1444 | } else if (offset > info->offset) { |
0f9dd46c | 1445 | p = &(*p)->rb_right; |
96303081 JB |
1446 | } else { |
1447 | /* | |
1448 | * we could have a bitmap entry and an extent entry | |
1449 | * share the same offset. If this is the case, we want | |
1450 | * the extent entry to always be found first if we do a | |
1451 | * linear search through the tree, since we want to have | |
1452 | * the quickest allocation time, and allocating from an | |
1453 | * extent is faster than allocating from a bitmap. So | |
1454 | * if we're inserting a bitmap and we find an entry at | |
1455 | * this offset, we want to go right, or after this entry | |
1456 | * logically. If we are inserting an extent and we've | |
1457 | * found a bitmap, we want to go left, or before | |
1458 | * logically. | |
1459 | */ | |
1460 | if (bitmap) { | |
207dde82 JB |
1461 | if (info->bitmap) { |
1462 | WARN_ON_ONCE(1); | |
1463 | return -EEXIST; | |
1464 | } | |
96303081 JB |
1465 | p = &(*p)->rb_right; |
1466 | } else { | |
207dde82 JB |
1467 | if (!info->bitmap) { |
1468 | WARN_ON_ONCE(1); | |
1469 | return -EEXIST; | |
1470 | } | |
96303081 JB |
1471 | p = &(*p)->rb_left; |
1472 | } | |
1473 | } | |
0f9dd46c JB |
1474 | } |
1475 | ||
1476 | rb_link_node(node, parent, p); | |
1477 | rb_insert_color(node, root); | |
1478 | ||
1479 | return 0; | |
1480 | } | |
1481 | ||
1482 | /* | |
70cb0743 JB |
1483 | * searches the tree for the given offset. |
1484 | * | |
96303081 JB |
1485 | * fuzzy - If this is set, then we are trying to make an allocation, and we just |
1486 | * want a section that is at least 'bytes' in size and comes at or after the given |
1487 | * offset. | |
0f9dd46c | 1488 | */ |
96303081 | 1489 | static struct btrfs_free_space * |
34d52cb6 | 1490 | tree_search_offset(struct btrfs_free_space_ctl *ctl, |
96303081 | 1491 | u64 offset, int bitmap_only, int fuzzy) |
0f9dd46c | 1492 | { |
34d52cb6 | 1493 | struct rb_node *n = ctl->free_space_offset.rb_node; |
96303081 JB |
1494 | struct btrfs_free_space *entry, *prev = NULL; |
1495 | ||
1496 | /* find entry that is closest to the 'offset' */ | |
1497 | while (1) { | |
1498 | if (!n) { | |
1499 | entry = NULL; | |
1500 | break; | |
1501 | } | |
0f9dd46c | 1502 | |
0f9dd46c | 1503 | entry = rb_entry(n, struct btrfs_free_space, offset_index); |
96303081 | 1504 | prev = entry; |
0f9dd46c | 1505 | |
96303081 | 1506 | if (offset < entry->offset) |
0f9dd46c | 1507 | n = n->rb_left; |
96303081 | 1508 | else if (offset > entry->offset) |
0f9dd46c | 1509 | n = n->rb_right; |
96303081 | 1510 | else |
0f9dd46c | 1511 | break; |
0f9dd46c JB |
1512 | } |
1513 | ||
96303081 JB |
1514 | if (bitmap_only) { |
1515 | if (!entry) | |
1516 | return NULL; | |
1517 | if (entry->bitmap) | |
1518 | return entry; | |
0f9dd46c | 1519 | |
96303081 JB |
1520 | /* |
1521 | * bitmap entry and extent entry may share same offset, | |
1522 | * in that case, bitmap entry comes after extent entry. | |
1523 | */ | |
1524 | n = rb_next(n); | |
1525 | if (!n) | |
1526 | return NULL; | |
1527 | entry = rb_entry(n, struct btrfs_free_space, offset_index); | |
1528 | if (entry->offset != offset) | |
1529 | return NULL; | |
0f9dd46c | 1530 | |
96303081 JB |
1531 | WARN_ON(!entry->bitmap); |
1532 | return entry; | |
1533 | } else if (entry) { | |
1534 | if (entry->bitmap) { | |
0f9dd46c | 1535 | /* |
96303081 JB |
1536 | * if previous extent entry covers the offset, |
1537 | * we should return it instead of the bitmap entry | |
0f9dd46c | 1538 | */ |
de6c4115 MX |
1539 | n = rb_prev(&entry->offset_index); |
1540 | if (n) { | |
96303081 JB |
1541 | prev = rb_entry(n, struct btrfs_free_space, |
1542 | offset_index); | |
de6c4115 MX |
1543 | if (!prev->bitmap && |
1544 | prev->offset + prev->bytes > offset) | |
1545 | entry = prev; | |
0f9dd46c | 1546 | } |
96303081 JB |
1547 | } |
1548 | return entry; | |
1549 | } | |
1550 | ||
1551 | if (!prev) | |
1552 | return NULL; | |
1553 | ||
1554 | /* find last entry before the 'offset' */ | |
1555 | entry = prev; | |
1556 | if (entry->offset > offset) { | |
1557 | n = rb_prev(&entry->offset_index); | |
1558 | if (n) { | |
1559 | entry = rb_entry(n, struct btrfs_free_space, | |
1560 | offset_index); | |
b12d6869 | 1561 | ASSERT(entry->offset <= offset); |
0f9dd46c | 1562 | } else { |
96303081 JB |
1563 | if (fuzzy) |
1564 | return entry; | |
1565 | else | |
1566 | return NULL; | |
0f9dd46c JB |
1567 | } |
1568 | } | |
1569 | ||
96303081 | 1570 | if (entry->bitmap) { |
de6c4115 MX |
1571 | n = rb_prev(&entry->offset_index); |
1572 | if (n) { | |
96303081 JB |
1573 | prev = rb_entry(n, struct btrfs_free_space, |
1574 | offset_index); | |
de6c4115 MX |
1575 | if (!prev->bitmap && |
1576 | prev->offset + prev->bytes > offset) | |
1577 | return prev; | |
96303081 | 1578 | } |
34d52cb6 | 1579 | if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset) |
96303081 JB |
1580 | return entry; |
1581 | } else if (entry->offset + entry->bytes > offset) | |
1582 | return entry; | |
1583 | ||
1584 | if (!fuzzy) | |
1585 | return NULL; | |
1586 | ||
1587 | while (1) { | |
1588 | if (entry->bitmap) { | |
1589 | if (entry->offset + BITS_PER_BITMAP * | |
34d52cb6 | 1590 | ctl->unit > offset) |
96303081 JB |
1591 | break; |
1592 | } else { | |
1593 | if (entry->offset + entry->bytes > offset) | |
1594 | break; | |
1595 | } | |
1596 | ||
1597 | n = rb_next(&entry->offset_index); | |
1598 | if (!n) | |
1599 | return NULL; | |
1600 | entry = rb_entry(n, struct btrfs_free_space, offset_index); | |
1601 | } | |
1602 | return entry; | |
0f9dd46c JB |
1603 | } |
1604 | ||
f333adb5 | 1605 | static inline void |
34d52cb6 | 1606 | __unlink_free_space(struct btrfs_free_space_ctl *ctl, |
f333adb5 | 1607 | struct btrfs_free_space *info) |
0f9dd46c | 1608 | { |
34d52cb6 LZ |
1609 | rb_erase(&info->offset_index, &ctl->free_space_offset); |
1610 | ctl->free_extents--; | |
f333adb5 LZ |
1611 | } |
1612 | ||
34d52cb6 | 1613 | static void unlink_free_space(struct btrfs_free_space_ctl *ctl, |
f333adb5 LZ |
1614 | struct btrfs_free_space *info) |
1615 | { | |
34d52cb6 LZ |
1616 | __unlink_free_space(ctl, info); |
1617 | ctl->free_space -= info->bytes; | |
0f9dd46c JB |
1618 | } |
1619 | ||
34d52cb6 | 1620 | static int link_free_space(struct btrfs_free_space_ctl *ctl, |
0f9dd46c JB |
1621 | struct btrfs_free_space *info) |
1622 | { | |
1623 | int ret = 0; | |
1624 | ||
b12d6869 | 1625 | ASSERT(info->bytes || info->bitmap); |
34d52cb6 | 1626 | ret = tree_insert_offset(&ctl->free_space_offset, info->offset, |
96303081 | 1627 | &info->offset_index, (info->bitmap != NULL)); |
0f9dd46c JB |
1628 | if (ret) |
1629 | return ret; | |
1630 | ||
34d52cb6 LZ |
1631 | ctl->free_space += info->bytes; |
1632 | ctl->free_extents++; | |
96303081 JB |
1633 | return ret; |
1634 | } | |
1635 | ||
34d52cb6 | 1636 | static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) |
96303081 | 1637 | { |
34d52cb6 | 1638 | struct btrfs_block_group_cache *block_group = ctl->private; |
25891f79 JB |
1639 | u64 max_bytes; |
1640 | u64 bitmap_bytes; | |
1641 | u64 extent_bytes; | |
8eb2d829 | 1642 | u64 size = block_group->key.offset; |
b8b93add DS |
1643 | u32 bytes_per_bg = BITS_PER_BITMAP * ctl->unit; |
1644 | u32 max_bitmaps = div_u64(size + bytes_per_bg - 1, bytes_per_bg); | |
34d52cb6 | 1645 | |
b8b93add | 1646 | max_bitmaps = max_t(u32, max_bitmaps, 1); |
dde5740f | 1647 | |
b12d6869 | 1648 | ASSERT(ctl->total_bitmaps <= max_bitmaps); |
96303081 JB |
1649 | |
1650 | /* | |
1651 | * The goal is to keep the total amount of memory used per 1gb of space | |
1652 | * at or below 32k, so we need to adjust how much memory we allow to be | |
1653 | * used by extent based free space tracking | |
1654 | */ | |
8eb2d829 LZ |
1655 | if (size < 1024 * 1024 * 1024) |
1656 | max_bytes = MAX_CACHE_BYTES_PER_GIG; | |
1657 | else | |
1658 | max_bytes = MAX_CACHE_BYTES_PER_GIG * | |
f8c269d7 | 1659 | div_u64(size, 1024 * 1024 * 1024); |
96303081 | 1660 | |
25891f79 JB |
1661 | /* |
1662 | * we want to account for 1 more bitmap than what we have so we can make | |
1663 | * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as | |
1664 | * we add more bitmaps. | |
1665 | */ | |
34d52cb6 | 1666 | bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE; |
96303081 | 1667 | |
25891f79 | 1668 | if (bitmap_bytes >= max_bytes) { |
34d52cb6 | 1669 | ctl->extents_thresh = 0; |
25891f79 JB |
1670 | return; |
1671 | } | |
96303081 | 1672 | |
25891f79 | 1673 | /* |
f8c269d7 | 1674 | * we want the extent entry threshold to be whatever space is left after |
25891f79 JB |
1675 | * the bitmaps, capped at 1/2 of the max bytes we can have. |
1676 | */ | |
1677 | extent_bytes = max_bytes - bitmap_bytes; | |
f8c269d7 | 1678 | extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1); |
96303081 | 1679 | |
34d52cb6 | 1680 | ctl->extents_thresh = |
f8c269d7 | 1681 | div_u64(extent_bytes, sizeof(struct btrfs_free_space)); |
96303081 JB |
1682 | } |
1683 | ||
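As a concrete illustration of the 32k-per-GiB budget above, here is an assumption-laden userspace sketch: it assumes a 4K page size, and the in-memory size of struct btrfs_free_space is taken to be 32 bytes purely for the arithmetic (EX_ENTRY_SIZE is not a real kernel constant).

#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE		4096ULL
#define EX_MAX_CACHE_PER_GIG	(32ULL * 1024)	/* mirrors MAX_CACHE_BYTES_PER_GIG */
#define EX_ENTRY_SIZE		32ULL		/* assumed sizeof(struct btrfs_free_space) */

/* how many extent entries we allow before switching new space into bitmaps */
static uint64_t ex_extents_thresh(uint64_t bg_size, uint64_t total_bitmaps)
{
	uint64_t max_bytes, bitmap_bytes, extent_bytes;

	if (bg_size < 1ULL << 30)
		max_bytes = EX_MAX_CACHE_PER_GIG;
	else
		max_bytes = EX_MAX_CACHE_PER_GIG * (bg_size >> 30);

	/* account for one more bitmap than we currently have, one page each */
	bitmap_bytes = (total_bitmaps + 1) * EX_PAGE_SIZE;
	if (bitmap_bytes >= max_bytes)
		return 0;

	/* whatever is left after bitmaps, capped at half of the budget */
	extent_bytes = max_bytes - bitmap_bytes;
	if (extent_bytes > max_bytes / 2)
		extent_bytes = max_bytes / 2;

	return extent_bytes / EX_ENTRY_SIZE;
}

int main(void)
{
	/* 1GiB block group with 2 bitmaps: 32K - 12K = 20K, capped at 16K, /32 = 512 */
	printf("%llu\n", (unsigned long long)ex_extents_thresh(1ULL << 30, 2));
	return 0;
}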
bb3ac5a4 MX |
1684 | static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, |
1685 | struct btrfs_free_space *info, | |
1686 | u64 offset, u64 bytes) | |
96303081 | 1687 | { |
f38b6e75 | 1688 | unsigned long start, count; |
96303081 | 1689 | |
34d52cb6 LZ |
1690 | start = offset_to_bit(info->offset, ctl->unit, offset); |
1691 | count = bytes_to_bits(bytes, ctl->unit); | |
b12d6869 | 1692 | ASSERT(start + count <= BITS_PER_BITMAP); |
96303081 | 1693 | |
f38b6e75 | 1694 | bitmap_clear(info->bitmap, start, count); |
96303081 JB |
1695 | |
1696 | info->bytes -= bytes; | |
bb3ac5a4 MX |
1697 | } |
1698 | ||
1699 | static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, | |
1700 | struct btrfs_free_space *info, u64 offset, | |
1701 | u64 bytes) | |
1702 | { | |
1703 | __bitmap_clear_bits(ctl, info, offset, bytes); | |
34d52cb6 | 1704 | ctl->free_space -= bytes; |
96303081 JB |
1705 | } |
1706 | ||
34d52cb6 | 1707 | static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl, |
817d52f8 JB |
1708 | struct btrfs_free_space *info, u64 offset, |
1709 | u64 bytes) | |
96303081 | 1710 | { |
f38b6e75 | 1711 | unsigned long start, count; |
96303081 | 1712 | |
34d52cb6 LZ |
1713 | start = offset_to_bit(info->offset, ctl->unit, offset); |
1714 | count = bytes_to_bits(bytes, ctl->unit); | |
b12d6869 | 1715 | ASSERT(start + count <= BITS_PER_BITMAP); |
96303081 | 1716 | |
f38b6e75 | 1717 | bitmap_set(info->bitmap, start, count); |
96303081 JB |
1718 | |
1719 | info->bytes += bytes; | |
34d52cb6 | 1720 | ctl->free_space += bytes; |
96303081 JB |
1721 | } |
1722 | ||
a4820398 MX |
1723 | /* |
1724 | * If we cannot find a suitable extent, we will use 'bytes' to record |
1725 | * the size of the largest extent found. |
1726 | */ | |
34d52cb6 | 1727 | static int search_bitmap(struct btrfs_free_space_ctl *ctl, |
96303081 JB |
1728 | struct btrfs_free_space *bitmap_info, u64 *offset, |
1729 | u64 *bytes) | |
1730 | { | |
1731 | unsigned long found_bits = 0; | |
a4820398 | 1732 | unsigned long max_bits = 0; |
96303081 JB |
1733 | unsigned long bits, i; |
1734 | unsigned long next_zero; | |
a4820398 | 1735 | unsigned long extent_bits; |
96303081 | 1736 | |
34d52cb6 | 1737 | i = offset_to_bit(bitmap_info->offset, ctl->unit, |
96303081 | 1738 | max_t(u64, *offset, bitmap_info->offset)); |
34d52cb6 | 1739 | bits = bytes_to_bits(*bytes, ctl->unit); |
96303081 | 1740 | |
ebb3dad4 | 1741 | for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) { |
96303081 JB |
1742 | next_zero = find_next_zero_bit(bitmap_info->bitmap, |
1743 | BITS_PER_BITMAP, i); | |
a4820398 MX |
1744 | extent_bits = next_zero - i; |
1745 | if (extent_bits >= bits) { | |
1746 | found_bits = extent_bits; | |
96303081 | 1747 | break; |
a4820398 MX |
1748 | } else if (extent_bits > max_bits) { |
1749 | max_bits = extent_bits; | |
96303081 JB |
1750 | } |
1751 | i = next_zero; | |
1752 | } | |
1753 | ||
1754 | if (found_bits) { | |
34d52cb6 LZ |
1755 | *offset = (u64)(i * ctl->unit) + bitmap_info->offset; |
1756 | *bytes = (u64)(found_bits) * ctl->unit; | |
96303081 JB |
1757 | return 0; |
1758 | } | |
1759 | ||
a4820398 | 1760 | *bytes = (u64)(max_bits) * ctl->unit; |
96303081 JB |
1761 | return -1; |
1762 | } | |
1763 | ||
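For illustration only, here is a tiny userspace analogue of the search above: find the first run of set bits at least 'want' long and return its start, otherwise report the longest run seen so the caller can cache a max extent size. It works on a single 64-bit word rather than a page-sized bitmap, and the names are made up for the sketch.

#include <stdint.h>

/*
 * Returns the start bit of the first run of >= 'want' set bits and stores
 * the run length in *len; on failure returns -1 and stores the longest run
 * found, mirroring how search_bitmap() reports a max extent size.
 */
static int ex_search_word(uint64_t map, unsigned int want, unsigned int *len)
{
	unsigned int i = 0, best = 0, start, run;

	while (i < 64) {
		if (!(map & (1ULL << i))) {
			i++;
			continue;
		}
		/* measure the whole run of set bits starting at i */
		start = i;
		run = 0;
		while (i < 64 && (map & (1ULL << i))) {
			run++;
			i++;
		}
		if (run >= want) {
			*len = run;
			return (int)start;
		}
		if (run > best)
			best = run;
	}
	*len = best;	/* nothing big enough: report the longest run seen */
	return -1;
}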
a4820398 | 1764 | /* Cache the size of the max extent in bytes */ |
34d52cb6 | 1765 | static struct btrfs_free_space * |
53b381b3 | 1766 | find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes, |
a4820398 | 1767 | unsigned long align, u64 *max_extent_size) |
96303081 JB |
1768 | { |
1769 | struct btrfs_free_space *entry; | |
1770 | struct rb_node *node; | |
53b381b3 DW |
1771 | u64 tmp; |
1772 | u64 align_off; | |
96303081 JB |
1773 | int ret; |
1774 | ||
34d52cb6 | 1775 | if (!ctl->free_space_offset.rb_node) |
a4820398 | 1776 | goto out; |
96303081 | 1777 | |
34d52cb6 | 1778 | entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1); |
96303081 | 1779 | if (!entry) |
a4820398 | 1780 | goto out; |
96303081 JB |
1781 | |
1782 | for (node = &entry->offset_index; node; node = rb_next(node)) { | |
1783 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | |
a4820398 MX |
1784 | if (entry->bytes < *bytes) { |
1785 | if (entry->bytes > *max_extent_size) | |
1786 | *max_extent_size = entry->bytes; | |
96303081 | 1787 | continue; |
a4820398 | 1788 | } |
96303081 | 1789 | |
53b381b3 DW |
1790 | /* make sure the space returned is big enough |
1791 | * to match our requested alignment | |
1792 | */ | |
1793 | if (*bytes >= align) { | |
a4820398 | 1794 | tmp = entry->offset - ctl->start + align - 1; |
47c5713f | 1795 | tmp = div64_u64(tmp, align); |
53b381b3 DW |
1796 | tmp = tmp * align + ctl->start; |
1797 | align_off = tmp - entry->offset; | |
1798 | } else { | |
1799 | align_off = 0; | |
1800 | tmp = entry->offset; | |
1801 | } | |
1802 | ||
a4820398 MX |
1803 | if (entry->bytes < *bytes + align_off) { |
1804 | if (entry->bytes > *max_extent_size) | |
1805 | *max_extent_size = entry->bytes; | |
53b381b3 | 1806 | continue; |
a4820398 | 1807 | } |
53b381b3 | 1808 | |
96303081 | 1809 | if (entry->bitmap) { |
a4820398 MX |
1810 | u64 size = *bytes; |
1811 | ||
1812 | ret = search_bitmap(ctl, entry, &tmp, &size); | |
53b381b3 DW |
1813 | if (!ret) { |
1814 | *offset = tmp; | |
a4820398 | 1815 | *bytes = size; |
96303081 | 1816 | return entry; |
a4820398 MX |
1817 | } else if (size > *max_extent_size) { |
1818 | *max_extent_size = size; | |
53b381b3 | 1819 | } |
96303081 JB |
1820 | continue; |
1821 | } | |
1822 | ||
53b381b3 DW |
1823 | *offset = tmp; |
1824 | *bytes = entry->bytes - align_off; | |
96303081 JB |
1825 | return entry; |
1826 | } | |
a4820398 | 1827 | out: |
96303081 JB |
1828 | return NULL; |
1829 | } | |
1830 | ||
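The alignment handling in find_free_space() rounds a candidate offset up to the next 'align' boundary measured from ctl->start, then checks that the entry still has enough bytes after the resulting gap. A hedged userspace sketch of that rounding follows; the function and parameter names are invented for the example.

#include <stdint.h>

/*
 * Round 'offset' up to the next multiple of 'align', where alignment is
 * measured relative to 'start' (the block group / ctl start), and report
 * how many bytes of the entry are skipped to get there (the align_off gap).
 */
static uint64_t ex_align_offset(uint64_t start, uint64_t offset,
				uint64_t align, uint64_t *gap)
{
	uint64_t rel = offset - start;
	uint64_t aligned = ((rel + align - 1) / align) * align + start;

	*gap = aligned - offset;	/* bytes lost to alignment */
	return aligned;
}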
34d52cb6 | 1831 | static void add_new_bitmap(struct btrfs_free_space_ctl *ctl, |
96303081 JB |
1832 | struct btrfs_free_space *info, u64 offset) |
1833 | { | |
34d52cb6 | 1834 | info->offset = offset_to_bitmap(ctl, offset); |
f019f426 | 1835 | info->bytes = 0; |
f2d0f676 | 1836 | INIT_LIST_HEAD(&info->list); |
34d52cb6 LZ |
1837 | link_free_space(ctl, info); |
1838 | ctl->total_bitmaps++; | |
96303081 | 1839 | |
34d52cb6 | 1840 | ctl->op->recalc_thresholds(ctl); |
96303081 JB |
1841 | } |
1842 | ||
34d52cb6 | 1843 | static void free_bitmap(struct btrfs_free_space_ctl *ctl, |
edf6e2d1 LZ |
1844 | struct btrfs_free_space *bitmap_info) |
1845 | { | |
34d52cb6 | 1846 | unlink_free_space(ctl, bitmap_info); |
edf6e2d1 | 1847 | kfree(bitmap_info->bitmap); |
dc89e982 | 1848 | kmem_cache_free(btrfs_free_space_cachep, bitmap_info); |
34d52cb6 LZ |
1849 | ctl->total_bitmaps--; |
1850 | ctl->op->recalc_thresholds(ctl); | |
edf6e2d1 LZ |
1851 | } |
1852 | ||
34d52cb6 | 1853 | static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl, |
96303081 JB |
1854 | struct btrfs_free_space *bitmap_info, |
1855 | u64 *offset, u64 *bytes) | |
1856 | { | |
1857 | u64 end; | |
6606bb97 JB |
1858 | u64 search_start, search_bytes; |
1859 | int ret; | |
96303081 JB |
1860 | |
1861 | again: | |
34d52cb6 | 1862 | end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1; |
96303081 | 1863 | |
6606bb97 | 1864 | /* |
bdb7d303 JB |
1865 | * We need to search for bits in this bitmap. We may only cover part |
1866 | * of the extent in this bitmap, thanks to how we add space, so we need |
1867 | * to search for as much of it as we can and clear that amount, and then |
1868 | * go searching for the next bit. |
6606bb97 JB |
1869 | */ |
1870 | search_start = *offset; | |
bdb7d303 | 1871 | search_bytes = ctl->unit; |
13dbc089 | 1872 | search_bytes = min(search_bytes, end - search_start + 1); |
34d52cb6 | 1873 | ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes); |
b50c6e25 JB |
1874 | if (ret < 0 || search_start != *offset) |
1875 | return -EINVAL; | |
6606bb97 | 1876 | |
bdb7d303 JB |
1877 | /* We may have found more bits than what we need */ |
1878 | search_bytes = min(search_bytes, *bytes); | |
1879 | ||
1880 | /* Cannot clear past the end of the bitmap */ | |
1881 | search_bytes = min(search_bytes, end - search_start + 1); | |
1882 | ||
1883 | bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes); | |
1884 | *offset += search_bytes; | |
1885 | *bytes -= search_bytes; | |
96303081 JB |
1886 | |
1887 | if (*bytes) { | |
6606bb97 | 1888 | struct rb_node *next = rb_next(&bitmap_info->offset_index); |
edf6e2d1 | 1889 | if (!bitmap_info->bytes) |
34d52cb6 | 1890 | free_bitmap(ctl, bitmap_info); |
96303081 | 1891 | |
6606bb97 JB |
1892 | /* |
1893 | * no entry after this bitmap, but we still have bytes to | |
1894 | * remove, so something has gone wrong. | |
1895 | */ | |
1896 | if (!next) | |
96303081 JB |
1897 | return -EINVAL; |
1898 | ||
6606bb97 JB |
1899 | bitmap_info = rb_entry(next, struct btrfs_free_space, |
1900 | offset_index); | |
1901 | ||
1902 | /* | |
1903 | * if the next entry isn't a bitmap we need to return to let the | |
1904 | * extent stuff do its work. | |
1905 | */ | |
96303081 JB |
1906 | if (!bitmap_info->bitmap) |
1907 | return -EAGAIN; | |
1908 | ||
6606bb97 JB |
1909 | /* |
1910 | * Ok the next item is a bitmap, but it may not actually hold | |
1911 | * the information for the rest of this free space stuff, so | |
1912 | * look for it, and if we don't find it return so we can try | |
1913 | * everything over again. | |
1914 | */ | |
1915 | search_start = *offset; | |
bdb7d303 | 1916 | search_bytes = ctl->unit; |
34d52cb6 | 1917 | ret = search_bitmap(ctl, bitmap_info, &search_start, |
6606bb97 JB |
1918 | &search_bytes); |
1919 | if (ret < 0 || search_start != *offset) | |
1920 | return -EAGAIN; | |
1921 | ||
96303081 | 1922 | goto again; |
edf6e2d1 | 1923 | } else if (!bitmap_info->bytes) |
34d52cb6 | 1924 | free_bitmap(ctl, bitmap_info); |
96303081 JB |
1925 | |
1926 | return 0; | |
1927 | } | |
1928 | ||
2cdc342c JB |
1929 | static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl, |
1930 | struct btrfs_free_space *info, u64 offset, | |
1931 | u64 bytes) | |
1932 | { | |
1933 | u64 bytes_to_set = 0; | |
1934 | u64 end; | |
1935 | ||
1936 | end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); | |
1937 | ||
1938 | bytes_to_set = min(end - offset, bytes); | |
1939 | ||
1940 | bitmap_set_bits(ctl, info, offset, bytes_to_set); | |
1941 | ||
1942 | return bytes_to_set; | |
1943 | ||
1944 | } | |
1945 | ||
34d52cb6 LZ |
1946 | static bool use_bitmap(struct btrfs_free_space_ctl *ctl, |
1947 | struct btrfs_free_space *info) | |
96303081 | 1948 | { |
34d52cb6 | 1949 | struct btrfs_block_group_cache *block_group = ctl->private; |
96303081 JB |
1950 | |
1951 | /* | |
1952 | * If we are below the extents threshold then we can add this as an | |
1953 | * extent, and don't have to deal with the bitmap | |
1954 | */ | |
34d52cb6 | 1955 | if (ctl->free_extents < ctl->extents_thresh) { |
32cb0840 JB |
1956 | /* |
1957 | * If this block group has some small extents we don't want to | |
1958 | * use up all of our free slots in the cache with them, we want | |
1959 | * to reserve them for larger extents, however if we have plenty |
1960 | * of cache left then go ahead and add them, no sense in adding |
1961 | * the overhead of a bitmap if we don't have to. | |
1962 | */ | |
1963 | if (info->bytes <= block_group->sectorsize * 4) { | |
34d52cb6 LZ |
1964 | if (ctl->free_extents * 2 <= ctl->extents_thresh) |
1965 | return false; | |
32cb0840 | 1966 | } else { |
34d52cb6 | 1967 | return false; |
32cb0840 JB |
1968 | } |
1969 | } | |
96303081 JB |
1970 | |
1971 | /* | |
dde5740f JB |
1972 | * The original block groups from mkfs can be really small, like 8 |
1973 | * megabytes, so don't bother with a bitmap for those entries. However | |
1974 | * some block groups can be smaller than what a bitmap would cover but | |
1975 | * are still large enough that they could overflow the 32k memory limit, | |
1976 | * so still allow those block groups to have a bitmap |
1977 | * entry. |
96303081 | 1978 | */ |
dde5740f | 1979 | if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset) |
34d52cb6 LZ |
1980 | return false; |
1981 | ||
1982 | return true; | |
1983 | } | |
1984 | ||
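A compact restatement of the heuristic above as an illustrative predicate. This is a userspace sketch: the 4-sector "small extent" cutoff and the half-bitmap block group check mirror the code, but the struct, fields, and names are invented.

#include <stdbool.h>
#include <stdint.h>

struct ex_ctl {
	uint64_t free_extents;
	uint64_t extents_thresh;
	uint64_t unit;			/* sectorsize */
	uint64_t bits_per_bitmap;	/* page size * 8 */
	uint64_t bg_size;		/* block group length */
};

/* decide whether a new chunk of free space should go into a bitmap */
static bool ex_use_bitmap(const struct ex_ctl *c, uint64_t bytes)
{
	if (c->free_extents < c->extents_thresh) {
		/* small extents only get their own entry while fewer than half
		 * the slots are used; larger extents always do under threshold */
		if (bytes <= c->unit * 4) {
			if (c->free_extents * 2 <= c->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/* block groups smaller than half a bitmap's coverage never need one */
	if (((c->bits_per_bitmap * c->unit) >> 1) > c->bg_size)
		return false;

	return true;
}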
2cdc342c JB |
1985 | static struct btrfs_free_space_op free_space_op = { |
1986 | .recalc_thresholds = recalculate_thresholds, | |
1987 | .use_bitmap = use_bitmap, | |
1988 | }; | |
1989 | ||
34d52cb6 LZ |
1990 | static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, |
1991 | struct btrfs_free_space *info) | |
1992 | { | |
1993 | struct btrfs_free_space *bitmap_info; | |
2cdc342c | 1994 | struct btrfs_block_group_cache *block_group = NULL; |
34d52cb6 | 1995 | int added = 0; |
2cdc342c | 1996 | u64 bytes, offset, bytes_added; |
34d52cb6 | 1997 | int ret; |
96303081 JB |
1998 | |
1999 | bytes = info->bytes; | |
2000 | offset = info->offset; | |
2001 | ||
34d52cb6 LZ |
2002 | if (!ctl->op->use_bitmap(ctl, info)) |
2003 | return 0; | |
2004 | ||
2cdc342c JB |
2005 | if (ctl->op == &free_space_op) |
2006 | block_group = ctl->private; | |
38e87880 | 2007 | again: |
2cdc342c JB |
2008 | /* |
2009 | * Since we link bitmaps right into the cluster we need to see if we | |
2010 | * have a cluster here, and if so and it has our bitmap we need to add | |
2011 | * the free space to that bitmap. | |
2012 | */ | |
2013 | if (block_group && !list_empty(&block_group->cluster_list)) { | |
2014 | struct btrfs_free_cluster *cluster; | |
2015 | struct rb_node *node; | |
2016 | struct btrfs_free_space *entry; | |
2017 | ||
2018 | cluster = list_entry(block_group->cluster_list.next, | |
2019 | struct btrfs_free_cluster, | |
2020 | block_group_list); | |
2021 | spin_lock(&cluster->lock); | |
2022 | node = rb_first(&cluster->root); | |
2023 | if (!node) { | |
2024 | spin_unlock(&cluster->lock); | |
38e87880 | 2025 | goto no_cluster_bitmap; |
2cdc342c JB |
2026 | } |
2027 | ||
2028 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | |
2029 | if (!entry->bitmap) { | |
2030 | spin_unlock(&cluster->lock); | |
38e87880 | 2031 | goto no_cluster_bitmap; |
2cdc342c JB |
2032 | } |
2033 | ||
2034 | if (entry->offset == offset_to_bitmap(ctl, offset)) { | |
2035 | bytes_added = add_bytes_to_bitmap(ctl, entry, | |
2036 | offset, bytes); | |
2037 | bytes -= bytes_added; | |
2038 | offset += bytes_added; | |
2039 | } | |
2040 | spin_unlock(&cluster->lock); | |
2041 | if (!bytes) { | |
2042 | ret = 1; | |
2043 | goto out; | |
2044 | } | |
2045 | } | |
38e87880 CM |
2046 | |
2047 | no_cluster_bitmap: | |
34d52cb6 | 2048 | bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), |
96303081 JB |
2049 | 1, 0); |
2050 | if (!bitmap_info) { | |
b12d6869 | 2051 | ASSERT(added == 0); |
96303081 JB |
2052 | goto new_bitmap; |
2053 | } | |
2054 | ||
2cdc342c JB |
2055 | bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes); |
2056 | bytes -= bytes_added; | |
2057 | offset += bytes_added; | |
2058 | added = 0; | |
96303081 JB |
2059 | |
2060 | if (!bytes) { | |
2061 | ret = 1; | |
2062 | goto out; | |
2063 | } else | |
2064 | goto again; | |
2065 | ||
2066 | new_bitmap: | |
2067 | if (info && info->bitmap) { | |
34d52cb6 | 2068 | add_new_bitmap(ctl, info, offset); |
96303081 JB |
2069 | added = 1; |
2070 | info = NULL; | |
2071 | goto again; | |
2072 | } else { | |
34d52cb6 | 2073 | spin_unlock(&ctl->tree_lock); |
96303081 JB |
2074 | |
2075 | /* no pre-allocated info, allocate a new one */ | |
2076 | if (!info) { | |
dc89e982 JB |
2077 | info = kmem_cache_zalloc(btrfs_free_space_cachep, |
2078 | GFP_NOFS); | |
96303081 | 2079 | if (!info) { |
34d52cb6 | 2080 | spin_lock(&ctl->tree_lock); |
96303081 JB |
2081 | ret = -ENOMEM; |
2082 | goto out; | |
2083 | } | |
2084 | } | |
2085 | ||
2086 | /* allocate the bitmap */ | |
2087 | info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); | |
34d52cb6 | 2088 | spin_lock(&ctl->tree_lock); |
96303081 JB |
2089 | if (!info->bitmap) { |
2090 | ret = -ENOMEM; | |
2091 | goto out; | |
2092 | } | |
2093 | goto again; | |
2094 | } | |
2095 | ||
2096 | out: | |
2097 | if (info) { | |
2098 | if (info->bitmap) | |
2099 | kfree(info->bitmap); | |
dc89e982 | 2100 | kmem_cache_free(btrfs_free_space_cachep, info); |
96303081 | 2101 | } |
0f9dd46c JB |
2102 | |
2103 | return ret; | |
2104 | } | |
2105 | ||
945d8962 | 2106 | static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, |
f333adb5 | 2107 | struct btrfs_free_space *info, bool update_stat) |
0f9dd46c | 2108 | { |
120d66ee LZ |
2109 | struct btrfs_free_space *left_info; |
2110 | struct btrfs_free_space *right_info; | |
2111 | bool merged = false; | |
2112 | u64 offset = info->offset; | |
2113 | u64 bytes = info->bytes; | |
6226cb0a | 2114 | |
0f9dd46c JB |
2115 | /* |
2116 | * first we want to see if there is free space adjacent to the range we | |
2117 | * are adding; if there is, remove that struct and add a new one to |
2118 | * cover the entire range | |
2119 | */ | |
34d52cb6 | 2120 | right_info = tree_search_offset(ctl, offset + bytes, 0, 0); |
96303081 JB |
2121 | if (right_info && rb_prev(&right_info->offset_index)) |
2122 | left_info = rb_entry(rb_prev(&right_info->offset_index), | |
2123 | struct btrfs_free_space, offset_index); | |
2124 | else | |
34d52cb6 | 2125 | left_info = tree_search_offset(ctl, offset - 1, 0, 0); |
0f9dd46c | 2126 | |
96303081 | 2127 | if (right_info && !right_info->bitmap) { |
f333adb5 | 2128 | if (update_stat) |
34d52cb6 | 2129 | unlink_free_space(ctl, right_info); |
f333adb5 | 2130 | else |
34d52cb6 | 2131 | __unlink_free_space(ctl, right_info); |
6226cb0a | 2132 | info->bytes += right_info->bytes; |
dc89e982 | 2133 | kmem_cache_free(btrfs_free_space_cachep, right_info); |
120d66ee | 2134 | merged = true; |
0f9dd46c JB |
2135 | } |
2136 | ||
96303081 JB |
2137 | if (left_info && !left_info->bitmap && |
2138 | left_info->offset + left_info->bytes == offset) { | |
f333adb5 | 2139 | if (update_stat) |
34d52cb6 | 2140 | unlink_free_space(ctl, left_info); |
f333adb5 | 2141 | else |
34d52cb6 | 2142 | __unlink_free_space(ctl, left_info); |
6226cb0a JB |
2143 | info->offset = left_info->offset; |
2144 | info->bytes += left_info->bytes; | |
dc89e982 | 2145 | kmem_cache_free(btrfs_free_space_cachep, left_info); |
120d66ee | 2146 | merged = true; |
0f9dd46c JB |
2147 | } |
2148 | ||
120d66ee LZ |
2149 | return merged; |
2150 | } | |
2151 | ||
20005523 FM |
2152 | static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl, |
2153 | struct btrfs_free_space *info, | |
2154 | bool update_stat) | |
2155 | { | |
2156 | struct btrfs_free_space *bitmap; | |
2157 | unsigned long i; | |
2158 | unsigned long j; | |
2159 | const u64 end = info->offset + info->bytes; | |
2160 | const u64 bitmap_offset = offset_to_bitmap(ctl, end); | |
2161 | u64 bytes; | |
2162 | ||
2163 | bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0); | |
2164 | if (!bitmap) | |
2165 | return false; | |
2166 | ||
2167 | i = offset_to_bit(bitmap->offset, ctl->unit, end); | |
2168 | j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i); | |
2169 | if (j == i) | |
2170 | return false; | |
2171 | bytes = (j - i) * ctl->unit; | |
2172 | info->bytes += bytes; | |
2173 | ||
2174 | if (update_stat) | |
2175 | bitmap_clear_bits(ctl, bitmap, end, bytes); | |
2176 | else | |
2177 | __bitmap_clear_bits(ctl, bitmap, end, bytes); | |
2178 | ||
2179 | if (!bitmap->bytes) | |
2180 | free_bitmap(ctl, bitmap); | |
2181 | ||
2182 | return true; | |
2183 | } | |
2184 | ||
2185 | static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl, | |
2186 | struct btrfs_free_space *info, | |
2187 | bool update_stat) | |
2188 | { | |
2189 | struct btrfs_free_space *bitmap; | |
2190 | u64 bitmap_offset; | |
2191 | unsigned long i; | |
2192 | unsigned long j; | |
2193 | unsigned long prev_j; | |
2194 | u64 bytes; | |
2195 | ||
2196 | bitmap_offset = offset_to_bitmap(ctl, info->offset); | |
2197 | /* If we're on a boundary, try the previous logical bitmap. */ | |
2198 | if (bitmap_offset == info->offset) { | |
2199 | if (info->offset == 0) | |
2200 | return false; | |
2201 | bitmap_offset = offset_to_bitmap(ctl, info->offset - 1); | |
2202 | } | |
2203 | ||
2204 | bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0); | |
2205 | if (!bitmap) | |
2206 | return false; | |
2207 | ||
2208 | i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1; | |
2209 | j = 0; | |
2210 | prev_j = (unsigned long)-1; | |
2211 | for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) { | |
2212 | if (j > i) | |
2213 | break; | |
2214 | prev_j = j; | |
2215 | } | |
2216 | if (prev_j == i) | |
2217 | return false; | |
2218 | ||
2219 | if (prev_j == (unsigned long)-1) | |
2220 | bytes = (i + 1) * ctl->unit; | |
2221 | else | |
2222 | bytes = (i - prev_j) * ctl->unit; | |
2223 | ||
2224 | info->offset -= bytes; | |
2225 | info->bytes += bytes; | |
2226 | ||
2227 | if (update_stat) | |
2228 | bitmap_clear_bits(ctl, bitmap, info->offset, bytes); | |
2229 | else | |
2230 | __bitmap_clear_bits(ctl, bitmap, info->offset, bytes); | |
2231 | ||
2232 | if (!bitmap->bytes) | |
2233 | free_bitmap(ctl, bitmap); | |
2234 | ||
2235 | return true; | |
2236 | } | |
2237 | ||
2238 | /* | |
2239 | * We prefer always to allocate from extent entries, both for clustered and | |
2240 | * non-clustered allocation requests. So when attempting to add a new extent | |
2241 | * entry, try to see if there's adjacent free space in bitmap entries, and if | |
2242 | * there is, migrate that space from the bitmaps to the extent. | |
2243 | * Like this we get better chances of satisfying space allocation requests | |
2244 | * because we attempt to satisfy them based on a single cache entry, and never | |
2245 | * on 2 or more entries - even if the entries represent a contiguous free space | |
2246 | * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry | |
2247 | * ends). | |
2248 | */ | |
2249 | static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl, | |
2250 | struct btrfs_free_space *info, | |
2251 | bool update_stat) | |
2252 | { | |
2253 | /* | |
2254 | * Only work with disconnected entries, as we can change their offset, | |
2255 | * and must be extent entries. | |
2256 | */ | |
2257 | ASSERT(!info->bitmap); | |
2258 | ASSERT(RB_EMPTY_NODE(&info->offset_index)); | |
2259 | ||
2260 | if (ctl->total_bitmaps > 0) { | |
2261 | bool stole_end; | |
2262 | bool stole_front = false; | |
2263 | ||
2264 | stole_end = steal_from_bitmap_to_end(ctl, info, update_stat); | |
2265 | if (ctl->total_bitmaps > 0) | |
2266 | stole_front = steal_from_bitmap_to_front(ctl, info, | |
2267 | update_stat); | |
2268 | ||
2269 | if (stole_end || stole_front) | |
2270 | try_merge_free_space(ctl, info, update_stat); | |
2271 | } | |
2272 | } | |
2273 | ||
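To make the "steal" direction concrete, here is an illustrative userspace sketch on a single 64-bit word: the set bits immediately following the end of an extent are absorbed into it and cleared from the bitmap, which is the to-end case described above (the to-front case works the same way backwards). The struct, the single-word bitmap, and the names are assumptions for the example.

#include <stdint.h>

struct ex_extent {
	uint64_t start_bit;	/* extent expressed in bitmap granules */
	uint64_t nr_bits;
};

/* absorb the run of set bits that directly follows the extent's end */
static void ex_steal_to_end(struct ex_extent *e, uint64_t *bitmap)
{
	uint64_t bit = e->start_bit + e->nr_bits;

	while (bit < 64 && (*bitmap & (1ULL << bit))) {
		*bitmap &= ~(1ULL << bit);	/* clear the stolen bit */
		e->nr_bits++;			/* grow the extent entry */
		bit++;
	}
}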
581bb050 LZ |
2274 | int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, |
2275 | u64 offset, u64 bytes) | |
120d66ee LZ |
2276 | { |
2277 | struct btrfs_free_space *info; | |
2278 | int ret = 0; | |
2279 | ||
dc89e982 | 2280 | info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS); |
120d66ee LZ |
2281 | if (!info) |
2282 | return -ENOMEM; | |
2283 | ||
2284 | info->offset = offset; | |
2285 | info->bytes = bytes; | |
20005523 | 2286 | RB_CLEAR_NODE(&info->offset_index); |
120d66ee | 2287 | |
34d52cb6 | 2288 | spin_lock(&ctl->tree_lock); |
120d66ee | 2289 | |
34d52cb6 | 2290 | if (try_merge_free_space(ctl, info, true)) |
120d66ee LZ |
2291 | goto link; |
2292 | ||
2293 | /* | |
2294 | * There was no extent directly to the left or right of this new | |
2295 | * extent then we know we're going to have to allocate a new extent, so | |
2296 | * before we do that see if we need to drop this into a bitmap | |
2297 | */ | |
34d52cb6 | 2298 | ret = insert_into_bitmap(ctl, info); |
120d66ee LZ |
2299 | if (ret < 0) { |
2300 | goto out; | |
2301 | } else if (ret) { | |
2302 | ret = 0; | |
2303 | goto out; | |
2304 | } | |
2305 | link: | |
20005523 FM |
2306 | /* |
2307 | * Only steal free space from adjacent bitmaps if we're sure we're not | |
2308 | * going to add the new free space to existing bitmap entries - because | |
2309 | * that would mean unnecessary work that would be reverted. Therefore | |
2310 | * attempt to steal space from bitmaps if we're adding an extent entry. | |
2311 | */ | |
2312 | steal_from_bitmap(ctl, info, true); | |
2313 | ||
34d52cb6 | 2314 | ret = link_free_space(ctl, info); |
0f9dd46c | 2315 | if (ret) |
dc89e982 | 2316 | kmem_cache_free(btrfs_free_space_cachep, info); |
96303081 | 2317 | out: |
34d52cb6 | 2318 | spin_unlock(&ctl->tree_lock); |
6226cb0a | 2319 | |
0f9dd46c | 2320 | if (ret) { |
efe120a0 | 2321 | printk(KERN_CRIT "BTRFS: unable to add free space :%d\n", ret); |
b12d6869 | 2322 | ASSERT(ret != -EEXIST); |
0f9dd46c JB |
2323 | } |
2324 | ||
0f9dd46c JB |
2325 | return ret; |
2326 | } | |
2327 | ||
6226cb0a JB |
2328 | int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, |
2329 | u64 offset, u64 bytes) | |
0f9dd46c | 2330 | { |
34d52cb6 | 2331 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
0f9dd46c | 2332 | struct btrfs_free_space *info; |
b0175117 JB |
2333 | int ret; |
2334 | bool re_search = false; | |
0f9dd46c | 2335 | |
34d52cb6 | 2336 | spin_lock(&ctl->tree_lock); |
6226cb0a | 2337 | |
96303081 | 2338 | again: |
b0175117 | 2339 | ret = 0; |
bdb7d303 JB |
2340 | if (!bytes) |
2341 | goto out_lock; | |
2342 | ||
34d52cb6 | 2343 | info = tree_search_offset(ctl, offset, 0, 0); |
96303081 | 2344 | if (!info) { |
6606bb97 JB |
2345 | /* |
2346 | * oops didn't find an extent that matched the space we wanted | |
2347 | * to remove, look for a bitmap instead | |
2348 | */ | |
34d52cb6 | 2349 | info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), |
6606bb97 JB |
2350 | 1, 0); |
2351 | if (!info) { | |
b0175117 JB |
2352 | /* |
2353 | * If we found a partial bit of our free space in a | |
2354 | * bitmap but then couldn't find the other part this may | |
2355 | * be a problem, so WARN about it. | |
24a70313 | 2356 | */ |
b0175117 | 2357 | WARN_ON(re_search); |
6606bb97 JB |
2358 | goto out_lock; |
2359 | } | |
96303081 JB |
2360 | } |
2361 | ||
b0175117 | 2362 | re_search = false; |
bdb7d303 | 2363 | if (!info->bitmap) { |
34d52cb6 | 2364 | unlink_free_space(ctl, info); |
bdb7d303 JB |
2365 | if (offset == info->offset) { |
2366 | u64 to_free = min(bytes, info->bytes); | |
2367 | ||
2368 | info->bytes -= to_free; | |
2369 | info->offset += to_free; | |
2370 | if (info->bytes) { | |
2371 | ret = link_free_space(ctl, info); | |
2372 | WARN_ON(ret); | |
2373 | } else { | |
2374 | kmem_cache_free(btrfs_free_space_cachep, info); | |
2375 | } | |
0f9dd46c | 2376 | |
bdb7d303 JB |
2377 | offset += to_free; |
2378 | bytes -= to_free; | |
2379 | goto again; | |
2380 | } else { | |
2381 | u64 old_end = info->bytes + info->offset; | |
9b49c9b9 | 2382 | |
bdb7d303 | 2383 | info->bytes = offset - info->offset; |
34d52cb6 | 2384 | ret = link_free_space(ctl, info); |
96303081 JB |
2385 | WARN_ON(ret); |
2386 | if (ret) | |
2387 | goto out_lock; | |
96303081 | 2388 | |
bdb7d303 JB |
2389 | /* Not enough bytes in this entry to satisfy us */ |
2390 | if (old_end < offset + bytes) { | |
2391 | bytes -= old_end - offset; | |
2392 | offset = old_end; | |
2393 | goto again; | |
2394 | } else if (old_end == offset + bytes) { | |
2395 | /* all done */ | |
2396 | goto out_lock; | |
2397 | } | |
2398 | spin_unlock(&ctl->tree_lock); | |
2399 | ||
2400 | ret = btrfs_add_free_space(block_group, offset + bytes, | |
2401 | old_end - (offset + bytes)); | |
2402 | WARN_ON(ret); | |
2403 | goto out; | |
2404 | } | |
0f9dd46c | 2405 | } |
96303081 | 2406 | |
34d52cb6 | 2407 | ret = remove_from_bitmap(ctl, info, &offset, &bytes); |
b0175117 JB |
2408 | if (ret == -EAGAIN) { |
2409 | re_search = true; | |
96303081 | 2410 | goto again; |
b0175117 | 2411 | } |
96303081 | 2412 | out_lock: |
34d52cb6 | 2413 | spin_unlock(&ctl->tree_lock); |
0f9dd46c | 2414 | out: |
25179201 JB |
2415 | return ret; |
2416 | } | |
2417 | ||
0f9dd46c JB |
2418 | void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, |
2419 | u64 bytes) | |
2420 | { | |
34d52cb6 | 2421 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
0f9dd46c JB |
2422 | struct btrfs_free_space *info; |
2423 | struct rb_node *n; | |
2424 | int count = 0; | |
2425 | ||
34d52cb6 | 2426 | for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { |
0f9dd46c | 2427 | info = rb_entry(n, struct btrfs_free_space, offset_index); |
f6175efa | 2428 | if (info->bytes >= bytes && !block_group->ro) |
0f9dd46c | 2429 | count++; |
efe120a0 FH |
2430 | btrfs_crit(block_group->fs_info, |
2431 | "entry offset %llu, bytes %llu, bitmap %s", | |
2432 | info->offset, info->bytes, | |
96303081 | 2433 | (info->bitmap) ? "yes" : "no"); |
0f9dd46c | 2434 | } |
efe120a0 | 2435 | btrfs_info(block_group->fs_info, "block group has cluster?: %s", |
96303081 | 2436 | list_empty(&block_group->cluster_list) ? "no" : "yes"); |
efe120a0 FH |
2437 | btrfs_info(block_group->fs_info, |
2438 | "%d blocks of free space at or bigger than bytes is", count); | |
0f9dd46c JB |
2439 | } |
2440 | ||
34d52cb6 | 2441 | void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group) |
0f9dd46c | 2442 | { |
34d52cb6 | 2443 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
0f9dd46c | 2444 | |
34d52cb6 LZ |
2445 | spin_lock_init(&ctl->tree_lock); |
2446 | ctl->unit = block_group->sectorsize; | |
2447 | ctl->start = block_group->key.objectid; | |
2448 | ctl->private = block_group; | |
2449 | ctl->op = &free_space_op; | |
55507ce3 FM |
2450 | INIT_LIST_HEAD(&ctl->trimming_ranges); |
2451 | mutex_init(&ctl->cache_writeout_mutex); | |
0f9dd46c | 2452 | |
34d52cb6 LZ |
2453 | /* |
2454 | * we only want to have 32k of ram per block group for keeping | |
2455 | * track of free space, and if we pass 1/2 of that we want to | |
2456 | * start converting things over to using bitmaps | |
2457 | */ | |
2458 | ctl->extents_thresh = ((1024 * 32) / 2) / | |
2459 | sizeof(struct btrfs_free_space); | |
0f9dd46c JB |
2460 | } |
2461 | ||
fa9c0d79 CM |
2462 | /* |
2463 | * for a given cluster, put all of its extents back into the free | |
2464 | * space cache. If the block group passed doesn't match the block group | |
2465 | * pointed to by the cluster, someone else raced in and freed the | |
2466 | * cluster already. In that case, we just return without changing anything | |
2467 | */ | |
2468 | static int | |
2469 | __btrfs_return_cluster_to_free_space( | |
2470 | struct btrfs_block_group_cache *block_group, | |
2471 | struct btrfs_free_cluster *cluster) | |
2472 | { | |
34d52cb6 | 2473 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
fa9c0d79 CM |
2474 | struct btrfs_free_space *entry; |
2475 | struct rb_node *node; | |
2476 | ||
2477 | spin_lock(&cluster->lock); | |
2478 | if (cluster->block_group != block_group) | |
2479 | goto out; | |
2480 | ||
96303081 | 2481 | cluster->block_group = NULL; |
fa9c0d79 | 2482 | cluster->window_start = 0; |
96303081 | 2483 | list_del_init(&cluster->block_group_list); |
96303081 | 2484 | |
fa9c0d79 | 2485 | node = rb_first(&cluster->root); |
96303081 | 2486 | while (node) { |
4e69b598 JB |
2487 | bool bitmap; |
2488 | ||
fa9c0d79 CM |
2489 | entry = rb_entry(node, struct btrfs_free_space, offset_index); |
2490 | node = rb_next(&entry->offset_index); | |
2491 | rb_erase(&entry->offset_index, &cluster->root); | |
20005523 | 2492 | RB_CLEAR_NODE(&entry->offset_index); |
4e69b598 JB |
2493 | |
2494 | bitmap = (entry->bitmap != NULL); | |
20005523 | 2495 | if (!bitmap) { |
34d52cb6 | 2496 | try_merge_free_space(ctl, entry, false); |
20005523 FM |
2497 | steal_from_bitmap(ctl, entry, false); |
2498 | } | |
34d52cb6 | 2499 | tree_insert_offset(&ctl->free_space_offset, |
4e69b598 | 2500 | entry->offset, &entry->offset_index, bitmap); |
fa9c0d79 | 2501 | } |
6bef4d31 | 2502 | cluster->root = RB_ROOT; |
96303081 | 2503 | |
fa9c0d79 CM |
2504 | out: |
2505 | spin_unlock(&cluster->lock); | |
96303081 | 2506 | btrfs_put_block_group(block_group); |
fa9c0d79 CM |
2507 | return 0; |
2508 | } | |
2509 | ||
48a3b636 ES |
2510 | static void __btrfs_remove_free_space_cache_locked( |
2511 | struct btrfs_free_space_ctl *ctl) | |
0f9dd46c JB |
2512 | { |
2513 | struct btrfs_free_space *info; | |
2514 | struct rb_node *node; | |
581bb050 | 2515 | |
581bb050 LZ |
2516 | while ((node = rb_last(&ctl->free_space_offset)) != NULL) { |
2517 | info = rb_entry(node, struct btrfs_free_space, offset_index); | |
9b90f513 JB |
2518 | if (!info->bitmap) { |
2519 | unlink_free_space(ctl, info); | |
2520 | kmem_cache_free(btrfs_free_space_cachep, info); | |
2521 | } else { | |
2522 | free_bitmap(ctl, info); | |
2523 | } | |
351810c1 DS |
2524 | |
2525 | cond_resched_lock(&ctl->tree_lock); | |
581bb050 | 2526 | } |
09655373 CM |
2527 | } |
2528 | ||
2529 | void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl) | |
2530 | { | |
2531 | spin_lock(&ctl->tree_lock); | |
2532 | __btrfs_remove_free_space_cache_locked(ctl); | |
581bb050 LZ |
2533 | spin_unlock(&ctl->tree_lock); |
2534 | } | |
2535 | ||
2536 | void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) | |
2537 | { | |
2538 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | |
fa9c0d79 | 2539 | struct btrfs_free_cluster *cluster; |
96303081 | 2540 | struct list_head *head; |
0f9dd46c | 2541 | |
34d52cb6 | 2542 | spin_lock(&ctl->tree_lock); |
96303081 JB |
2543 | while ((head = block_group->cluster_list.next) != |
2544 | &block_group->cluster_list) { | |
2545 | cluster = list_entry(head, struct btrfs_free_cluster, | |
2546 | block_group_list); | |
fa9c0d79 CM |
2547 | |
2548 | WARN_ON(cluster->block_group != block_group); | |
2549 | __btrfs_return_cluster_to_free_space(block_group, cluster); | |
351810c1 DS |
2550 | |
2551 | cond_resched_lock(&ctl->tree_lock); | |
fa9c0d79 | 2552 | } |
09655373 | 2553 | __btrfs_remove_free_space_cache_locked(ctl); |
34d52cb6 | 2554 | spin_unlock(&ctl->tree_lock); |
fa9c0d79 | 2555 | |
0f9dd46c JB |
2556 | } |
2557 | ||
6226cb0a | 2558 | u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, |
a4820398 MX |
2559 | u64 offset, u64 bytes, u64 empty_size, |
2560 | u64 *max_extent_size) | |
0f9dd46c | 2561 | { |
34d52cb6 | 2562 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
6226cb0a | 2563 | struct btrfs_free_space *entry = NULL; |
96303081 | 2564 | u64 bytes_search = bytes + empty_size; |
6226cb0a | 2565 | u64 ret = 0; |
53b381b3 DW |
2566 | u64 align_gap = 0; |
2567 | u64 align_gap_len = 0; | |
0f9dd46c | 2568 | |
34d52cb6 | 2569 | spin_lock(&ctl->tree_lock); |
53b381b3 | 2570 | entry = find_free_space(ctl, &offset, &bytes_search, |
a4820398 | 2571 | block_group->full_stripe_len, max_extent_size); |
6226cb0a | 2572 | if (!entry) |
96303081 JB |
2573 | goto out; |
2574 | ||
2575 | ret = offset; | |
2576 | if (entry->bitmap) { | |
34d52cb6 | 2577 | bitmap_clear_bits(ctl, entry, offset, bytes); |
edf6e2d1 | 2578 | if (!entry->bytes) |
34d52cb6 | 2579 | free_bitmap(ctl, entry); |
96303081 | 2580 | } else { |
34d52cb6 | 2581 | unlink_free_space(ctl, entry); |
53b381b3 DW |
2582 | align_gap_len = offset - entry->offset; |
2583 | align_gap = entry->offset; | |
2584 | ||
2585 | entry->offset = offset + bytes; | |
2586 | WARN_ON(entry->bytes < bytes + align_gap_len); | |
2587 | ||
2588 | entry->bytes -= bytes + align_gap_len; | |
6226cb0a | 2589 | if (!entry->bytes) |
dc89e982 | 2590 | kmem_cache_free(btrfs_free_space_cachep, entry); |
6226cb0a | 2591 | else |
34d52cb6 | 2592 | link_free_space(ctl, entry); |
6226cb0a | 2593 | } |
96303081 | 2594 | out: |
34d52cb6 | 2595 | spin_unlock(&ctl->tree_lock); |
817d52f8 | 2596 | |
53b381b3 DW |
2597 | if (align_gap_len) |
2598 | __btrfs_add_free_space(ctl, align_gap, align_gap_len); | |
0f9dd46c JB |
2599 | return ret; |
2600 | } | |
fa9c0d79 CM |
2601 | |
2602 | /* | |
2603 | * given a cluster, put all of its extents back into the free space | |
2604 | * cache. If a block group is passed, this function will only free | |
2605 | * a cluster that belongs to the passed block group. | |
2606 | * | |
2607 | * Otherwise, it'll get a reference on the block group pointed to by the | |
2608 | * cluster and remove the cluster from it. | |
2609 | */ | |
2610 | int btrfs_return_cluster_to_free_space( | |
2611 | struct btrfs_block_group_cache *block_group, | |
2612 | struct btrfs_free_cluster *cluster) | |
2613 | { | |
34d52cb6 | 2614 | struct btrfs_free_space_ctl *ctl; |
fa9c0d79 CM |
2615 | int ret; |
2616 | ||
2617 | /* first, get a safe pointer to the block group */ | |
2618 | spin_lock(&cluster->lock); | |
2619 | if (!block_group) { | |
2620 | block_group = cluster->block_group; | |
2621 | if (!block_group) { | |
2622 | spin_unlock(&cluster->lock); | |
2623 | return 0; | |
2624 | } | |
2625 | } else if (cluster->block_group != block_group) { | |
2626 | /* someone else has already freed it don't redo their work */ | |
2627 | spin_unlock(&cluster->lock); | |
2628 | return 0; | |
2629 | } | |
2630 | atomic_inc(&block_group->count); | |
2631 | spin_unlock(&cluster->lock); | |
2632 | ||
34d52cb6 LZ |
2633 | ctl = block_group->free_space_ctl; |
2634 | ||
fa9c0d79 | 2635 | /* now return any extents the cluster had on it */ |
34d52cb6 | 2636 | spin_lock(&ctl->tree_lock); |
fa9c0d79 | 2637 | ret = __btrfs_return_cluster_to_free_space(block_group, cluster); |
34d52cb6 | 2638 | spin_unlock(&ctl->tree_lock); |
fa9c0d79 CM |
2639 | |
2640 | /* finally drop our ref */ | |
2641 | btrfs_put_block_group(block_group); | |
2642 | return ret; | |
2643 | } | |
2644 | ||
96303081 JB |
2645 | static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, |
2646 | struct btrfs_free_cluster *cluster, | |
4e69b598 | 2647 | struct btrfs_free_space *entry, |
a4820398 MX |
2648 | u64 bytes, u64 min_start, |
2649 | u64 *max_extent_size) | |
96303081 | 2650 | { |
34d52cb6 | 2651 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
96303081 JB |
2652 | int err; |
2653 | u64 search_start = cluster->window_start; | |
2654 | u64 search_bytes = bytes; | |
2655 | u64 ret = 0; | |
2656 | ||
96303081 JB |
2657 | search_start = min_start; |
2658 | search_bytes = bytes; | |
2659 | ||
34d52cb6 | 2660 | err = search_bitmap(ctl, entry, &search_start, &search_bytes); |
a4820398 MX |
2661 | if (err) { |
2662 | if (search_bytes > *max_extent_size) | |
2663 | *max_extent_size = search_bytes; | |
4e69b598 | 2664 | return 0; |
a4820398 | 2665 | } |
96303081 JB |
2666 | |
2667 | ret = search_start; | |
bb3ac5a4 | 2668 | __bitmap_clear_bits(ctl, entry, ret, bytes); |
96303081 JB |
2669 | |
2670 | return ret; | |
2671 | } | |
2672 | ||
fa9c0d79 CM |
2673 | /* |
2674 | * given a cluster, try to allocate 'bytes' from it, returns 0 | |
2675 | * if it couldn't find anything suitably large, or a logical disk offset | |
2676 | * if things worked out | |
2677 | */ | |
2678 | u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |
2679 | struct btrfs_free_cluster *cluster, u64 bytes, | |
a4820398 | 2680 | u64 min_start, u64 *max_extent_size) |
fa9c0d79 | 2681 | { |
34d52cb6 | 2682 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
fa9c0d79 CM |
2683 | struct btrfs_free_space *entry = NULL; |
2684 | struct rb_node *node; | |
2685 | u64 ret = 0; | |
2686 | ||
2687 | spin_lock(&cluster->lock); | |
2688 | if (bytes > cluster->max_size) | |
2689 | goto out; | |
2690 | ||
2691 | if (cluster->block_group != block_group) | |
2692 | goto out; | |
2693 | ||
2694 | node = rb_first(&cluster->root); | |
2695 | if (!node) | |
2696 | goto out; | |
2697 | ||
2698 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | |
67871254 | 2699 | while (1) { |
a4820398 MX |
2700 | if (entry->bytes < bytes && entry->bytes > *max_extent_size) |
2701 | *max_extent_size = entry->bytes; | |
2702 | ||
4e69b598 JB |
2703 | if (entry->bytes < bytes || |
2704 | (!entry->bitmap && entry->offset < min_start)) { | |
fa9c0d79 CM |
2705 | node = rb_next(&entry->offset_index); |
2706 | if (!node) | |
2707 | break; | |
2708 | entry = rb_entry(node, struct btrfs_free_space, | |
2709 | offset_index); | |
2710 | continue; | |
2711 | } | |
fa9c0d79 | 2712 | |
4e69b598 JB |
2713 | if (entry->bitmap) { |
2714 | ret = btrfs_alloc_from_bitmap(block_group, | |
2715 | cluster, entry, bytes, | |
a4820398 MX |
2716 | cluster->window_start, |
2717 | max_extent_size); | |
4e69b598 | 2718 | if (ret == 0) { |
4e69b598 JB |
2719 | node = rb_next(&entry->offset_index); |
2720 | if (!node) | |
2721 | break; | |
2722 | entry = rb_entry(node, struct btrfs_free_space, | |
2723 | offset_index); | |
2724 | continue; | |
2725 | } | |
9b230628 | 2726 | cluster->window_start += bytes; |
4e69b598 | 2727 | } else { |
4e69b598 JB |
2728 | ret = entry->offset; |
2729 | ||
2730 | entry->offset += bytes; | |
2731 | entry->bytes -= bytes; | |
2732 | } | |
fa9c0d79 | 2733 | |
5e71b5d5 | 2734 | if (entry->bytes == 0) |
fa9c0d79 | 2735 | rb_erase(&entry->offset_index, &cluster->root); |
fa9c0d79 CM |
2736 | break; |
2737 | } | |
2738 | out: | |
2739 | spin_unlock(&cluster->lock); | |
96303081 | 2740 | |
5e71b5d5 LZ |
2741 | if (!ret) |
2742 | return 0; | |
2743 | ||
34d52cb6 | 2744 | spin_lock(&ctl->tree_lock); |
5e71b5d5 | 2745 | |
34d52cb6 | 2746 | ctl->free_space -= bytes; |
5e71b5d5 | 2747 | if (entry->bytes == 0) { |
34d52cb6 | 2748 | ctl->free_extents--; |
4e69b598 JB |
2749 | if (entry->bitmap) { |
2750 | kfree(entry->bitmap); | |
34d52cb6 LZ |
2751 | ctl->total_bitmaps--; |
2752 | ctl->op->recalc_thresholds(ctl); | |
4e69b598 | 2753 | } |
dc89e982 | 2754 | kmem_cache_free(btrfs_free_space_cachep, entry); |
5e71b5d5 LZ |
2755 | } |
2756 | ||
34d52cb6 | 2757 | spin_unlock(&ctl->tree_lock); |
5e71b5d5 | 2758 | |
fa9c0d79 CM |
2759 | return ret; |
2760 | } | |
2761 | ||
96303081 JB |
2762 | static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, |
2763 | struct btrfs_free_space *entry, | |
2764 | struct btrfs_free_cluster *cluster, | |
1bb91902 AO |
2765 | u64 offset, u64 bytes, |
2766 | u64 cont1_bytes, u64 min_bytes) | |
96303081 | 2767 | { |
34d52cb6 | 2768 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
96303081 JB |
2769 | unsigned long next_zero; |
2770 | unsigned long i; | |
1bb91902 AO |
2771 | unsigned long want_bits; |
2772 | unsigned long min_bits; | |
96303081 JB |
2773 | unsigned long found_bits; |
2774 | unsigned long start = 0; | |
2775 | unsigned long total_found = 0; | |
4e69b598 | 2776 | int ret; |
96303081 | 2777 | |
96009762 | 2778 | i = offset_to_bit(entry->offset, ctl->unit, |
96303081 | 2779 | max_t(u64, offset, entry->offset)); |
96009762 WSH |
2780 | want_bits = bytes_to_bits(bytes, ctl->unit); |
2781 | min_bits = bytes_to_bits(min_bytes, ctl->unit); | |
96303081 JB |
2782 | |
2783 | again: | |
2784 | found_bits = 0; | |
ebb3dad4 | 2785 | for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) { |
96303081 JB |
2786 | next_zero = find_next_zero_bit(entry->bitmap, |
2787 | BITS_PER_BITMAP, i); | |
1bb91902 | 2788 | if (next_zero - i >= min_bits) { |
96303081 JB |
2789 | found_bits = next_zero - i; |
2790 | break; | |
2791 | } | |
2792 | i = next_zero; | |
2793 | } | |
2794 | ||
2795 | if (!found_bits) | |
4e69b598 | 2796 | return -ENOSPC; |
96303081 | 2797 | |
1bb91902 | 2798 | if (!total_found) { |
96303081 | 2799 | start = i; |
b78d09bc | 2800 | cluster->max_size = 0; |
96303081 JB |
2801 | } |
2802 | ||
2803 | total_found += found_bits; | |
2804 | ||
96009762 WSH |
2805 | if (cluster->max_size < found_bits * ctl->unit) |
2806 | cluster->max_size = found_bits * ctl->unit; | |
96303081 | 2807 | |
1bb91902 AO |
2808 | if (total_found < want_bits || cluster->max_size < cont1_bytes) { |
2809 | i = next_zero + 1; | |
96303081 JB |
2810 | goto again; |
2811 | } | |
2812 | ||
96009762 | 2813 | cluster->window_start = start * ctl->unit + entry->offset; |
34d52cb6 | 2814 | rb_erase(&entry->offset_index, &ctl->free_space_offset); |
4e69b598 JB |
2815 | ret = tree_insert_offset(&cluster->root, entry->offset, |
2816 | &entry->offset_index, 1); | |
b12d6869 | 2817 | ASSERT(!ret); /* -EEXIST; Logic error */ |
96303081 | 2818 | |
3f7de037 | 2819 | trace_btrfs_setup_cluster(block_group, cluster, |
96009762 | 2820 | total_found * ctl->unit, 1); |
96303081 JB |
2821 | return 0; |
2822 | } | |
2823 | ||
4e69b598 JB |
2824 | /* |
2825 | * This searches the block group for just extents to fill the cluster with. | |
1bb91902 AO |
2826 | * Try to find a cluster with at least bytes total bytes, at least one |
2827 | * extent of cont1_bytes, and other extents of at least min_bytes. |
4e69b598 | 2828 | */ |
3de85bb9 JB |
2829 | static noinline int |
2830 | setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, | |
2831 | struct btrfs_free_cluster *cluster, | |
2832 | struct list_head *bitmaps, u64 offset, u64 bytes, | |
1bb91902 | 2833 | u64 cont1_bytes, u64 min_bytes) |
4e69b598 | 2834 | { |
34d52cb6 | 2835 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
4e69b598 JB |
2836 | struct btrfs_free_space *first = NULL; |
2837 | struct btrfs_free_space *entry = NULL; | |
4e69b598 JB |
2838 | struct btrfs_free_space *last; |
2839 | struct rb_node *node; | |
4e69b598 JB |
2840 | u64 window_free; |
2841 | u64 max_extent; | |
3f7de037 | 2842 | u64 total_size = 0; |
4e69b598 | 2843 | |
34d52cb6 | 2844 | entry = tree_search_offset(ctl, offset, 0, 1); |
4e69b598 JB |
2845 | if (!entry) |
2846 | return -ENOSPC; | |
2847 | ||
2848 | /* | |
2849 | * We don't want bitmaps, so just move along until we find a normal | |
2850 | * extent entry. | |
2851 | */ | |
1bb91902 AO |
2852 | while (entry->bitmap || entry->bytes < min_bytes) { |
2853 | if (entry->bitmap && list_empty(&entry->list)) | |
86d4a77b | 2854 | list_add_tail(&entry->list, bitmaps); |
4e69b598 JB |
2855 | node = rb_next(&entry->offset_index); |
2856 | if (!node) | |
2857 | return -ENOSPC; | |
2858 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | |
2859 | } | |
2860 | ||
4e69b598 JB |
2861 | window_free = entry->bytes; |
2862 | max_extent = entry->bytes; | |
2863 | first = entry; | |
2864 | last = entry; | |
4e69b598 | 2865 | |
1bb91902 AO |
2866 | for (node = rb_next(&entry->offset_index); node; |
2867 | node = rb_next(&entry->offset_index)) { | |
4e69b598 JB |
2868 | entry = rb_entry(node, struct btrfs_free_space, offset_index); |
2869 | ||
86d4a77b JB |
2870 | if (entry->bitmap) { |
2871 | if (list_empty(&entry->list)) | |
2872 | list_add_tail(&entry->list, bitmaps); | |
4e69b598 | 2873 | continue; |
86d4a77b JB |
2874 | } |
2875 | ||
1bb91902 AO |
2876 | if (entry->bytes < min_bytes) |
2877 | continue; | |
2878 | ||
2879 | last = entry; | |
2880 | window_free += entry->bytes; | |
2881 | if (entry->bytes > max_extent) | |
4e69b598 | 2882 | max_extent = entry->bytes; |
4e69b598 JB |
2883 | } |
2884 | ||
1bb91902 AO |
2885 | if (window_free < bytes || max_extent < cont1_bytes) |
2886 | return -ENOSPC; | |
2887 | ||
4e69b598 JB |
2888 | cluster->window_start = first->offset; |
2889 | ||
2890 | node = &first->offset_index; | |
2891 | ||
2892 | /* | |
2893 | * now we've found our entries, pull them out of the free space | |
2894 | * cache and put them into the cluster rbtree | |
2895 | */ | |
2896 | do { | |
2897 | int ret; | |
2898 | ||
2899 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | |
2900 | node = rb_next(&entry->offset_index); | |
1bb91902 | 2901 | if (entry->bitmap || entry->bytes < min_bytes) |
4e69b598 JB |
2902 | continue; |
2903 | ||
34d52cb6 | 2904 | rb_erase(&entry->offset_index, &ctl->free_space_offset); |
4e69b598 JB |
2905 | ret = tree_insert_offset(&cluster->root, entry->offset, |
2906 | &entry->offset_index, 0); | |
3f7de037 | 2907 | total_size += entry->bytes; |
b12d6869 | 2908 | ASSERT(!ret); /* -EEXIST; Logic error */ |
4e69b598 JB |
2909 | } while (node && entry != last); |
2910 | ||
2911 | cluster->max_size = max_extent; | |
3f7de037 | 2912 | trace_btrfs_setup_cluster(block_group, cluster, total_size, 0); |
4e69b598 JB |
2913 | return 0; |
2914 | } | |
2915 | ||
2916 | /* | |
2917 | * This specifically looks for bitmaps that may work in the cluster, we assume | |
2918 | * that we have already failed to find extents that will work. | |
2919 | */ | |
3de85bb9 JB |
2920 | static noinline int |
2921 | setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, | |
2922 | struct btrfs_free_cluster *cluster, | |
2923 | struct list_head *bitmaps, u64 offset, u64 bytes, | |
1bb91902 | 2924 | u64 cont1_bytes, u64 min_bytes) |
4e69b598 | 2925 | { |
34d52cb6 | 2926 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
4e69b598 | 2927 | struct btrfs_free_space *entry; |
4e69b598 | 2928 | int ret = -ENOSPC; |
0f0fbf1d | 2929 | u64 bitmap_offset = offset_to_bitmap(ctl, offset); |
4e69b598 | 2930 | |
34d52cb6 | 2931 | if (ctl->total_bitmaps == 0) |
4e69b598 JB |
2932 | return -ENOSPC; |
2933 | ||
0f0fbf1d LZ |
2934 | /* |
2935 | * The bitmap that covers offset won't be in the list unless offset | |
2936 | * is just its start offset. | |
2937 | */ | |
2938 | entry = list_first_entry(bitmaps, struct btrfs_free_space, list); | |
2939 | if (entry->offset != bitmap_offset) { | |
2940 | entry = tree_search_offset(ctl, bitmap_offset, 1, 0); | |
2941 | if (entry && list_empty(&entry->list)) | |
2942 | list_add(&entry->list, bitmaps); | |
2943 | } | |
2944 | ||
86d4a77b | 2945 | list_for_each_entry(entry, bitmaps, list) { |
357b9784 | 2946 | if (entry->bytes < bytes) |
86d4a77b JB |
2947 | continue; |
2948 | ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, | |
1bb91902 | 2949 | bytes, cont1_bytes, min_bytes); |
86d4a77b JB |
2950 | if (!ret) |
2951 | return 0; | |
2952 | } | |
2953 | ||
2954 | /* | |
52621cb6 LZ |
2955 | * The bitmaps list has all the bitmaps that record free space |
2956 | * starting after offset, so no more search is required. | |
86d4a77b | 2957 | */ |
52621cb6 | 2958 | return -ENOSPC; |
4e69b598 JB |
2959 | } |
2960 | ||
fa9c0d79 CM |
2961 | /* |
2962 | * here we try to find a cluster of blocks in a block group. The goal | |
1bb91902 | 2963 | * is to find at least bytes+empty_size. |
fa9c0d79 CM |
2964 | * We might not find them all in one contiguous area. |
2965 | * | |
2966 | * returns zero and sets up cluster if things worked out, otherwise | |
2967 | * it returns -ENOSPC.
2968 | */ | |
00361589 | 2969 | int btrfs_find_space_cluster(struct btrfs_root *root, |
fa9c0d79 CM |
2970 | struct btrfs_block_group_cache *block_group, |
2971 | struct btrfs_free_cluster *cluster, | |
2972 | u64 offset, u64 bytes, u64 empty_size) | |
2973 | { | |
34d52cb6 | 2974 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
86d4a77b | 2975 | struct btrfs_free_space *entry, *tmp; |
52621cb6 | 2976 | LIST_HEAD(bitmaps); |
fa9c0d79 | 2977 | u64 min_bytes; |
1bb91902 | 2978 | u64 cont1_bytes; |
fa9c0d79 CM |
2979 | int ret; |
2980 | ||
1bb91902 AO |
2981 | /* |
2982 | * Choose the minimum extent size we'll require for this | |
2983 | * cluster. For SSD_SPREAD, don't allow any fragmentation. | |
2984 | * For metadata, allow allocations with smaller extents. For
2985 | * data, keep it dense. | |
2986 | */ | |
451d7585 | 2987 | if (btrfs_test_opt(root, SSD_SPREAD)) { |
1bb91902 | 2988 | cont1_bytes = min_bytes = bytes + empty_size; |
451d7585 | 2989 | } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) { |
1bb91902 AO |
2990 | cont1_bytes = bytes; |
2991 | min_bytes = block_group->sectorsize; | |
2992 | } else { | |
2993 | cont1_bytes = max(bytes, (bytes + empty_size) >> 2); | |
2994 | min_bytes = block_group->sectorsize; | |
2995 | } | |
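/*
 * Illustrative numbers only (not taken from the source): for a data block
 * group asking for bytes = 1MiB with empty_size = 3MiB, the else branch
 * above gives cont1_bytes = max(1MiB, 4MiB >> 2) = 1MiB and min_bytes =
 * sectorsize, i.e. the cluster needs at least one 1MiB contiguous extent
 * and a total window of bytes + empty_size built from sector-sized pieces
 * or larger.
 */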
fa9c0d79 | 2996 | |
34d52cb6 | 2997 | spin_lock(&ctl->tree_lock); |
7d0d2e8e JB |
2998 | |
2999 | /* | |
3000 | * If we know we don't have enough space to make a cluster don't even | |
3001 | * bother doing all the work to try and find one. | |
3002 | */ | |
1bb91902 | 3003 | if (ctl->free_space < bytes) { |
34d52cb6 | 3004 | spin_unlock(&ctl->tree_lock); |
7d0d2e8e JB |
3005 | return -ENOSPC; |
3006 | } | |
3007 | ||
fa9c0d79 CM |
3008 | spin_lock(&cluster->lock); |
3009 | ||
3010 | /* someone already found a cluster, hooray */ | |
3011 | if (cluster->block_group) { | |
3012 | ret = 0; | |
3013 | goto out; | |
3014 | } | |
fa9c0d79 | 3015 | |
3f7de037 JB |
3016 | trace_btrfs_find_cluster(block_group, offset, bytes, empty_size, |
3017 | min_bytes); | |
3018 | ||
86d4a77b | 3019 | ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, |
1bb91902 AO |
3020 | bytes + empty_size, |
3021 | cont1_bytes, min_bytes); | |
4e69b598 | 3022 | if (ret) |
86d4a77b | 3023 | ret = setup_cluster_bitmap(block_group, cluster, &bitmaps, |
1bb91902 AO |
3024 | offset, bytes + empty_size, |
3025 | cont1_bytes, min_bytes); | |
86d4a77b JB |
3026 | |
3027 | /* Clear our temporary list */ | |
3028 | list_for_each_entry_safe(entry, tmp, &bitmaps, list) | |
3029 | list_del_init(&entry->list); | |
fa9c0d79 | 3030 | |
4e69b598 JB |
3031 | if (!ret) { |
3032 | atomic_inc(&block_group->count); | |
3033 | list_add_tail(&cluster->block_group_list, | |
3034 | &block_group->cluster_list); | |
3035 | cluster->block_group = block_group; | |
3f7de037 JB |
3036 | } else { |
3037 | trace_btrfs_failed_cluster_setup(block_group); | |
fa9c0d79 | 3038 | } |
fa9c0d79 CM |
3039 | out: |
3040 | spin_unlock(&cluster->lock); | |
34d52cb6 | 3041 | spin_unlock(&ctl->tree_lock); |
fa9c0d79 CM |
3042 | |
3043 | return ret; | |
3044 | } | |
3045 | ||
3046 | /* | |
3047 | * simple code to zero out a cluster | |
3048 | */ | |
3049 | void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) | |
3050 | { | |
3051 | spin_lock_init(&cluster->lock); | |
3052 | spin_lock_init(&cluster->refill_lock); | |
6bef4d31 | 3053 | cluster->root = RB_ROOT; |
fa9c0d79 CM |
3054 | cluster->max_size = 0; |
3055 | INIT_LIST_HEAD(&cluster->block_group_list); | |
3056 | cluster->block_group = NULL; | |
3057 | } | |
3058 | ||
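/*
 * Helper for the trim loops below. The range being discarded is accounted as
 * reserved for the duration of the discard (unless the block group is
 * read-only), and the btrfs_trim_range queued by the caller on
 * ctl->trimming_ranges is dropped, under cache_writeout_mutex, once the space
 * has been handed back to the free space cache; presumably this is what lets
 * a concurrent free space cache writeout take the in-flight range into
 * account.
 */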
7fe1e641 LZ |
3059 | static int do_trimming(struct btrfs_block_group_cache *block_group, |
3060 | u64 *total_trimmed, u64 start, u64 bytes, | |
55507ce3 FM |
3061 | u64 reserved_start, u64 reserved_bytes, |
3062 | struct btrfs_trim_range *trim_entry) | |
f7039b1d | 3063 | { |
7fe1e641 | 3064 | struct btrfs_space_info *space_info = block_group->space_info; |
f7039b1d | 3065 | struct btrfs_fs_info *fs_info = block_group->fs_info; |
55507ce3 | 3066 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
7fe1e641 LZ |
3067 | int ret; |
3068 | int update = 0; | |
3069 | u64 trimmed = 0; | |
f7039b1d | 3070 | |
7fe1e641 LZ |
3071 | spin_lock(&space_info->lock); |
3072 | spin_lock(&block_group->lock); | |
3073 | if (!block_group->ro) { | |
3074 | block_group->reserved += reserved_bytes; | |
3075 | space_info->bytes_reserved += reserved_bytes; | |
3076 | update = 1; | |
3077 | } | |
3078 | spin_unlock(&block_group->lock); | |
3079 | spin_unlock(&space_info->lock); | |
3080 | ||
1edb647b FM |
3081 | ret = btrfs_discard_extent(fs_info->extent_root, |
3082 | start, bytes, &trimmed); | |
7fe1e641 LZ |
3083 | if (!ret) |
3084 | *total_trimmed += trimmed; | |
3085 | ||
55507ce3 | 3086 | mutex_lock(&ctl->cache_writeout_mutex); |
7fe1e641 | 3087 | btrfs_add_free_space(block_group, reserved_start, reserved_bytes); |
55507ce3 FM |
3088 | list_del(&trim_entry->list); |
3089 | mutex_unlock(&ctl->cache_writeout_mutex); | |
7fe1e641 LZ |
3090 | |
3091 | if (update) { | |
3092 | spin_lock(&space_info->lock); | |
3093 | spin_lock(&block_group->lock); | |
3094 | if (block_group->ro) | |
3095 | space_info->bytes_readonly += reserved_bytes; | |
3096 | block_group->reserved -= reserved_bytes; | |
3097 | space_info->bytes_reserved -= reserved_bytes; | |
3098 | spin_unlock(&space_info->lock); | |
3099 | spin_unlock(&block_group->lock); | |
3100 | } | |
3101 | ||
3102 | return ret; | |
3103 | } | |
3104 | ||
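/*
 * Trim the extent (non-bitmap) entries overlapping [start, end): each
 * candidate is unlinked from the free space tree, queued as a
 * btrfs_trim_range under cache_writeout_mutex, and passed to do_trimming(),
 * which re-adds the space once the discard completes.
 */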
3105 | static int trim_no_bitmap(struct btrfs_block_group_cache *block_group, | |
3106 | u64 *total_trimmed, u64 start, u64 end, u64 minlen) | |
3107 | { | |
3108 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | |
3109 | struct btrfs_free_space *entry; | |
3110 | struct rb_node *node; | |
3111 | int ret = 0; | |
3112 | u64 extent_start; | |
3113 | u64 extent_bytes; | |
3114 | u64 bytes; | |
f7039b1d LD |
3115 | |
3116 | while (start < end) { | |
55507ce3 FM |
3117 | struct btrfs_trim_range trim_entry; |
3118 | ||
3119 | mutex_lock(&ctl->cache_writeout_mutex); | |
34d52cb6 | 3120 | spin_lock(&ctl->tree_lock); |
f7039b1d | 3121 | |
34d52cb6 LZ |
3122 | if (ctl->free_space < minlen) { |
3123 | spin_unlock(&ctl->tree_lock); | |
55507ce3 | 3124 | mutex_unlock(&ctl->cache_writeout_mutex); |
f7039b1d LD |
3125 | break; |
3126 | } | |
3127 | ||
34d52cb6 | 3128 | entry = tree_search_offset(ctl, start, 0, 1); |
7fe1e641 | 3129 | if (!entry) { |
34d52cb6 | 3130 | spin_unlock(&ctl->tree_lock); |
55507ce3 | 3131 | mutex_unlock(&ctl->cache_writeout_mutex); |
f7039b1d LD |
3132 | break; |
3133 | } | |
3134 | ||
7fe1e641 LZ |
3135 | /* skip bitmaps */ |
3136 | while (entry->bitmap) { | |
3137 | node = rb_next(&entry->offset_index); | |
3138 | if (!node) { | |
34d52cb6 | 3139 | spin_unlock(&ctl->tree_lock); |
55507ce3 | 3140 | mutex_unlock(&ctl->cache_writeout_mutex); |
7fe1e641 | 3141 | goto out; |
f7039b1d | 3142 | } |
7fe1e641 LZ |
3143 | entry = rb_entry(node, struct btrfs_free_space, |
3144 | offset_index); | |
f7039b1d LD |
3145 | } |
3146 | ||
7fe1e641 LZ |
3147 | if (entry->offset >= end) { |
3148 | spin_unlock(&ctl->tree_lock); | |
55507ce3 | 3149 | mutex_unlock(&ctl->cache_writeout_mutex); |
7fe1e641 | 3150 | break; |
f7039b1d LD |
3151 | } |
3152 | ||
7fe1e641 LZ |
3153 | extent_start = entry->offset; |
3154 | extent_bytes = entry->bytes; | |
3155 | start = max(start, extent_start); | |
3156 | bytes = min(extent_start + extent_bytes, end) - start; | |
3157 | if (bytes < minlen) { | |
3158 | spin_unlock(&ctl->tree_lock); | |
55507ce3 | 3159 | mutex_unlock(&ctl->cache_writeout_mutex); |
7fe1e641 | 3160 | goto next; |
f7039b1d LD |
3161 | } |
3162 | ||
7fe1e641 LZ |
3163 | unlink_free_space(ctl, entry); |
3164 | kmem_cache_free(btrfs_free_space_cachep, entry); | |
3165 | ||
34d52cb6 | 3166 | spin_unlock(&ctl->tree_lock); |
55507ce3 FM |
3167 | trim_entry.start = extent_start; |
3168 | trim_entry.bytes = extent_bytes; | |
3169 | list_add_tail(&trim_entry.list, &ctl->trimming_ranges); | |
3170 | mutex_unlock(&ctl->cache_writeout_mutex); | |
f7039b1d | 3171 | |
7fe1e641 | 3172 | ret = do_trimming(block_group, total_trimmed, start, bytes, |
55507ce3 | 3173 | extent_start, extent_bytes, &trim_entry); |
7fe1e641 LZ |
3174 | if (ret) |
3175 | break; | |
3176 | next: | |
3177 | start += bytes; | |
f7039b1d | 3178 | |
7fe1e641 LZ |
3179 | if (fatal_signal_pending(current)) { |
3180 | ret = -ERESTARTSYS; | |
3181 | break; | |
3182 | } | |
3183 | ||
3184 | cond_resched(); | |
3185 | } | |
3186 | out: | |
3187 | return ret; | |
3188 | } | |
3189 | ||
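/*
 * Bitmap counterpart of trim_no_bitmap(): walk each bitmap covering
 * [start, end), use search_bitmap() to find a free run of at least minlen
 * bytes, clear those bits and hand the range to do_trimming().
 */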
3190 | static int trim_bitmaps(struct btrfs_block_group_cache *block_group, | |
3191 | u64 *total_trimmed, u64 start, u64 end, u64 minlen) | |
3192 | { | |
3193 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | |
3194 | struct btrfs_free_space *entry; | |
3195 | int ret = 0; | |
3196 | int ret2; | |
3197 | u64 bytes; | |
3198 | u64 offset = offset_to_bitmap(ctl, start); | |
3199 | ||
3200 | while (offset < end) { | |
3201 | bool next_bitmap = false; | |
55507ce3 | 3202 | struct btrfs_trim_range trim_entry; |
7fe1e641 | 3203 | |
55507ce3 | 3204 | mutex_lock(&ctl->cache_writeout_mutex); |
7fe1e641 LZ |
3205 | spin_lock(&ctl->tree_lock); |
3206 | ||
3207 | if (ctl->free_space < minlen) { | |
3208 | spin_unlock(&ctl->tree_lock); | |
55507ce3 | 3209 | mutex_unlock(&ctl->cache_writeout_mutex); |
7fe1e641 LZ |
3210 | break; |
3211 | } | |
3212 | ||
3213 | entry = tree_search_offset(ctl, offset, 1, 0); | |
3214 | if (!entry) { | |
3215 | spin_unlock(&ctl->tree_lock); | |
55507ce3 | 3216 | mutex_unlock(&ctl->cache_writeout_mutex); |
7fe1e641 LZ |
3217 | next_bitmap = true; |
3218 | goto next; | |
3219 | } | |
3220 | ||
3221 | bytes = minlen; | |
3222 | ret2 = search_bitmap(ctl, entry, &start, &bytes); | |
3223 | if (ret2 || start >= end) { | |
3224 | spin_unlock(&ctl->tree_lock); | |
55507ce3 | 3225 | mutex_unlock(&ctl->cache_writeout_mutex); |
7fe1e641 LZ |
3226 | next_bitmap = true; |
3227 | goto next; | |
3228 | } | |
3229 | ||
3230 | bytes = min(bytes, end - start); | |
3231 | if (bytes < minlen) { | |
3232 | spin_unlock(&ctl->tree_lock); | |
55507ce3 | 3233 | mutex_unlock(&ctl->cache_writeout_mutex); |
7fe1e641 LZ |
3234 | goto next; |
3235 | } | |
3236 | ||
3237 | bitmap_clear_bits(ctl, entry, start, bytes); | |
3238 | if (entry->bytes == 0) | |
3239 | free_bitmap(ctl, entry); | |
3240 | ||
3241 | spin_unlock(&ctl->tree_lock); | |
55507ce3 FM |
3242 | trim_entry.start = start; |
3243 | trim_entry.bytes = bytes; | |
3244 | list_add_tail(&trim_entry.list, &ctl->trimming_ranges); | |
3245 | mutex_unlock(&ctl->cache_writeout_mutex); | |
7fe1e641 LZ |
3246 | |
3247 | ret = do_trimming(block_group, total_trimmed, start, bytes, | |
55507ce3 | 3248 | start, bytes, &trim_entry); |
7fe1e641 LZ |
3249 | if (ret) |
3250 | break; | |
3251 | next: | |
3252 | if (next_bitmap) { | |
3253 | offset += BITS_PER_BITMAP * ctl->unit; | |
3254 | } else { | |
3255 | start += bytes; | |
3256 | if (start >= offset + BITS_PER_BITMAP * ctl->unit) | |
3257 | offset += BITS_PER_BITMAP * ctl->unit; | |
f7039b1d | 3258 | } |
f7039b1d LD |
3259 | |
3260 | if (fatal_signal_pending(current)) { | |
3261 | ret = -ERESTARTSYS; | |
3262 | break; | |
3263 | } | |
3264 | ||
3265 | cond_resched(); | |
3266 | } | |
3267 | ||
3268 | return ret; | |
3269 | } | |
581bb050 | 3270 | |
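/*
 * Entry point for trimming one block group (fstrim). The trimming counter
 * taken under block_group->lock defers teardown of the chunk mapping while
 * discards are in flight: if the group is removed in the meantime, the last
 * task to drop the counter also removes the extent map and frees any leftover
 * free space entries, as described in the cleanup branch below.
 */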
7fe1e641 LZ |
3271 | int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, |
3272 | u64 *trimmed, u64 start, u64 end, u64 minlen) | |
3273 | { | |
3274 | int ret; | |
3275 | ||
3276 | *trimmed = 0; | |
3277 | ||
04216820 FM |
3278 | spin_lock(&block_group->lock); |
3279 | if (block_group->removed) { | |
3280 | spin_unlock(&block_group->lock); | |
3281 | return 0; | |
3282 | } | |
3283 | atomic_inc(&block_group->trimming); | |
3284 | spin_unlock(&block_group->lock); | |
3285 | ||
7fe1e641 LZ |
3286 | ret = trim_no_bitmap(block_group, trimmed, start, end, minlen); |
3287 | if (ret) | |
04216820 | 3288 | goto out; |
7fe1e641 LZ |
3289 | |
3290 | ret = trim_bitmaps(block_group, trimmed, start, end, minlen); | |
04216820 FM |
3291 | out: |
3292 | spin_lock(&block_group->lock); | |
3293 | if (atomic_dec_and_test(&block_group->trimming) && | |
3294 | block_group->removed) { | |
3295 | struct extent_map_tree *em_tree; | |
3296 | struct extent_map *em; | |
3297 | ||
3298 | spin_unlock(&block_group->lock); | |
3299 | ||
a1e7e16e | 3300 | lock_chunks(block_group->fs_info->chunk_root); |
04216820 FM |
3301 | em_tree = &block_group->fs_info->mapping_tree.map_tree; |
3302 | write_lock(&em_tree->lock); | |
3303 | em = lookup_extent_mapping(em_tree, block_group->key.objectid, | |
3304 | 1); | |
3305 | BUG_ON(!em); /* logic error, can't happen */ | |
a1e7e16e FM |
3306 | /* |
3307 | * remove_extent_mapping() will delete us from the pinned_chunks | |
3308 | * list, which is protected by the chunk mutex. | |
3309 | */ | |
04216820 FM |
3310 | remove_extent_mapping(em_tree, em); |
3311 | write_unlock(&em_tree->lock); | |
04216820 FM |
3312 | unlock_chunks(block_group->fs_info->chunk_root); |
3313 | ||
3314 | /* once for us and once for the tree */ | |
3315 | free_extent_map(em); | |
3316 | free_extent_map(em); | |
946ddbe8 FM |
3317 | |
3318 | /* | |
3319 | * We've left one free space entry and other tasks trimming | |
3320 | * this block group have each left one entry. Free them.
3321 | */ | |
3322 | __btrfs_remove_free_space_cache(block_group->free_space_ctl); | |
04216820 FM |
3323 | } else { |
3324 | spin_unlock(&block_group->lock); | |
3325 | } | |
7fe1e641 LZ |
3326 | |
3327 | return ret; | |
3328 | } | |
3329 | ||
581bb050 LZ |
3330 | /* |
3331 | * Find the left-most item in the cache tree, and then return the | |
3332 | * smallest inode number in the item. | |
3333 | * | |
3334 | * Note: the returned inode number may not be the smallest one in | |
3335 | * the tree, if the left-most item is a bitmap. | |
3336 | */ | |
3337 | u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root) | |
3338 | { | |
3339 | struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl; | |
3340 | struct btrfs_free_space *entry = NULL; | |
3341 | u64 ino = 0; | |
3342 | ||
3343 | spin_lock(&ctl->tree_lock); | |
3344 | ||
3345 | if (RB_EMPTY_ROOT(&ctl->free_space_offset)) | |
3346 | goto out; | |
3347 | ||
3348 | entry = rb_entry(rb_first(&ctl->free_space_offset), | |
3349 | struct btrfs_free_space, offset_index); | |
3350 | ||
3351 | if (!entry->bitmap) { | |
3352 | ino = entry->offset; | |
3353 | ||
3354 | unlink_free_space(ctl, entry); | |
3355 | entry->offset++; | |
3356 | entry->bytes--; | |
3357 | if (!entry->bytes) | |
3358 | kmem_cache_free(btrfs_free_space_cachep, entry); | |
3359 | else | |
3360 | link_free_space(ctl, entry); | |
3361 | } else { | |
3362 | u64 offset = 0; | |
3363 | u64 count = 1; | |
3364 | int ret; | |
3365 | ||
3366 | ret = search_bitmap(ctl, entry, &offset, &count); | |
79787eaa | 3367 | /* Logic error; should be empty if it can't find anything */
b12d6869 | 3368 | ASSERT(!ret); |
581bb050 LZ |
3369 | |
3370 | ino = offset; | |
3371 | bitmap_clear_bits(ctl, entry, offset, 1); | |
3372 | if (entry->bytes == 0) | |
3373 | free_bitmap(ctl, entry); | |
3374 | } | |
3375 | out: | |
3376 | spin_unlock(&ctl->tree_lock); | |
3377 | ||
3378 | return ino; | |
3379 | } | |
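/*
 * Example with hypothetical numbers: if the left-most entry is the extent
 * [offset = 256, bytes = 10], the function above returns ino 256 and relinks
 * the entry as [offset = 257, bytes = 9]; if the left-most entry is a bitmap,
 * the first set bit is returned and cleared instead.
 */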
82d5902d LZ |
3380 | |
3381 | struct inode *lookup_free_ino_inode(struct btrfs_root *root, | |
3382 | struct btrfs_path *path) | |
3383 | { | |
3384 | struct inode *inode = NULL; | |
3385 | ||
57cdc8db DS |
3386 | spin_lock(&root->ino_cache_lock); |
3387 | if (root->ino_cache_inode) | |
3388 | inode = igrab(root->ino_cache_inode); | |
3389 | spin_unlock(&root->ino_cache_lock); | |
82d5902d LZ |
3390 | if (inode) |
3391 | return inode; | |
3392 | ||
3393 | inode = __lookup_free_space_inode(root, path, 0); | |
3394 | if (IS_ERR(inode)) | |
3395 | return inode; | |
3396 | ||
57cdc8db | 3397 | spin_lock(&root->ino_cache_lock); |
7841cb28 | 3398 | if (!btrfs_fs_closing(root->fs_info)) |
57cdc8db DS |
3399 | root->ino_cache_inode = igrab(inode); |
3400 | spin_unlock(&root->ino_cache_lock); | |
82d5902d LZ |
3401 | |
3402 | return inode; | |
3403 | } | |
3404 | ||
3405 | int create_free_ino_inode(struct btrfs_root *root, | |
3406 | struct btrfs_trans_handle *trans, | |
3407 | struct btrfs_path *path) | |
3408 | { | |
3409 | return __create_free_space_inode(root, trans, path, | |
3410 | BTRFS_FREE_INO_OBJECTID, 0); | |
3411 | } | |
3412 | ||
3413 | int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root) | |
3414 | { | |
3415 | struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; | |
3416 | struct btrfs_path *path; | |
3417 | struct inode *inode; | |
3418 | int ret = 0; | |
3419 | u64 root_gen = btrfs_root_generation(&root->root_item); | |
3420 | ||
4b9465cb CM |
3421 | if (!btrfs_test_opt(root, INODE_MAP_CACHE)) |
3422 | return 0; | |
3423 | ||
82d5902d LZ |
3424 | /* |
3425 | * If we're unmounting then just return, since this does a search on the | |
3426 | * normal root and not the commit root and we could deadlock. | |
3427 | */ | |
7841cb28 | 3428 | if (btrfs_fs_closing(fs_info)) |
82d5902d LZ |
3429 | return 0; |
3430 | ||
3431 | path = btrfs_alloc_path(); | |
3432 | if (!path) | |
3433 | return 0; | |
3434 | ||
3435 | inode = lookup_free_ino_inode(root, path); | |
3436 | if (IS_ERR(inode)) | |
3437 | goto out; | |
3438 | ||
3439 | if (root_gen != BTRFS_I(inode)->generation) | |
3440 | goto out_put; | |
3441 | ||
3442 | ret = __load_free_space_cache(root, inode, ctl, path, 0); | |
3443 | ||
3444 | if (ret < 0) | |
c2cf52eb SK |
3445 | btrfs_err(fs_info, |
3446 | "failed to load free ino cache for root %llu", | |
3447 | root->root_key.objectid); | |
82d5902d LZ |
3448 | out_put: |
3449 | iput(inode); | |
3450 | out: | |
3451 | btrfs_free_path(path); | |
3452 | return ret; | |
3453 | } | |
3454 | ||
3455 | int btrfs_write_out_ino_cache(struct btrfs_root *root, | |
3456 | struct btrfs_trans_handle *trans, | |
53645a91 FDBM |
3457 | struct btrfs_path *path, |
3458 | struct inode *inode) | |
82d5902d LZ |
3459 | { |
3460 | struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; | |
82d5902d | 3461 | int ret; |
c9dc4c65 | 3462 | struct btrfs_io_ctl io_ctl; |
82d5902d | 3463 | |
4b9465cb CM |
3464 | if (!btrfs_test_opt(root, INODE_MAP_CACHE)) |
3465 | return 0; | |
3466 | ||
85db36cf | 3467 | memset(&io_ctl, 0, sizeof(io_ctl)); |
c9dc4c65 | 3468 | ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl, |
85db36cf CM |
3469 | trans, path, 0); |
3470 | if (!ret) | |
3471 | ret = btrfs_wait_cache_io(root, trans, NULL, &io_ctl, path, 0); | |
3472 | ||
c09544e0 JB |
3473 | if (ret) { |
3474 | btrfs_delalloc_release_metadata(inode, inode->i_size); | |
3475 | #ifdef DEBUG | |
c2cf52eb SK |
3476 | btrfs_err(root->fs_info, |
3477 | "failed to write free ino cache for root %llu", | |
3478 | root->root_key.objectid); | |
c09544e0 JB |
3479 | #endif |
3480 | } | |
82d5902d | 3481 | |
82d5902d LZ |
3482 | return ret; |
3483 | } | |
74255aa0 JB |
3484 | |
3485 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS | |
dc11dd5d JB |
3486 | /* |
3487 | * Use this if you need to make a bitmap or extent entry specifically. It
3488 | * doesn't do any of the merging that add_free_space does; it acts a lot like
3489 | * how the free space cache loading works, so you can create really weird
3490 | * configurations.
3491 | */ | |
3492 | int test_add_free_space_entry(struct btrfs_block_group_cache *cache, | |
3493 | u64 offset, u64 bytes, bool bitmap) | |
74255aa0 | 3494 | { |
dc11dd5d JB |
3495 | struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; |
3496 | struct btrfs_free_space *info = NULL, *bitmap_info; | |
3497 | void *map = NULL; | |
3498 | u64 bytes_added; | |
3499 | int ret; | |
74255aa0 | 3500 | |
dc11dd5d JB |
3501 | again: |
3502 | if (!info) { | |
3503 | info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS); | |
3504 | if (!info) | |
3505 | return -ENOMEM; | |
74255aa0 JB |
3506 | } |
3507 | ||
dc11dd5d JB |
3508 | if (!bitmap) { |
3509 | spin_lock(&ctl->tree_lock); | |
3510 | info->offset = offset; | |
3511 | info->bytes = bytes; | |
3512 | ret = link_free_space(ctl, info); | |
3513 | spin_unlock(&ctl->tree_lock); | |
3514 | if (ret) | |
3515 | kmem_cache_free(btrfs_free_space_cachep, info); | |
3516 | return ret; | |
3517 | } | |
3518 | ||
3519 | if (!map) { | |
3520 | map = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); | |
3521 | if (!map) { | |
3522 | kmem_cache_free(btrfs_free_space_cachep, info); | |
3523 | return -ENOMEM; | |
3524 | } | |
3525 | } | |
3526 | ||
3527 | spin_lock(&ctl->tree_lock); | |
3528 | bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), | |
3529 | 1, 0); | |
3530 | if (!bitmap_info) { | |
3531 | info->bitmap = map; | |
3532 | map = NULL; | |
3533 | add_new_bitmap(ctl, info, offset); | |
3534 | bitmap_info = info; | |
20005523 | 3535 | info = NULL; |
dc11dd5d | 3536 | } |
74255aa0 | 3537 | |
dc11dd5d JB |
3538 | bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes); |
3539 | bytes -= bytes_added; | |
3540 | offset += bytes_added; | |
3541 | spin_unlock(&ctl->tree_lock); | |
74255aa0 | 3542 | |
dc11dd5d JB |
3543 | if (bytes) |
3544 | goto again; | |
74255aa0 | 3545 | |
20005523 FM |
3546 | if (info) |
3547 | kmem_cache_free(btrfs_free_space_cachep, info); | |
dc11dd5d JB |
3548 | if (map) |
3549 | kfree(map); | |
3550 | return 0; | |
74255aa0 JB |
3551 | } |
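/*
 * Hedged usage sketch (not part of this file): a sanity test would typically
 * seed a block group with these helpers and then probe it, e.g.
 *
 *	ret = test_add_free_space_entry(cache, 0, 4 * 1024 * 1024, 0);
 *	if (ret)
 *		return ret;
 *	if (!test_check_exists(cache, 0, 4 * 1024 * 1024))
 *		return -EINVAL;
 *
 * where "cache" is a block group cache prepared by the test harness.
 */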
3552 | ||
3553 | /* | |
3554 | * Checks to see if the given range is in the free space cache. This is really | |
3555 | * just used to check the absence of space, so if there is free space in the | |
3556 | * range at all we will return 1. | |
3557 | */ | |
dc11dd5d JB |
3558 | int test_check_exists(struct btrfs_block_group_cache *cache, |
3559 | u64 offset, u64 bytes) | |
74255aa0 JB |
3560 | { |
3561 | struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; | |
3562 | struct btrfs_free_space *info; | |
3563 | int ret = 0; | |
3564 | ||
3565 | spin_lock(&ctl->tree_lock); | |
3566 | info = tree_search_offset(ctl, offset, 0, 0); | |
3567 | if (!info) { | |
3568 | info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), | |
3569 | 1, 0); | |
3570 | if (!info) | |
3571 | goto out; | |
3572 | } | |
3573 | ||
3574 | have_info: | |
3575 | if (info->bitmap) { | |
3576 | u64 bit_off, bit_bytes; | |
3577 | struct rb_node *n; | |
3578 | struct btrfs_free_space *tmp; | |
3579 | ||
3580 | bit_off = offset; | |
3581 | bit_bytes = ctl->unit; | |
3582 | ret = search_bitmap(ctl, info, &bit_off, &bit_bytes); | |
3583 | if (!ret) { | |
3584 | if (bit_off == offset) { | |
3585 | ret = 1; | |
3586 | goto out; | |
3587 | } else if (bit_off > offset && | |
3588 | offset + bytes > bit_off) { | |
3589 | ret = 1; | |
3590 | goto out; | |
3591 | } | |
3592 | } | |
3593 | ||
3594 | n = rb_prev(&info->offset_index); | |
3595 | while (n) { | |
3596 | tmp = rb_entry(n, struct btrfs_free_space, | |
3597 | offset_index); | |
3598 | if (tmp->offset + tmp->bytes < offset) | |
3599 | break; | |
3600 | if (offset + bytes < tmp->offset) { | |
3601 | n = rb_prev(&tmp->offset_index);
3602 | continue; | |
3603 | } | |
3604 | info = tmp; | |
3605 | goto have_info; | |
3606 | } | |
3607 | ||
3608 | n = rb_next(&info->offset_index); | |
3609 | while (n) { | |
3610 | tmp = rb_entry(n, struct btrfs_free_space, | |
3611 | offset_index); | |
3612 | if (offset + bytes < tmp->offset) | |
3613 | break; | |
3614 | if (tmp->offset + tmp->bytes < offset) { | |
3615 | n = rb_next(&tmp->offset_index);
3616 | continue; | |
3617 | } | |
3618 | info = tmp; | |
3619 | goto have_info; | |
3620 | } | |
3621 | ||
20005523 | 3622 | ret = 0; |
74255aa0 JB |
3623 | goto out; |
3624 | } | |
3625 | ||
3626 | if (info->offset == offset) { | |
3627 | ret = 1; | |
3628 | goto out; | |
3629 | } | |
3630 | ||
3631 | if (offset > info->offset && offset < info->offset + info->bytes) | |
3632 | ret = 1; | |
3633 | out: | |
3634 | spin_unlock(&ctl->tree_lock); | |
3635 | return ret; | |
3636 | } | |
dc11dd5d | 3637 | #endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */ |