#include <stdio.h>
#include <stdlib.h>
#include "kerncompat.h"
#include "radix-tree.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"

static int find_free_extent(struct ctree_root *orig_root, u64 num_blocks,
			    u64 search_start, u64 search_end, struct key *ins);
static int finish_current_insert(struct ctree_root *extent_root);
static int run_pending(struct ctree_root *extent_root);

/*
 * pending extents are blocks that we need to allocate in the extent
 * map while we are already in the middle of growing the map because
 * of another allocation.  To avoid recursing, they are tagged in the
 * radix tree and cleaned up after the other allocation is done.  The
 * pending tag is used in the same manner for deletes.
 */
#define CTREE_EXTENT_PENDING_DEL 0
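
/*
 * In this file the pending tag is set by free_extent() when the block
 * being freed belongs to the extent tree itself, and it is cleared by
 * del_pending_extents(), which run_pending() calls until no tagged
 * blocks remain in the cache radix tree.
 */
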
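/*
 * increment the in-tree reference count for a single block.  A free
 * extent is reserved first so the extent tree has room to grow while
 * it is being modified.
 */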
static int inc_block_ref(struct ctree_root *root, u64 blocknr)
{
	struct ctree_path path;
	int ret;
	struct key key;
	struct leaf *l;
	struct extent_item *item;
	struct key ins;

	find_free_extent(root->extent_root, 0, 0, (u64)-1, &ins);
	init_path(&path);
	key.objectid = blocknr;
	key.flags = 0;
	key.offset = 1;
	ret = search_slot(root->extent_root, &key, &path, 0, 1);
	BUG_ON(ret != 0);
	l = &path.nodes[0]->leaf;
	item = (struct extent_item *)(l->data +
				      l->items[path.slots[0]].offset);
	item->refs++;

	BUG_ON(list_empty(&path.nodes[0]->dirty));
	release_path(root->extent_root, &path);
	finish_current_insert(root->extent_root);
	run_pending(root->extent_root);
	return 0;
}

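/*
 * read the current reference count of a single block into refs
 */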
static int lookup_block_ref(struct ctree_root *root, u64 blocknr, u32 *refs)
{
	struct ctree_path path;
	int ret;
	struct key key;
	struct leaf *l;
	struct extent_item *item;

	init_path(&path);
	key.objectid = blocknr;
	key.flags = 0;
	key.offset = 1;
	ret = search_slot(root->extent_root, &key, &path, 0, 0);
	if (ret != 0)
		BUG();
	l = &path.nodes[0]->leaf;
	item = (struct extent_item *)(l->data +
				      l->items[path.slots[0]].offset);
	*refs = item->refs;
	release_path(root->extent_root, &path);
	return 0;
}

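/*
 * increment the extent reference count for every block pointed to by
 * a node.  Leaves and the extent tree itself are skipped.
 */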
int btrfs_inc_ref(struct ctree_root *root, struct tree_buffer *buf)
{
	u64 blocknr;
	int i;

	if (root == root->extent_root)
		return 0;
	if (btrfs_is_leaf(&buf->node))
		return 0;

	for (i = 0; i < btrfs_header_nritems(&buf->node.header); i++) {
		blocknr = buf->node.blockptrs[i];
		inc_block_ref(root, blocknr);
	}
	return 0;
}

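/*
 * called at the end of a commit: empty the pinned radix tree so the
 * freed blocks become allocatable again, and reset the allocation hint
 */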
int btrfs_finish_extent_commit(struct ctree_root *root)
{
	struct ctree_root *extent_root = root->extent_root;
	unsigned long gang[8];
	int ret;
	int i;

	while(1) {
		ret = radix_tree_gang_lookup(&extent_root->pinned_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++) {
			radix_tree_delete(&extent_root->pinned_radix, gang[i]);
		}
	}
	extent_root->last_insert.objectid = 0;
	extent_root->last_insert.offset = 0;
	return 0;
}

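/*
 * insert extent items for the blocks that alloc_extent handed out of
 * the current_insert reservation for the extent tree itself, then
 * clear the reservation
 */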
static int finish_current_insert(struct ctree_root *extent_root)
{
	struct key ins;
	struct extent_item extent_item;
	int i;
	int ret;

	extent_item.refs = 1;
	extent_item.owner =
		btrfs_header_parentid(&extent_root->node->node.header);
	ins.offset = 1;
	ins.flags = 0;

	for (i = 0; i < extent_root->current_insert.flags; i++) {
		ins.objectid = extent_root->current_insert.objectid + i;
		ret = insert_item(extent_root, &ins, &extent_item,
				  sizeof(extent_item));
		BUG_ON(ret);
	}
	extent_root->current_insert.offset = 0;
	return 0;
}

/*
 * remove an extent from the root, returns 0 on success
 */
int __free_extent(struct ctree_root *root, u64 blocknr, u64 num_blocks)
{
	struct ctree_path path;
	struct key key;
	struct ctree_root *extent_root = root->extent_root;
	int ret;
	struct item *item;
	struct extent_item *ei;
	struct key ins;

	key.objectid = blocknr;
	key.flags = 0;
	key.offset = num_blocks;

	find_free_extent(root, 0, 0, (u64)-1, &ins);
	init_path(&path);
	ret = search_slot(extent_root, &key, &path, -1, 1);
	if (ret) {
		printf("failed to find %Lu\n", key.objectid);
		print_tree(extent_root, extent_root->node);
		printf("failed to find %Lu\n", key.objectid);
		BUG();
	}
	item = path.nodes[0]->leaf.items + path.slots[0];
	ei = (struct extent_item *)(path.nodes[0]->leaf.data + item->offset);
	BUG_ON(ei->refs == 0);
	ei->refs--;
	if (ei->refs == 0) {
		if (root == extent_root) {
			int err;
			radix_tree_preload(GFP_KERNEL);
			err = radix_tree_insert(&extent_root->pinned_radix,
						blocknr, (void *)blocknr);
			BUG_ON(err);
			radix_tree_preload_end();
		}
		ret = del_item(extent_root, &path);
		if (root != extent_root &&
		    extent_root->last_insert.objectid < blocknr)
			extent_root->last_insert.objectid = blocknr;
		if (ret)
			BUG();
	}
	release_path(extent_root, &path);
	finish_current_insert(extent_root);
	return ret;
}

/*
 * find all the blocks marked as pending in the radix tree and remove
 * them from the extent map
 */
static int del_pending_extents(struct ctree_root *extent_root)
{
	int ret;
	struct tree_buffer *gang[4];
	int i;

	while(1) {
		ret = radix_tree_gang_lookup_tag(&extent_root->cache_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 CTREE_EXTENT_PENDING_DEL);
		if (!ret)
			break;
		for (i = 0; i < ret; i++) {
			ret = __free_extent(extent_root, gang[i]->blocknr, 1);
			radix_tree_tag_clear(&extent_root->cache_radix,
					     gang[i]->blocknr,
					     CTREE_EXTENT_PENDING_DEL);
			tree_block_release(extent_root, gang[i]);
		}
	}
	return 0;
}

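/*
 * process pending deletes until nothing in the cache radix tree is
 * tagged anymore; deleting one extent can queue further pending
 * deletes for blocks of the extent tree itself
 */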
static int run_pending(struct ctree_root *extent_root)
{
	while(radix_tree_tagged(&extent_root->cache_radix,
				CTREE_EXTENT_PENDING_DEL))
		del_pending_extents(extent_root);
	return 0;
}

/*
 * remove an extent from the root, returns 0 on success.  Extents that
 * belong to the extent tree itself are not freed right away; they are
 * tagged as pending deletes and processed later by run_pending().
 */
int free_extent(struct ctree_root *root, u64 blocknr, u64 num_blocks)
{
	struct key key;
	struct ctree_root *extent_root = root->extent_root;
	struct tree_buffer *t;
	int pending_ret;
	int ret;

	if (root == extent_root) {
		t = find_tree_block(root, blocknr);
		radix_tree_tag_set(&root->cache_radix, blocknr,
				   CTREE_EXTENT_PENDING_DEL);
		return 0;
	}
	key.objectid = blocknr;
	key.flags = 0;
	key.offset = num_blocks;
	ret = __free_extent(root, blocknr, num_blocks);
	pending_ret = run_pending(root->extent_root);
	return ret ? ret : pending_ret;
}

/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags == 0
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
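/*
 * A few blocks beyond num_blocks are reserved so the extent tree can
 * grow while the allocation is recorded; the reserved range is
 * remembered in root->current_insert.
 */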
static int find_free_extent(struct ctree_root *orig_root, u64 num_blocks,
			    u64 search_start, u64 search_end, struct key *ins)
{
	struct ctree_path path;
	struct key *key;
	int ret;
	u64 hole_size = 0;
	int slot = 0;
	u64 last_block;
	u64 test_block;
	int start_found;
	struct leaf *l;
	struct ctree_root *root = orig_root->extent_root;
	int total_needed = num_blocks;

	total_needed += (btrfs_header_level(&root->node->node.header) + 1) * 3;
	if (root->last_insert.objectid > search_start)
		search_start = root->last_insert.objectid;
check_failed:
	init_path(&path);
	ins->objectid = search_start;
	ins->offset = 0;
	ins->flags = 0;
	start_found = 0;
	ret = search_slot(root, ins, &path, 0, 0);
	if (ret < 0)
		goto error;

	if (path.slots[0] > 0)
		path.slots[0]--;

	while (1) {
		l = &path.nodes[0]->leaf;
		slot = path.slots[0];
		if (slot >= btrfs_header_nritems(&l->header)) {
			ret = next_leaf(root, &path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			if (!start_found) {
				ins->objectid = search_start;
				ins->offset = (u64)-1;
				start_found = 1;
				goto check_pending;
			}
			ins->objectid = last_block > search_start ?
					last_block : search_start;
			ins->offset = (u64)-1;
			goto check_pending;
		}
		key = &l->items[slot].key;
		if (key->objectid >= search_start) {
			if (start_found) {
				if (last_block < search_start)
					last_block = search_start;
				hole_size = key->objectid - last_block;
				if (hole_size > total_needed) {
					ins->objectid = last_block;
					ins->offset = hole_size;
					goto check_pending;
				}
			}
		}
		start_found = 1;
		last_block = key->objectid + key->offset;
		path.slots[0]++;
	}
	// FIXME -ENOSPC
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	release_path(root, &path);
	BUG_ON(ins->objectid < search_start);
	for (test_block = ins->objectid;
	     test_block < ins->objectid + total_needed; test_block++) {
		if (radix_tree_lookup(&root->pinned_radix, test_block)) {
			search_start = test_block + 1;
			goto check_failed;
		}
	}
	BUG_ON(root->current_insert.offset);
	root->current_insert.offset = total_needed - num_blocks;
	root->current_insert.objectid = ins->objectid + num_blocks;
	root->current_insert.flags = 0;
	root->last_insert.objectid = ins->objectid;
	ins->offset = num_blocks;
	return 0;
error:
	release_path(root, &path);
	return ret;
}

/*
 * finds a free extent and does all the dirty work required for
 * allocation.  The key for the extent is returned through ins.
 *
 * returns 0 if everything worked, non-zero otherwise.
 */
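/*
 * Allocations for the extent tree itself are handed out of the range
 * reserved in current_insert by the last call to find_free_extent, so
 * that recording an allocation never searches the tree it is in the
 * middle of modifying.
 */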
int alloc_extent(struct ctree_root *root, u64 num_blocks, u64 search_start,
		 u64 search_end, u64 owner, struct key *ins)
{
	int ret;
	int pending_ret;
	struct ctree_root *extent_root = root->extent_root;
	struct extent_item extent_item;

	extent_item.refs = 1;
	extent_item.owner = owner;

	if (root == extent_root) {
		BUG_ON(extent_root->current_insert.offset == 0);
		BUG_ON(num_blocks != 1);
		BUG_ON(extent_root->current_insert.flags ==
		       extent_root->current_insert.offset);
		ins->offset = 1;
		ins->objectid = extent_root->current_insert.objectid +
				extent_root->current_insert.flags++;
		return 0;
	}
	ret = find_free_extent(root, num_blocks, search_start,
			       search_end, ins);
	if (ret)
		return ret;

	ret = insert_item(extent_root, ins, &extent_item,
			  sizeof(extent_item));

	finish_current_insert(extent_root);
	pending_ret = run_pending(extent_root);
	if (ret)
		return ret;
	if (pending_ret)
		return pending_ret;
	return 0;
}

/*
 * helper function to allocate a block for a given tree
 * returns the tree buffer or NULL.
 */
struct tree_buffer *alloc_free_block(struct ctree_root *root)
{
	struct key ins;
	int ret;
	struct tree_buffer *buf;

	ret = alloc_extent(root, 1, 0, (u64)-1,
			   btrfs_header_parentid(&root->node->node.header),
			   &ins);
	if (ret) {
		BUG();
		return NULL;
	}
	buf = find_tree_block(root, ins.objectid);
	dirty_tree_block(root, buf);
	return buf;
}

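/*
 * helper for btrfs_drop_snapshot: walk down one subtree, dropping a
 * reference on each block visited.  Blocks that are shared (refs > 1)
 * or that hang off a level 1 node are not descended into; their
 * reference is simply dropped.  When the walk stops, the current block
 * is freed and *level moves up by one.
 */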
int walk_down_tree(struct ctree_root *root, struct ctree_path *path, int *level)
{
	struct tree_buffer *next;
	struct tree_buffer *cur;
	u64 blocknr;
	int ret;
	u32 refs;

	ret = lookup_block_ref(root, path->nodes[*level]->blocknr, &refs);
	BUG_ON(ret);
	if (refs > 1)
		goto out;
	while(*level > 0) {
		cur = path->nodes[*level];
		if (path->slots[*level] >=
		    btrfs_header_nritems(&cur->node.header))
			break;
		blocknr = cur->node.blockptrs[path->slots[*level]];
		ret = lookup_block_ref(root, blocknr, &refs);
		if (refs != 1 || *level == 1) {
			path->slots[*level]++;
			ret = free_extent(root, blocknr, 1);
			BUG_ON(ret);
			continue;
		}
		BUG_ON(ret);
		next = read_tree_block(root, blocknr);
		if (path->nodes[*level-1])
			tree_block_release(root, path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(&next->node.header);
		path->slots[*level] = 0;
	}
out:
	ret = free_extent(root, path->nodes[*level]->blocknr, 1);
	tree_block_release(root, path->nodes[*level]);
	path->nodes[*level] = NULL;
	*level += 1;
	BUG_ON(ret);
	return 0;
}

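/*
 * helper for btrfs_drop_snapshot: move up to the first node that still
 * has unvisited slots, freeing fully visited nodes along the way.
 * Returns 1 once the entire tree has been dropped.
 */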
int walk_up_tree(struct ctree_root *root, struct ctree_path *path, int *level)
{
	int i;
	int slot;
	int ret;

	for(i = *level; i < MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot <
		    btrfs_header_nritems(&path->nodes[i]->node.header) - 1) {
			path->slots[i]++;
			*level = i;
			return 0;
		} else {
			ret = free_extent(root,
					  path->nodes[*level]->blocknr, 1);
			tree_block_release(root, path->nodes[*level]);
			path->nodes[*level] = NULL;
			*level = i + 1;
			BUG_ON(ret);
		}
	}
	return 1;
}

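/*
 * drop all the references held by a snapshot: walk the tree rooted at
 * snap, dropping one reference on each block and freeing blocks whose
 * reference count reaches zero.  Shared subtrees are not descended
 * into.
 */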
int btrfs_drop_snapshot(struct ctree_root *root, struct tree_buffer *snap)
{
	int ret;
	int level;
	struct ctree_path path;
	int i;
	int orig_level;

	init_path(&path);

	level = btrfs_header_level(&snap->node.header);
	orig_level = level;
	path.nodes[level] = snap;
	path.slots[level] = 0;
	while(1) {
		ret = walk_down_tree(root, &path, &level);
		if (ret > 0)
			break;
		ret = walk_up_tree(root, &path, &level);
		if (ret > 0)
			break;
	}
	for (i = 0; i <= orig_level; i++) {
		if (path.nodes[i]) {
			tree_block_release(root, path.nodes[i]);
		}
	}

	return 0;
}