/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	if (delayed_node_cache)
		kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	atomic_set(&delayed_node->refs, 0);
	delayed_node->count = 0;
	delayed_node->in_list = 0;
	delayed_node->inode_dirty = 0;
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	delayed_node->index_cnt = 0;
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
	delayed_node->bytes_reserved = 0;
	memset(&delayed_node->inode_item, 0, sizeof(delayed_node->inode_item));
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
						struct btrfs_root *root)
{
	return root->fs_info->delayed_root;
}

static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	struct btrfs_delayed_node *node;

	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			atomic_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		atomic_inc(&node->refs);	/* can be accessed */
		atomic_inc(&node->refs);	/* cached in the inode */
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

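/*
 * A delayed node carries two kinds of references: one owned by the
 * delayed_node pointer cached in btrfs_inode (dropped by
 * btrfs_remove_delayed_node) and one per active user (dropped by
 * btrfs_release_delayed_node). The lockless ACCESS_ONCE() fast path
 * above is safe because the cached reference keeps the node alive for
 * as long as the inode still points to it.
 */
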
/* Will return either the node or ERR_PTR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = btrfs_get_delayed_node(inode);
	if (node)
		return node;

	node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	atomic_inc(&node->refs);	/* cached in the btrfs inode */
	atomic_inc(&node->refs);	/* can be accessed */

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		kmem_cache_free(delayed_node_cache, node);
		spin_unlock(&root->inode_lock);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

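/*
 * The preload/insert/-EEXIST dance above is the usual pattern for
 * inserting into a radix tree that is populated under a spinlock:
 * preallocate outside the lock, then retry the lookup from the top if
 * another task won the race. A minimal sketch of the same pattern,
 * with hypothetical names:
 *
 *	if (radix_tree_preload(GFP_NOFS))
 *		return -ENOMEM;
 *	spin_lock(&lock);
 *	ret = radix_tree_insert(&tree, index, item);
 *	spin_unlock(&lock);
 *	radix_tree_preload_end();
 *	if (ret == -EEXIST)
 *		goto again;	(someone else inserted it first)
 */
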
/*
 * Call it with delayed_node->mutex held.
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		node->in_list = 1;
	}
	spin_unlock(&root->lock);
}

/* Call it with delayed_node->mutex held. */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		node->in_list = 0;
	}
	spin_unlock(&root->lock);
}

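/*
 * n_list links a delayed node into the global list of nodes with pending
 * work; p_list additionally marks it "prepared", i.e. eligible for the
 * async worker to pick up via btrfs_first_prepared_delayed_node().
 */
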
struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!node->in_list) {	/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			kmem_cache_free(delayed_node_cache, delayed_node);
		}
		spin_unlock(&root->inode_lock);
	}
}

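/*
 * Note the re-check of refs under root->inode_lock before freeing: a
 * concurrent btrfs_get_delayed_node() may find the node in the radix
 * tree and take a new reference between the atomic_dec_and_test() and
 * the lock acquisition, in which case the node must survive.
 */
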
static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up a delayed item by key
 * @delayed_node: pointer to the delayed node
 * @key:	  the key to look up
 * @prev:	  used to store the previous item if the right item isn't found
 * @next:	  used to store the next item if the right item isn't found
 *
 * Note: if the right item isn't found, the previous and the next item
 * are returned through @prev and @next.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

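/*
 * For a missing key the helper above behaves like a combined
 * floor/ceiling search on the rb-tree. A caller that wants the first
 * item at or after a given key does:
 *
 *	item = __btrfs_lookup_delayed_item(root, key, NULL, &next);
 *	if (!item)
 *		item = next;	(smallest item greater than key, or NULL)
 *
 * which is exactly what the *_search_* wrappers below implement.
 */
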
struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
	return item;
}

struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
					   NULL, NULL);
	return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item, *next;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, &next);
	if (!item)
		item = next;

	return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item, *next;

	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
					   NULL, &next);
	if (!item)
		item = next;

	return item;
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

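/*
 * Waiters in btrfs_balance_delayed_items() are woken either when the
 * backlog drops below BTRFS_DELAYED_BACKGROUND or, to bound their wait,
 * once every BTRFS_DELAYED_BATCH completed items (tracked via items_seq).
 */
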
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}

struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

struct btrfs_delayed_item *__btrfs_next_delayed_item(
					struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
						   u64 root_id)
{
	struct btrfs_key root_key;

	if (root->objectid == root_id)
		return root;

	root_key.objectid = root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	return btrfs_read_fs_root_no_name(root->fs_info, &root_key);
}

static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(root, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which, for speed, doesn't reserve space. This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we're already accounted for.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(root->fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	} else if (src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
		spin_lock(&BTRFS_I(inode)->lock);
		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
				       &BTRFS_I(inode)->runtime_flags)) {
			spin_unlock(&BTRFS_I(inode)->lock);
			release = true;
			goto migrate;
		}
		spin_unlock(&BTRFS_I(inode)->lock);

		/*
		 * Ok, we didn't have space pre-reserved. This shouldn't happen
		 * too often but it can happen if we do delalloc to an existing
		 * inode which gets dirtied because of the time update, and then
		 * isn't touched again until after the transaction commits and
		 * then we try to write out the data. First try to be nice and
		 * reserve something strictly for us. If not, be a pain and try
		 * to steal from the delalloc block rsv.
		 */
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			goto out;

		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
		if (!ret)
			goto out;

		/*
		 * Ok, this is a problem, let's just steal from the global rsv
		 * since this really shouldn't happen that often.
		 */
		WARN_ON(1);
		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
					      dst_rsv, num_bytes);
		goto out;
	}

migrate:
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);

out:
	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv. This is to simplify people who don't normally have things
	 * migrated from their block rsv. If they go to release their
	 * reservation, that will decrease the size as well, so if migrate
	 * reduced the size we'd end up with a negative size. But for the
	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
	 * but we could in fact do this reserve/migrate dance several times
	 * between the time we did the original reservation and we'd clean it
	 * up. So to take care of this, release the space for the meta
	 * reservation here. I think it may be time for a documentation page on
	 * how block rsvs work.
	 */
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	if (release) {
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(root, src_rsv, num_bytes);
	}

	return ret;
}

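/*
 * To summarize the three paths above: a transaction with no reservation
 * of its own gets a fresh reservation (or ENOSPC); a delalloc transaction
 * first consumes the one-shot reservation flagged by
 * BTRFS_INODE_DELALLOC_META_RESERVED, then falls back to a fresh
 * reservation, and steals from the global rsv only as a last resort.
 */
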
static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(root, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(root, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of continuous items that we can insert in a batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory space, but it might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

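/*
 * Because dir index keys are handed out sequentially, a run of delayed
 * insertions is typically contiguous in key space, e.g. (ino, DIR_INDEX,
 * 100), (ino, DIR_INDEX, 101), ..., so a whole run can be flushed with a
 * single leaf setup instead of one btree descent per item.
 */
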
/*
 * This helper handles the simple insertions that don't need to extend the
 * item for new data, such as directory name index insertion and inode
 * insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * We insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of dir index items that we can delete in a batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node && delayed_node->inode_dirty) {
		BUG_ON(!delayed_node->root);
		delayed_node->inode_dirty = 0;
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = node->inode_id;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;

	ret = btrfs_lookup_inode(trans, root, path, &key, 1);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	btrfs_delayed_inode_release_metadata(root, node);
	btrfs_release_delayed_inode(node);

	return 0;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!node->inode_dirty) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

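/*
 * The flush order above -- insertions, then deletions, then the inode
 * item itself -- drains node->count towards zero, so that (barring newly
 * arrived items) the final btrfs_release_delayed_node() can dequeue the
 * node instead of re-queueing it.
 */
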
/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error; in that case the transaction is aborted and any
 * outstanding delayed items are cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, root, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	return __btrfs_run_delayed_items(trans, root, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, int nr)
{
	return __btrfs_run_delayed_items(trans, root, nr);
}

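/*
 * nr <= 0 means "flush everything"; a positive nr flushes at most that
 * many delayed nodes. Transaction commit drains the whole list via
 * btrfs_run_delayed_items(), while callers that only want to trim the
 * backlog pass a small nr to btrfs_run_delayed_items_nr().
 */
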
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		/* don't leak the reference taken by btrfs_get_delayed_node */
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->inode_dirty) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->inode_dirty)
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans, delayed_node->root);
	btrfs_btree_balance_dirty(delayed_node->root);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
	if (!delayed_node)
		return;

	BTRFS_I(inode)->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

again:
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
		goto free_path;

	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
	if (!delayed_node)
		goto free_path;

	path->leave_spinning = 1;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto release_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
	/*
	 * Maybe new delayed items have been inserted, so we need to requeue
	 * the work. Besides that, we must dequeue the empty delayed nodes
	 * to avoid the race between delayed items balance and the worker.
	 * The race looks like this:
	 *	Task1				Worker thread
	 *					count == 0, needn't requeue
	 *					  also needn't insert the
	 *					  delayed node into prepare
	 *					  list again.
	 *	add lots of delayed items
	 *	queue the delayed node
	 *	  already in the list,
	 *	  and not in the prepare
	 *	  list, it means the delayed
	 *	  node is being dealt with
	 *	  by the worker.
	 *	do delayed items balance
	 *	  the delayed node is being
	 *	  dealt with by the worker
	 *	  now, just wait.
	 *					the worker goes to idle.
	 * Task1 will sleep until the transaction is committed.
	 */
	mutex_lock(&delayed_node->mutex);
	btrfs_dequeue_delayed_node(root->fs_info->delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction_dmeta(trans, root);
	btrfs_btree_balance_dirty_nodelay(root);

release_path:
	btrfs_release_path(path);
	total_done++;

	btrfs_release_prepared_delayed_node(delayed_node);
	if (async_work->nr == 0 || total_done < async_work->nr)
		goto again;

free_path:
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}


static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_root *root, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 0;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	async_work->work.func = btrfs_async_run_delayed_root;
	async_work->work.flags = 0;
	async_work->nr = nr;

	btrfs_queue_worker(&root->fs_info->delayed_workers, &async_work->work);
	return 0;
}

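/*
 * Each queued work item keeps flushing prepared nodes until either the
 * backlog falls below BTRFS_DELAYED_BACKGROUND / 2 or async_work->nr
 * nodes have been processed (nr == 0 meaning no per-work limit).
 */
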
void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	delayed_root = btrfs_get_delayed_root(root);
	WARN_ON(btrfs_first_delayed_node(delayed_root));
}

static int refs_newer(struct btrfs_delayed_root *delayed_root,
		      int seq, int count)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + count)
		return 1;
	return 0;
}

void btrfs_balance_delayed_items(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	int seq;

	delayed_root = btrfs_get_delayed_root(root);

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	seq = atomic_read(&delayed_root->items_seq);

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int ret;
		DEFINE_WAIT(__wait);

		ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
		if (ret)
			return;

		while (1) {
			prepare_to_wait(&delayed_root->wait, &__wait,
					TASK_INTERRUPTIBLE);

			if (refs_newer(delayed_root, seq,
				       BTRFS_DELAYED_BATCH) ||
			    atomic_read(&delayed_root->items) <
			    BTRFS_DELAYED_BACKGROUND) {
				break;
			}
			if (!signal_pending(current))
				schedule();
			else
				break;
		}
		finish_wait(&delayed_root->wait, &__wait);
	}

	btrfs_wq_run_delayed_node(delayed_root, root, BTRFS_DELAYED_BATCH);
}

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, const char *name,
				   int name_len, struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	dir_item->transid = cpu_to_le64(trans->transid);
	dir_item->data_len = 0;
	dir_item->name_len = cpu_to_le16(name_len);
	dir_item->type = type;
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
	/*
	 * we reserved enough space when we started a new transaction,
	 * so metadata reservation failure is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		printk(KERN_ERR "error adding delayed dir index item "
				"(name: %s) into the insertion tree of the "
				"delayed node (root id: %llu, inode id: %llu, "
				"errno: %d)\n",
				name,
				(unsigned long long)delayed_node->root->objectid,
				(unsigned long long)delayed_node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

1542 | static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root, | |
1543 | struct btrfs_delayed_node *node, | |
1544 | struct btrfs_key *key) | |
1545 | { | |
1546 | struct btrfs_delayed_item *item; | |
1547 | ||
1548 | mutex_lock(&node->mutex); | |
1549 | item = __btrfs_lookup_delayed_insertion_item(node, key); | |
1550 | if (!item) { | |
1551 | mutex_unlock(&node->mutex); | |
1552 | return 1; | |
1553 | } | |
1554 | ||
1555 | btrfs_delayed_item_release_metadata(root, item); | |
1556 | btrfs_release_delayed_item(item); | |
1557 | mutex_unlock(&node->mutex); | |
1558 | return 0; | |
1559 | } | |
1560 | ||
1561 | int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, | |
1562 | struct btrfs_root *root, struct inode *dir, | |
1563 | u64 index) | |
1564 | { | |
1565 | struct btrfs_delayed_node *node; | |
1566 | struct btrfs_delayed_item *item; | |
1567 | struct btrfs_key item_key; | |
1568 | int ret; | |
1569 | ||
1570 | node = btrfs_get_or_create_delayed_node(dir); | |
1571 | if (IS_ERR(node)) | |
1572 | return PTR_ERR(node); | |
1573 | ||
0d0ca30f | 1574 | item_key.objectid = btrfs_ino(dir); |
16cdcec7 MX |
1575 | btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY); |
1576 | item_key.offset = index; | |
1577 | ||
1578 | ret = btrfs_delete_delayed_insertion_item(root, node, &item_key); | |
1579 | if (!ret) | |
1580 | goto end; | |
1581 | ||
1582 | item = btrfs_alloc_delayed_item(0); | |
1583 | if (!item) { | |
1584 | ret = -ENOMEM; | |
1585 | goto end; | |
1586 | } | |
1587 | ||
1588 | item->key = item_key; | |
1589 | ||
1590 | ret = btrfs_delayed_item_reserve_metadata(trans, root, item); | |
1591 | /* | |
1592 | * we reserved enough space when we started the transaction, | |
1593 | * so a metadata reservation failure here is impossible. | |
1594 | */ | |
1595 | BUG_ON(ret); | |
1596 | ||
1597 | mutex_lock(&node->mutex); | |
1598 | ret = __btrfs_add_delayed_deletion_item(node, item); | |
1599 | if (unlikely(ret)) { | |
1600 | printk(KERN_ERR "error adding delayed dir index item (index: %llu) " | |
1601 | "into the deletion tree of the delayed node " | |
1602 | "(root id: %llu, inode id: %llu, errno: %d)\n", | |
1603 | (unsigned long long)index, | |
1604 | (unsigned long long)node->root->objectid, | |
1605 | (unsigned long long)node->inode_id, | |
1606 | ret); | |
1607 | BUG(); | |
1608 | } | |
1609 | mutex_unlock(&node->mutex); | |
1610 | end: | |
1611 | btrfs_release_delayed_node(node); | |
1612 | return ret; | |
1613 | } | |
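/*
 * Note that the delete path above is two-staged: it first tries to cancel
 * a matching insertion that still lives only in memory, and queues a
 * deletion item only when no such insertion is pending.  A hedged
 * caller-side sketch (the wrapper name is illustrative):
 */
static int example_remove_dir_index(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct inode *dir, u64 index)
{
	/* 0 means the index is gone, whether a pending insertion was
	 * cancelled or a deletion item was queued */
	return btrfs_delete_delayed_dir_index(trans, root, dir, index);
}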
1614 | ||
1615 | int btrfs_inode_delayed_dir_index_count(struct inode *inode) | |
1616 | { | |
2f7e33d4 | 1617 | struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode); |
16cdcec7 MX |
1618 | |
1619 | if (!delayed_node) | |
1620 | return -ENOENT; | |
1621 | ||
1622 | /* | |
1623 | * Since we hold the i_mutex of this directory, no new directory | |
1624 | * index can be added to the delayed node and index_cnt cannot be | |
1625 | * updated now. So we needn't lock the delayed node. | |
1626 | */ | |
2f7e33d4 MX |
1627 | if (!delayed_node->index_cnt) { |
1628 | btrfs_release_delayed_node(delayed_node); | |
16cdcec7 | 1629 | return -EINVAL; |
2f7e33d4 | 1630 | } |
16cdcec7 MX |
1631 | |
1632 | BTRFS_I(inode)->index_cnt = delayed_node->index_cnt; | |
2f7e33d4 MX |
1633 | btrfs_release_delayed_node(delayed_node); |
1634 | return 0; | |
16cdcec7 MX |
1635 | } |
1636 | ||
1637 | void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, | |
1638 | struct list_head *del_list) | |
1639 | { | |
1640 | struct btrfs_delayed_node *delayed_node; | |
1641 | struct btrfs_delayed_item *item; | |
1642 | ||
1643 | delayed_node = btrfs_get_delayed_node(inode); | |
1644 | if (!delayed_node) | |
1645 | return; | |
1646 | ||
1647 | mutex_lock(&delayed_node->mutex); | |
1648 | item = __btrfs_first_delayed_insertion_item(delayed_node); | |
1649 | while (item) { | |
1650 | atomic_inc(&item->refs); | |
1651 | list_add_tail(&item->readdir_list, ins_list); | |
1652 | item = __btrfs_next_delayed_item(item); | |
1653 | } | |
1654 | ||
1655 | item = __btrfs_first_delayed_deletion_item(delayed_node); | |
1656 | while (item) { | |
1657 | atomic_inc(&item->refs); | |
1658 | list_add_tail(&item->readdir_list, del_list); | |
1659 | item = __btrfs_next_delayed_item(item); | |
1660 | } | |
1661 | mutex_unlock(&delayed_node->mutex); | |
1662 | /* | |
1663 | * This delayed node is still cached in the btrfs inode, so refs | |
1664 | * must be > 1 now, and we needn't check whether it is about to | |
1665 | * be freed. | |
1666 | * | |
1667 | * Besides that, this function is only used for readdir, and no | |
1668 | * delayed items are inserted or deleted during that time, so we | |
1669 | * needn't requeue or dequeue this delayed node either. | |
1670 | */ | |
1671 | atomic_dec(&delayed_node->refs); | |
1672 | } | |
1673 | ||
1674 | void btrfs_put_delayed_items(struct list_head *ins_list, | |
1675 | struct list_head *del_list) | |
1676 | { | |
1677 | struct btrfs_delayed_item *curr, *next; | |
1678 | ||
1679 | list_for_each_entry_safe(curr, next, ins_list, readdir_list) { | |
1680 | list_del(&curr->readdir_list); | |
1681 | if (atomic_dec_and_test(&curr->refs)) | |
1682 | kfree(curr); | |
1683 | } | |
1684 | ||
1685 | list_for_each_entry_safe(curr, next, del_list, readdir_list) { | |
1686 | list_del(&curr->readdir_list); | |
1687 | if (atomic_dec_and_test(&curr->refs)) | |
1688 | kfree(curr); | |
1689 | } | |
1690 | } | |
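/*
 * A minimal sketch (illustrative, not from the original file) of the
 * intended get/put pairing around readdir:
 */
static void example_readdir_snapshot(struct inode *dir)
{
	LIST_HEAD(ins_list);
	LIST_HEAD(del_list);

	/* take a reference on every pending item under the node mutex */
	btrfs_get_delayed_items(dir, &ins_list, &del_list);

	/* ... merge the two lists with the on-disk directory items ... */

	/* drop the references; an item is freed here if this was the last */
	btrfs_put_delayed_items(&ins_list, &del_list);
}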
1691 | ||
1692 | int btrfs_should_delete_dir_index(struct list_head *del_list, | |
1693 | u64 index) | |
1694 | { | |
1695 | struct btrfs_delayed_item *curr, *next; | |
1696 | int ret; | |
1697 | ||
1698 | if (list_empty(del_list)) | |
1699 | return 0; | |
1700 | ||
1701 | list_for_each_entry_safe(curr, next, del_list, readdir_list) { | |
1702 | if (curr->key.offset > index) | |
1703 | break; | |
1704 | ||
1705 | list_del(&curr->readdir_list); | |
1706 | ret = (curr->key.offset == index); | |
1707 | ||
1708 | if (atomic_dec_and_test(&curr->refs)) | |
1709 | kfree(curr); | |
1710 | ||
1711 | if (ret) | |
1712 | return 1; | |
1715 | } | |
1716 | return 0; | |
1717 | } | |
1718 | ||
1719 | /* | |
1720 | * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree | |
1722 | */ | |
1723 | int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent, | |
1724 | filldir_t filldir, | |
1725 | struct list_head *ins_list) | |
1726 | { | |
1727 | struct btrfs_dir_item *di; | |
1728 | struct btrfs_delayed_item *curr, *next; | |
1729 | struct btrfs_key location; | |
1730 | char *name; | |
1731 | int name_len; | |
1732 | int over = 0; | |
1733 | unsigned char d_type; | |
1734 | ||
1735 | if (list_empty(ins_list)) | |
1736 | return 0; | |
1737 | ||
1738 | /* | |
1739 | * The data of a queued delayed item never changes, so we needn't | |
1740 | * lock the items. And since we hold the i_mutex of the directory, | |
1741 | * nobody can delete any directory indexes now. | |
1742 | */ | |
1743 | list_for_each_entry_safe(curr, next, ins_list, readdir_list) { | |
1744 | list_del(&curr->readdir_list); | |
1745 | ||
1746 | if (curr->key.offset < filp->f_pos) { | |
1747 | if (atomic_dec_and_test(&curr->refs)) | |
1748 | kfree(curr); | |
1749 | continue; | |
1750 | } | |
1751 | ||
1752 | filp->f_pos = curr->key.offset; | |
1753 | ||
1754 | di = (struct btrfs_dir_item *)curr->data; | |
1755 | name = (char *)(di + 1); | |
1756 | name_len = le16_to_cpu(di->name_len); | |
1757 | ||
1758 | d_type = btrfs_filetype_table[di->type]; | |
1759 | btrfs_disk_key_to_cpu(&location, &di->location); | |
1760 | ||
1761 | over = filldir(dirent, name, name_len, curr->key.offset, | |
1762 | location.objectid, d_type); | |
1763 | ||
1764 | if (atomic_dec_and_test(&curr->refs)) | |
1765 | kfree(curr); | |
1766 | ||
1767 | if (over) | |
1768 | return 1; | |
1769 | } | |
1770 | return 0; | |
1771 | } | |
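/*
 * A hedged sketch of how the two helpers above cooperate during readdir
 * (the real caller lives in inode.c; the wrapper here is illustrative):
 */
static int example_emit_entries(struct file *filp, void *dirent,
				filldir_t filldir, struct list_head *ins_list,
				struct list_head *del_list, u64 disk_index)
{
	/* skip an on-disk entry whose index has a pending deletion */
	if (btrfs_should_delete_dir_index(del_list, disk_index))
		return 0;

	/* ... emit the on-disk entry via filldir here ... */

	/* finally emit the entries that exist only in the delayed tree */
	return btrfs_readdir_delayed_dir_index(filp, dirent, filldir, ins_list);
}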
1772 | ||
1773 | BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item, | |
1774 | generation, 64); | |
1775 | BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item, | |
1776 | sequence, 64); | |
1777 | BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item, | |
1778 | transid, 64); | |
1779 | BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64); | |
1780 | BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item, | |
1781 | nbytes, 64); | |
1782 | BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item, | |
1783 | block_group, 64); | |
1784 | BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32); | |
1785 | BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32); | |
1786 | BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32); | |
1787 | BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32); | |
1788 | BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64); | |
1789 | BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64); | |
1790 | ||
1791 | BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64); | |
1792 | BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32); | |
1793 | ||
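/*
 * Each BTRFS_SETGET_STACK_FUNCS() invocation above generates a
 * getter/setter pair that converts between CPU and on-disk (little
 * endian) byte order.  Roughly, as a sketch of the expansion (see the
 * macro definition in ctree.h):
 *
 *	static inline u64 btrfs_stack_inode_size(struct btrfs_inode_item *s)
 *	{
 *		return le64_to_cpu(s->size);
 *	}
 *	static inline void btrfs_set_stack_inode_size(
 *			struct btrfs_inode_item *s, u64 val)
 *	{
 *		s->size = cpu_to_le64(val);
 *	}
 */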
1794 | static void fill_stack_inode_item(struct btrfs_trans_handle *trans, | |
1795 | struct btrfs_inode_item *inode_item, | |
1796 | struct inode *inode) | |
1797 | { | |
2f2f43d3 EB |
1798 | btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode)); |
1799 | btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode)); | |
16cdcec7 MX |
1800 | btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size); |
1801 | btrfs_set_stack_inode_mode(inode_item, inode->i_mode); | |
1802 | btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink); | |
1803 | btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode)); | |
1804 | btrfs_set_stack_inode_generation(inode_item, | |
1805 | BTRFS_I(inode)->generation); | |
0c4d2d95 | 1806 | btrfs_set_stack_inode_sequence(inode_item, inode->i_version); |
16cdcec7 MX |
1807 | btrfs_set_stack_inode_transid(inode_item, trans->transid); |
1808 | btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev); | |
1809 | btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags); | |
ff5714cc | 1810 | btrfs_set_stack_inode_block_group(inode_item, 0); |
16cdcec7 MX |
1811 | |
1812 | btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item), | |
1813 | inode->i_atime.tv_sec); | |
1814 | btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item), | |
1815 | inode->i_atime.tv_nsec); | |
1816 | ||
1817 | btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item), | |
1818 | inode->i_mtime.tv_sec); | |
1819 | btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item), | |
1820 | inode->i_mtime.tv_nsec); | |
1821 | ||
1822 | btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item), | |
1823 | inode->i_ctime.tv_sec); | |
1824 | btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item), | |
1825 | inode->i_ctime.tv_nsec); | |
1826 | } | |
1827 | ||
2f7e33d4 MX |
1828 | int btrfs_fill_inode(struct inode *inode, u32 *rdev) |
1829 | { | |
1830 | struct btrfs_delayed_node *delayed_node; | |
1831 | struct btrfs_inode_item *inode_item; | |
1832 | struct btrfs_timespec *tspec; | |
1833 | ||
1834 | delayed_node = btrfs_get_delayed_node(inode); | |
1835 | if (!delayed_node) | |
1836 | return -ENOENT; | |
1837 | ||
1838 | mutex_lock(&delayed_node->mutex); | |
1839 | if (!delayed_node->inode_dirty) { | |
1840 | mutex_unlock(&delayed_node->mutex); | |
1841 | btrfs_release_delayed_node(delayed_node); | |
1842 | return -ENOENT; | |
1843 | } | |
1844 | ||
1845 | inode_item = &delayed_node->inode_item; | |
1846 | ||
2f2f43d3 EB |
1847 | i_uid_write(inode, btrfs_stack_inode_uid(inode_item)); |
1848 | i_gid_write(inode, btrfs_stack_inode_gid(inode_item)); | |
2f7e33d4 MX |
1849 | btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item)); |
1850 | inode->i_mode = btrfs_stack_inode_mode(inode_item); | |
bfe86848 | 1851 | set_nlink(inode, btrfs_stack_inode_nlink(inode_item)); |
2f7e33d4 MX |
1852 | inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item)); |
1853 | BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item); | |
0c4d2d95 | 1854 | inode->i_version = btrfs_stack_inode_sequence(inode_item); |
2f7e33d4 MX |
1855 | inode->i_rdev = 0; |
1856 | *rdev = btrfs_stack_inode_rdev(inode_item); | |
1857 | BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item); | |
1858 | ||
1859 | tspec = btrfs_inode_atime(inode_item); | |
1860 | inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec); | |
1861 | inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec); | |
1862 | ||
1863 | tspec = btrfs_inode_mtime(inode_item); | |
1864 | inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec); | |
1865 | inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec); | |
1866 | ||
1867 | tspec = btrfs_inode_ctime(inode_item); | |
1868 | inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec); | |
1869 | inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec); | |
1870 | ||
1871 | inode->i_generation = BTRFS_I(inode)->generation; | |
1872 | BTRFS_I(inode)->index_cnt = (u64)-1; | |
1873 | ||
1874 | mutex_unlock(&delayed_node->mutex); | |
1875 | btrfs_release_delayed_node(delayed_node); | |
1876 | return 0; | |
1877 | } | |
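/*
 * A hedged sketch of the intended use of btrfs_fill_inode(): when reading
 * an inode, consult the dirty in-memory copy first and fall back to the
 * on-disk item only when none exists.  The wrapper name and fallback
 * comments are illustrative.
 */
static void example_read_inode(struct inode *inode)
{
	u32 rdev = 0;

	if (btrfs_fill_inode(inode, &rdev) == -ENOENT) {
		/* no dirty in-memory copy: read the btrfs_inode_item
		 * from the fs tree instead */
	}

	/* ... use rdev to init special (chr/blk) inodes if needed ... */
}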
1878 | ||
16cdcec7 MX |
1879 | int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, |
1880 | struct btrfs_root *root, struct inode *inode) | |
1881 | { | |
1882 | struct btrfs_delayed_node *delayed_node; | |
aa0467d8 | 1883 | int ret = 0; |
16cdcec7 MX |
1884 | |
1885 | delayed_node = btrfs_get_or_create_delayed_node(inode); | |
1886 | if (IS_ERR(delayed_node)) | |
1887 | return PTR_ERR(delayed_node); | |
1888 | ||
1889 | mutex_lock(&delayed_node->mutex); | |
1890 | if (delayed_node->inode_dirty) { | |
1891 | fill_stack_inode_item(trans, &delayed_node->inode_item, inode); | |
1892 | goto release_node; | |
1893 | } | |
1894 | ||
7fd2ae21 JB |
1895 | ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode, |
1896 | delayed_node); | |
c06a0e12 JB |
1897 | if (ret) |
1898 | goto release_node; | |
16cdcec7 MX |
1899 | |
1900 | fill_stack_inode_item(trans, &delayed_node->inode_item, inode); | |
1901 | delayed_node->inode_dirty = 1; | |
1902 | delayed_node->count++; | |
1903 | atomic_inc(&root->fs_info->delayed_root->items); | |
1904 | release_node: | |
1905 | mutex_unlock(&delayed_node->mutex); | |
1906 | btrfs_release_delayed_node(delayed_node); | |
1907 | return ret; | |
1908 | } | |
1909 | ||
1910 | static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node) | |
1911 | { | |
1912 | struct btrfs_root *root = delayed_node->root; | |
1913 | struct btrfs_delayed_item *curr_item, *prev_item; | |
1914 | ||
1915 | mutex_lock(&delayed_node->mutex); | |
1916 | curr_item = __btrfs_first_delayed_insertion_item(delayed_node); | |
1917 | while (curr_item) { | |
1918 | btrfs_delayed_item_release_metadata(root, curr_item); | |
1919 | prev_item = curr_item; | |
1920 | curr_item = __btrfs_next_delayed_item(prev_item); | |
1921 | btrfs_release_delayed_item(prev_item); | |
1922 | } | |
1923 | ||
1924 | curr_item = __btrfs_first_delayed_deletion_item(delayed_node); | |
1925 | while (curr_item) { | |
1926 | btrfs_delayed_item_release_metadata(root, curr_item); | |
1927 | prev_item = curr_item; | |
1928 | curr_item = __btrfs_next_delayed_item(prev_item); | |
1929 | btrfs_release_delayed_item(prev_item); | |
1930 | } | |
1931 | ||
1932 | if (delayed_node->inode_dirty) { | |
1933 | btrfs_delayed_inode_release_metadata(root, delayed_node); | |
1934 | btrfs_release_delayed_inode(delayed_node); | |
1935 | } | |
1936 | mutex_unlock(&delayed_node->mutex); | |
1937 | } | |
1938 | ||
1939 | void btrfs_kill_delayed_inode_items(struct inode *inode) | |
1940 | { | |
1941 | struct btrfs_delayed_node *delayed_node; | |
1942 | ||
1943 | delayed_node = btrfs_get_delayed_node(inode); | |
1944 | if (!delayed_node) | |
1945 | return; | |
1946 | ||
1947 | __btrfs_kill_delayed_node(delayed_node); | |
1948 | btrfs_release_delayed_node(delayed_node); | |
1949 | } | |
1950 | ||
1951 | void btrfs_kill_all_delayed_nodes(struct btrfs_root *root) | |
1952 | { | |
1953 | u64 inode_id = 0; | |
1954 | struct btrfs_delayed_node *delayed_nodes[8]; | |
1955 | int i, n; | |
1956 | ||
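	/*
	 * Gang-lookup pattern: fetch up to 8 delayed nodes per pass, pin
	 * each with a reference while still under inode_lock, kill them
	 * outside the lock, then resume after the last inode id seen.
	 */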
1957 | while (1) { | |
1958 | spin_lock(&root->inode_lock); | |
1959 | n = radix_tree_gang_lookup(&root->delayed_nodes_tree, | |
1960 | (void **)delayed_nodes, inode_id, | |
1961 | ARRAY_SIZE(delayed_nodes)); | |
1962 | if (!n) { | |
1963 | spin_unlock(&root->inode_lock); | |
1964 | break; | |
1965 | } | |
1966 | ||
1967 | inode_id = delayed_nodes[n - 1]->inode_id + 1; | |
1968 | ||
1969 | for (i = 0; i < n; i++) | |
1970 | atomic_inc(&delayed_nodes[i]->refs); | |
1971 | spin_unlock(&root->inode_lock); | |
1972 | ||
1973 | for (i = 0; i < n; i++) { | |
1974 | __btrfs_kill_delayed_node(delayed_nodes[i]); | |
1975 | btrfs_release_delayed_node(delayed_nodes[i]); | |
1976 | } | |
1977 | } | |
1978 | } | |
67cde344 MX |
1979 | |
1980 | void btrfs_destroy_delayed_inodes(struct btrfs_root *root) | |
1981 | { | |
1982 | struct btrfs_delayed_root *delayed_root; | |
1983 | struct btrfs_delayed_node *curr_node, *prev_node; | |
1984 | ||
1985 | delayed_root = btrfs_get_delayed_root(root); | |
1986 | ||
1987 | curr_node = btrfs_first_delayed_node(delayed_root); | |
1988 | while (curr_node) { | |
1989 | __btrfs_kill_delayed_node(curr_node); | |
1990 | ||
1991 | prev_node = curr_node; | |
1992 | curr_node = btrfs_next_delayed_node(curr_node); | |
1993 | btrfs_release_delayed_node(prev_node); | |
1994 | } | |
1995 | } | |
1996 |