/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"

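/*
 * Thresholds on the total number of delayed items, used by
 * btrfs_balance_delayed_items() below: background flushing kicks in at
 * BTRFS_DELAYED_BACKGROUND, and once BTRFS_DELAYED_WRITEBACK is reached
 * the caller also waits for the count to drop back below the background
 * threshold.
 */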
#define BTRFS_DELAYED_WRITEBACK		400
#define BTRFS_DELAYED_BACKGROUND	100

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	if (delayed_node_cache)
		kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	atomic_set(&delayed_node->refs, 0);
	delayed_node->count = 0;
	delayed_node->in_list = 0;
	delayed_node->inode_dirty = 0;
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	delayed_node->index_cnt = 0;
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
	delayed_node->bytes_reserved = 0;
}

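/*
 * Two delayed items are "continuous" if they are adjacent dir index
 * items of the same directory, i.e. the second key is exactly one past
 * the first; such runs can be inserted or deleted in a single batch.
 */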
static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
						struct btrfs_root *root)
{
	return root->fs_info->delayed_root;
}

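/*
 * Look up the delayed node of an inode, creating it and inserting it
 * into the per-root radix tree if it does not exist yet.  Two references
 * are taken on a newly visible node: one for the cache pointer in the
 * btrfs inode and one for the caller.
 */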
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);	/* can be accessed */
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			spin_unlock(&root->inode_lock);
			goto again;
		}
		btrfs_inode->delayed_node = node;
		atomic_inc(&node->refs);	/* can be accessed */
		atomic_inc(&node->refs);	/* cached in the inode */
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	atomic_inc(&node->refs);	/* cached in the btrfs inode */
	atomic_inc(&node->refs);	/* can be accessed */

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		kmem_cache_free(delayed_node_cache, node);
		spin_unlock(&root->inode_lock);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		node->in_list = 1;
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		node->in_list = 0;
	}
	spin_unlock(&root->lock);
}

struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!node->in_list) {	/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

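/*
 * Drop one reference on a delayed node, requeueing it if it still holds
 * items.  When the last reference goes away, the node is removed from
 * the radix tree and freed; the refcount is re-checked under
 * root->inode_lock so a concurrent lookup cannot revive a dying node.
 */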
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			kmem_cache_free(delayed_node_cache, delayed_node);
		}
		spin_unlock(&root->inode_lock);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->block_rsv = NULL;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root: the rb-tree root to search (ins_root or del_root of a delayed node)
 * @key: the key to look up
 * @prev: used to store the prev item if the right item isn't found
 * @next: used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we return NULL and store the
 * prev item and the next item via @prev and @next.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
	return item;
}

struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
					   NULL, NULL);
	return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item, *next;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, &next);
	if (!item)
		item = next;

	return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item, *next;

	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
					   NULL, &next);
	if (!item)
		item = next;

	return item;
}

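/*
 * Link a delayed item into the insertion or deletion rb-tree of its
 * delayed node, keyed by the btrfs key.  For dir index insertions the
 * node's index_cnt is bumped past the new offset so that future index
 * numbers stay unique.
 */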
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;
	atomic_dec(&delayed_root->items);
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}

struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

static inline struct btrfs_delayed_node *btrfs_get_delayed_node(
							struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_inode->delayed_node;
	if (delayed_node)
		atomic_inc(&delayed_node->refs);

	return delayed_node;
}

static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
						   u64 root_id)
{
	struct btrfs_key root_key;

	if (root->objectid == root_id)
		return root;

	root_key.objectid = root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	return btrfs_read_fs_root_no_name(root->fs_info, &root_key);
}

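/*
 * A delayed item may not reach disk until after the transaction that
 * reserved space for it has ended (the worker joins a new transaction),
 * so the reservation is migrated from the transaction's block_rsv into
 * the global block reserve here, and released from there once the item
 * is written out.
 */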
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->global_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
	if (!ret) {
		item->bytes_reserved = num_bytes;
		item->block_rsv = dst_rsv;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	if (!item->bytes_reserved)
		return;

	btrfs_block_rsv_release(root, item->block_rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->global_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
	if (!ret)
		node->bytes_reserved = num_bytes;

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &root->fs_info->global_block_rsv;
	btrfs_block_rsv_release(root, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(root, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory space, but it might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL);

	/* insert the keys of the items */
	ret = setup_items_for_insert(trans, root, path, keys, data_size,
				     total_data_size, total_size, nitems);
	if (ret)
		goto error;

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper handles simple insertions that don't need to extend the item
 * for new data, such as directory name index insertion and inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	item = btrfs_item_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(root, path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(trans, root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(root, path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

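/*
 * Starting from the leaf slot that @path points at, count how many leaf
 * items match the run of continuous delayed deletion items beginning at
 * @item, and delete them with a single btrfs_del_items() call.
 */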
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(root, path);
		if (curr)
			goto do_again;
		else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(root, path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(root, path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node && delayed_node->inode_dirty) {
		BUG_ON(!delayed_node->root);
		delayed_node->inode_dirty = 0;
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		atomic_dec(&delayed_root->items);
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND &&
		    waitqueue_active(&delayed_root->wait))
			wake_up(&delayed_root->wait);
	}
}

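/*
 * Copy the cached stack version of the inode item back into the inode
 * item in the fs tree, then drop the node's metadata reservation and
 * its dirty-inode state.
 */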
static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int ret;

	mutex_lock(&node->mutex);
	if (!node->inode_dirty) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	key.objectid = node->inode_id;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;
	ret = btrfs_lookup_inode(trans, root, path, &key, 1);
	if (ret > 0) {
		btrfs_release_path(root, path);
		mutex_unlock(&node->mutex);
		return -ENOENT;
	} else if (ret < 0) {
		mutex_unlock(&node->mutex);
		return ret;
	}

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);

	btrfs_delayed_inode_release_metadata(root, node);
	btrfs_release_delayed_inode(node);
	mutex_unlock(&node->mutex);

	return 0;
}

/* Called when committing the transaction. */
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node) {
		root = curr_node->root;
		ret = btrfs_insert_delayed_items(trans, path, root,
						 curr_node);
		if (!ret)
			ret = btrfs_delete_delayed_items(trans, path, root,
							 curr_node);
		if (!ret)
			ret = btrfs_update_delayed_inode(trans, root, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	btrfs_free_path(path);
	return ret;
}

static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
					      struct btrfs_delayed_node *node)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (!ret)
		ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (!ret)
		ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	btrfs_free_path(path);

	return ret;
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

void btrfs_remove_delayed_node(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
	if (!delayed_node)
		return;

	BTRFS_I(inode)->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_node {
	struct btrfs_root *root;
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
{
	struct btrfs_async_delayed_node *async_node;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	unsigned long nr = 0;
	int need_requeue = 0;
	int ret;

	async_node = container_of(work, struct btrfs_async_delayed_node, work);

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	path->leave_spinning = 1;

	delayed_node = async_node->delayed_node;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root, 0);
	if (IS_ERR(trans))
		goto free_path;

	ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
	if (!ret)
		ret = btrfs_delete_delayed_items(trans, path, root,
						 delayed_node);

	if (!ret)
		btrfs_update_delayed_inode(trans, root, path, delayed_node);

	/*
	 * Maybe new delayed items have been inserted, so we need to requeue
	 * the work. Besides that, we must dequeue the empty delayed nodes
	 * to avoid the race between delayed items balance and the worker.
	 * The race is like this:
	 *	Task1				Worker thread
	 *					count == 0, needn't requeue
	 *					  also needn't insert the
	 *					  delayed node into the prepare
	 *					  list again.
	 *	add lots of delayed items
	 *	queue the delayed node
	 *	  already in the list,
	 *	  and not in the prepare
	 *	  list, it means the delayed
	 *	  node is being dealt with
	 *	  by the worker.
	 *	do delayed items balance
	 *	  the delayed node is being
	 *	  dealt with by the worker
	 *	  now, just wait.
	 *	the worker goes idle.
	 * Task1 will sleep until the transaction is committed.
	 */
	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		need_requeue = 1;
	else
		btrfs_dequeue_delayed_node(root->fs_info->delayed_root,
					   delayed_node);
	mutex_unlock(&delayed_node->mutex);

	nr = trans->blocks_used;

	btrfs_end_transaction_dmeta(trans, root);
	__btrfs_btree_balance_dirty(root, nr);
free_path:
	btrfs_free_path(path);
out:
	if (need_requeue)
		btrfs_requeue_work(&async_node->work);
	else {
		btrfs_release_prepared_delayed_node(delayed_node);
		kfree(async_node);
	}
}

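/*
 * Hand prepared delayed nodes to the delayed_workers thread pool.  With
 * @all set, every prepared node is dispatched; otherwise we stop after
 * four of them.
 */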
static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_root *root, int all)
{
	struct btrfs_async_delayed_node *async_node;
	struct btrfs_delayed_node *curr;
	int count = 0;

again:
	curr = btrfs_first_prepared_delayed_node(delayed_root);
	if (!curr)
		return 0;

	async_node = kmalloc(sizeof(*async_node), GFP_NOFS);
	if (!async_node) {
		btrfs_release_prepared_delayed_node(curr);
		return -ENOMEM;
	}

	async_node->root = root;
	async_node->delayed_node = curr;

	async_node->work.func = btrfs_async_run_delayed_node_done;
	async_node->work.flags = 0;

	btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work);
	count++;

	if (all || count < 4)
		goto again;

	return 0;
}

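/*
 * Throttle delayed item producers.  Below BTRFS_DELAYED_BACKGROUND this
 * is a no-op; at BTRFS_DELAYED_WRITEBACK and above, all prepared nodes
 * are flushed and we wait (up to HZ jiffies, i.e. one second) for the
 * item count to fall back under the background threshold; in between,
 * a few nodes are flushed asynchronously.
 */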
void btrfs_balance_delayed_items(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;

	delayed_root = btrfs_get_delayed_root(root);

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int ret;
		ret = btrfs_wq_run_delayed_node(delayed_root, root, 1);
		if (ret)
			return;

		wait_event_interruptible_timeout(
				delayed_root->wait,
				(atomic_read(&delayed_root->items) <
				 BTRFS_DELAYED_BACKGROUND),
				HZ);
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, root, 0);
}

int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, const char *name,
				   int name_len, struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible
	 */
	BUG_ON(ret);

	delayed_item->key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	dir_item->transid = cpu_to_le64(trans->transid);
	dir_item->data_len = 0;
	dir_item->name_len = cpu_to_le16(name_len);
	dir_item->type = type;
	memcpy((char *)(dir_item + 1), name, name_len);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		printk(KERN_ERR "err add delayed dir index item(name: %s) into "
				"the insertion tree of the delayed node"
				" (root id: %llu, inode id: %llu, errno: %d)\n",
				name,
				(unsigned long long)delayed_node->root->objectid,
				(unsigned long long)delayed_node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

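/*
 * If the dir index being deleted is still pending in the insertion
 * tree, the two operations cancel out: drop the queued insertion item
 * instead of queueing a deletion item.  Returns 1 if no matching
 * insertion item was found.
 */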
static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, struct inode *dir,
				   u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		printk(KERN_ERR "err add delayed dir index item(index: %llu) "
				"into the deletion tree of the delayed node"
				" (root id: %llu, inode id: %llu, errno: %d)\n",
				(unsigned long long)index,
				(unsigned long long)node->root->objectid,
				(unsigned long long)node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node;
	int ret = 0;

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we have held i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt)
		return -EINVAL;

	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
	return ret;
}

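/*
 * Snapshot the pending insertion and deletion items of a directory for
 * readdir.  Each item gets an extra reference so the lists stay valid
 * after the node's mutex is dropped; btrfs_put_delayed_items() drops
 * those references.
 */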
void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check it is going to be freed
	 * or not.
	 *
	 * Besides that, this function is used to read dir, we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	atomic_dec(&delayed_node->refs);
}

void btrfs_put_delayed_items(struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr, *next;
	int ret;

	if (list_empty(del_list))
		return 0;

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;

		list_del(&curr->readdir_list);
		ret = (curr->key.offset == index);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (ret)
			return 1;
		else
			continue;
	}
	return 0;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
				    filldir_t filldir,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible. So
	 * we needn't lock them. And we have held i_mutex of the
	 * directory, nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < filp->f_pos) {
			if (atomic_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		filp->f_pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = le16_to_cpu(di->name_len);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = filldir(dirent, name, name_len, curr->key.offset,
			       location.objectid, d_type);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
	}
	return 0;
}

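/*
 * Stack-copy accessors for the btrfs_inode_item cached inside the
 * delayed node: these operate on the in-memory copy of the structure,
 * not on an extent buffer.
 */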
BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item,
			 generation, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item,
			 sequence, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item,
			 transid, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item,
			 nbytes, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item,
			 block_group, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);

BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);

static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, inode->i_uid);
	btrfs_set_stack_inode_gid(inode_item, inode->i_gid);
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence);
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item,
					  BTRFS_I(inode)->block_group);

	btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
				      inode->i_ctime.tv_nsec);
}

int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->inode_dirty) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
	/*
	 * we must reserve enough space when we start a new transaction,
	 * so reserving metadata failure is impossible
	 */
	BUG_ON(ret);

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	delayed_node->inode_dirty = 1;
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

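/*
 * Throw away everything a delayed node still holds: queued insertion
 * and deletion items, their reservations, and the dirty inode state.
 * Used when the inode or the whole root is going away.
 */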
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (delayed_node->inode_dirty) {
		btrfs_delayed_inode_release_metadata(root, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

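/*
 * Walk the whole radix tree in gang-lookup batches of 8, pinning each
 * batch with extra references before dropping inode_lock, then kill and
 * release every node.
 */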
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			atomic_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}