/*
 * f2fs extent cache support
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Samsung Electronics
 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
 *          Chao Yu <chao2.yu@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>
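
/*
 * Generic rb-tree lookup helpers.  They operate on struct rb_entry, a
 * bare (ofs, len) key laid out at the head of struct extent_node (see
 * struct rb_entry in f2fs.h), which is why callers below cast extent
 * nodes to rb entries.  An entry covers @ofs when
 * re->ofs <= ofs < re->ofs + re->len.
 */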
static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
							unsigned int ofs)
{
	if (cached_re) {
		if (cached_re->ofs <= ofs &&
				cached_re->ofs + cached_re->len > ofs) {
			return cached_re;
		}
	}
	return NULL;
}

static struct rb_entry *__lookup_rb_tree_slow(struct rb_root *root,
							unsigned int ofs)
{
	struct rb_node *node = root->rb_node;
	struct rb_entry *re;

	while (node) {
		re = rb_entry(node, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			node = node->rb_left;
		else if (ofs >= re->ofs + re->len)
			node = node->rb_right;
		else
			return re;
	}
	return NULL;
}

struct rb_entry *__lookup_rb_tree(struct rb_root *root,
				struct rb_entry *cached_re, unsigned int ofs)
{
	struct rb_entry *re;

	re = __lookup_rb_tree_fast(cached_re, ofs);
	if (!re)
		return __lookup_rb_tree_slow(root, ofs);

	return re;
}
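
/*
 * Compute the rb-tree link at which a new entry keyed by @ofs should be
 * attached, returning the link and filling *parent.  Landing on an
 * existing entry that already covers @ofs trips f2fs_bug_on(), since
 * callers are expected to have removed any overlapping extents first.
 */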
struct rb_node **__lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
				struct rb_root *root, struct rb_node **parent,
				unsigned int ofs)
{
	struct rb_node **p = &root->rb_node;
	struct rb_entry *re;

	while (*p) {
		*parent = *p;
		re = rb_entry(*parent, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			p = &(*p)->rb_left;
		else if (ofs >= re->ofs + re->len)
			p = &(*p)->rb_right;
		else
			f2fs_bug_on(sbi, 1);
	}

	return p;
}

/*
 * Look up the rb entry covering position @ofs in the rb-tree;
 * if hit, return the entry, otherwise return NULL.
 * @prev_ex: extent before ofs
 * @next_ex: extent after ofs
 * @insert_p: insert point for a new extent at ofs,
 * in order to simplify the insertion afterwards.
 * The tree must stay unchanged between lookup and insertion.
 */
struct rb_entry *__lookup_rb_tree_ret(struct rb_root *root,
				struct rb_entry *cached_re,
				unsigned int ofs,
				struct rb_entry **prev_entry,
				struct rb_entry **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent,
				bool force)
{
	struct rb_node **pnode = &root->rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct rb_entry *re = cached_re;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(root))
		return NULL;

	if (re) {
		if (re->ofs <= ofs && re->ofs + re->len > ofs)
			goto lookup_neighbors;
	}

	while (*pnode) {
		parent = *pnode;
		re = rb_entry(*pnode, struct rb_entry, rb_node);

		if (ofs < re->ofs)
			pnode = &(*pnode)->rb_left;
		else if (ofs >= re->ofs + re->len)
			pnode = &(*pnode)->rb_right;
		else
			goto lookup_neighbors;
	}

	*insert_p = pnode;
	*insert_parent = parent;

	re = rb_entry(parent, struct rb_entry, rb_node);
	tmp_node = parent;
	if (parent && ofs > re->ofs)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);

	tmp_node = parent;
	if (parent && ofs < re->ofs)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	return NULL;

lookup_neighbors:
	if (ofs == re->ofs || force) {
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&re->rb_node);
		*prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	if (ofs == re->ofs + re->len - 1 || force) {
		/* lookup next node for merging frontward later */
		tmp_node = rb_next(&re->rb_node);
		*next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
	}
	return re;
}
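
/*
 * Debug-only consistency check: walk the tree in order and verify that
 * no entry overlaps its in-order successor.  This compiles to a no-op
 * unless CONFIG_F2FS_CHECK_FS is set.
 */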
bool __check_rb_tree_consistence(struct f2fs_sb_info *sbi,
						struct rb_root *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first(root), *next;
	struct rb_entry *cur_re, *next_re;

	if (!cur)
		return true;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_re = rb_entry(cur, struct rb_entry, rb_node);
		next_re = rb_entry(next, struct rb_entry, rb_node);

		if (cur_re->ofs + cur_re->len > next_re->ofs) {
			f2fs_msg(sbi->sb, KERN_INFO, "inconsistent rbtree, "
				"cur(%u, %u) next(%u, %u)",
				cur_re->ofs, cur_re->len,
				next_re->ofs, next_re->len);
			return false;
		}

		cur = next;
	}
#endif
	return true;
}

static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;
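
/*
 * Allocate an extent_node for @ei and link it at the position given by
 * (@parent, @p).  GFP_ATOMIC is used, presumably because callers hold
 * the non-sleepable et->lock rwlock; allocation failure is tolerated
 * and simply returns NULL.
 */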
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p)
{
	struct extent_node *en;

	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);
	en->et = et;

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color(&en->rb_node, &et->root);
	atomic_inc(&et->node_cnt);
	atomic_inc(&sbi->total_ext_node);
	return en;
}

static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	rb_erase(&en->rb_node, &et->root);
	atomic_dec(&et->node_cnt);
	atomic_dec(&sbi->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
	kmem_cache_free(extent_node_slab, en);
}

/*
 * Flow to release an extent_node:
 * 1. list_del_init
 * 2. __detach_extent_node
 * 3. kmem_cache_free.
 */
static void __release_extent_node(struct f2fs_sb_info *sbi,
			struct extent_tree *et, struct extent_node *en)
{
	spin_lock(&sbi->extent_lock);
	f2fs_bug_on(sbi, list_empty(&en->list));
	list_del_init(&en->list);
	spin_unlock(&sbi->extent_lock);

	__detach_extent_node(sbi, et, en);
}
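
/*
 * Find or create the per-inode extent tree, indexed by inode number in
 * sbi->extent_tree_root.  A tree found on the zombie list (its inode
 * was evicted earlier) is revived by unhooking it from that list.
 */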
static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	mutex_lock(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		INIT_LIST_HEAD(&et->list);
		atomic_set(&et->node_cnt, 0);
		atomic_inc(&sbi->total_ext_tree);
	} else {
		atomic_dec(&sbi->total_zombie_tree);
		list_del_init(&et->list);
	}
	mutex_unlock(&sbi->extent_tree_lock);

	/* never died until evict_inode */
	F2FS_I(inode)->extent_tree = et;

	return et;
}

static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei)
{
	struct rb_node **p = &et->root.rb_node;
	struct extent_node *en;

	en = __attach_extent_node(sbi, et, ei, NULL, p);
	if (!en)
		return NULL;

	et->largest = en->ei;
	et->cached_en = en;
	return en;
}
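
/*
 * Release every extent_node attached to @et; returns how many nodes
 * were freed, measured as the drop in et->node_cnt.
 */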
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = atomic_read(&et->node_cnt);

	node = rb_first(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);
		__release_extent_node(sbi, et, en);
		node = next;
	}

	return count - atomic_read(&et->node_cnt);
}
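
/*
 * Invalidate the cached "largest" extent if it intersects the range
 * [fofs, fofs + len).  The inode is marked dirty because the largest
 * extent is also stored in the on-disk inode (see f2fs_init_extent_tree,
 * which reloads it from i_ext).
 */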
static void __drop_largest_extent(struct inode *inode,
					pgoff_t fofs, unsigned int len)
{
	struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;

	if (fofs < largest->fofs + largest->len &&
			fofs + len > largest->fofs) {
		largest->len = 0;
		f2fs_mark_inode_dirty_sync(inode, true);
	}
}

/* return true, if inode page is changed */
static bool __f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (!f2fs_may_extent_tree(inode)) {
		/* drop largest extent */
		if (i_ext && i_ext->len) {
			i_ext->len = 0;
			return true;
		}
		return false;
	}

	et = __grab_extent_tree(inode);

	if (!i_ext || !i_ext->len)
		return false;

	get_extent_info(&ei, i_ext);

	write_lock(&et->lock);
	if (atomic_read(&et->node_cnt))
		goto out;

	en = __init_extent_tree(sbi, et, &ei);
	if (en) {
		spin_lock(&sbi->extent_lock);
		list_add_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
	}
out:
	write_unlock(&et->lock);
	return false;
}

bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
	bool ret = __f2fs_init_extent_tree(inode, i_ext);

	if (!F2FS_I(inode)->extent_tree)
		set_inode_flag(inode, FI_NO_EXTENT);

	return ret;
}
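
/*
 * Core lookup path: test the per-tree "largest" extent first (cheapest
 * hit), then fall back to the rb-tree via the cached node.  A hit
 * refreshes the node's position in the global LRU list so the shrinker
 * reclaims cold entries first, and bumps the hit-ratio statistics.
 */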
static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en;
	bool ret = false;

	f2fs_bug_on(sbi, !et);

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	read_lock(&et->lock);

	if (et->largest.fofs <= pgofs &&
			et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_largest_node_hit(sbi);
		goto out;
	}

	en = (struct extent_node *)__lookup_rb_tree(&et->root,
				(struct rb_entry *)et->cached_en, pgofs);
	if (!en)
		goto out;

	if (en == et->cached_en)
		stat_inc_cached_node_hit(sbi);
	else
		stat_inc_rbtree_node_hit(sbi);

	*ei = en->ei;
	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	ret = true;
out:
	stat_inc_total_hit(sbi);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
	return ret;
}
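
/*
 * Try to merge @ei backward into @prev_ex and/or forward into @next_ex.
 * If both merges apply, the three ranges collapse into next_ex and
 * prev_ex is released.  Returns the surviving node, or NULL when no
 * merge was possible and the caller must insert a fresh node instead.
 */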
static struct extent_node *__try_merge_extent_node(struct inode *inode,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node *prev_ex,
				struct extent_node *next_ex)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_node *en = NULL;

	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
		prev_ex->ei.len += ei->len;
		ei = &prev_ex->ei;
		en = prev_ex;
	}

	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
		next_ex->ei.fofs = ei->fofs;
		next_ex->ei.blk = ei->blk;
		next_ex->ei.len += ei->len;
		if (en)
			__release_extent_node(sbi, et, prev_ex);
		en = next_ex;
	}

	if (!en)
		return NULL;

	__try_update_largest_extent(inode, et, en);

	spin_lock(&sbi->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&sbi->extent_lock);
	return en;
}
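
/*
 * Insert @ei as a new extent_node.  If the caller still holds a valid
 * insert position from a previous __lookup_rb_tree_ret() (the tree must
 * not have changed in between), @insert_p/@insert_parent avoid a second
 * tree walk; otherwise the position is computed here.
 */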
static struct extent_node *__insert_extent_tree(struct inode *inode,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node **insert_p,
				struct rb_node *insert_parent)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct rb_node **p = &et->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_node *en = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	p = __lookup_rb_tree_for_insert(sbi, &et->root, &parent, ei->fofs);
do_insert:
	en = __attach_extent_node(sbi, et, ei, parent, p);
	if (!en)
		return NULL;

	__try_update_largest_extent(inode, et, en);

	/* update in global extent list */
	spin_lock(&sbi->extent_lock);
	list_add_tail(&en->list, &sbi->extent_list);
	et->cached_en = en;
	spin_unlock(&sbi->extent_lock);
	return en;
}
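
/*
 * Update the extent cache for [fofs, fofs + len) now mapped at
 * @blkaddr, or invalidate the range when @blkaddr is NULL_ADDR.
 * Overlapping extents are trimmed or split first.  For example,
 * rewriting blocks [100, 164) of an old extent [0, 256) trims it to
 * [0, 100), re-inserts [164, 256) with a shifted block address, then
 * records the new [100, 164) mapping; leftover pieces shorter than
 * F2FS_MIN_EXTENT_LEN are dropped rather than kept.
 */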
static void f2fs_update_extent_tree_range(struct inode *inode,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en = NULL, *en1 = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei, dei, prev;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int end = fofs + len;
	unsigned int pos = (unsigned int)fofs;

	if (!et)
		return;

	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);

	write_lock(&et->lock);

	if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
		write_unlock(&et->lock);
		return;
	}

	prev = et->largest;
	dei.len = 0;

	/*
	 * drop the largest extent before lookup, in case it has already
	 * been shrunk from the extent tree
	 */
	__drop_largest_extent(inode, fofs, len);

	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
	en = (struct extent_node *)__lookup_rb_tree_ret(&et->root,
					(struct rb_entry *)et->cached_en, fofs,
					(struct rb_entry **)&prev_en,
					(struct rb_entry **)&next_en,
					&insert_p, &insert_parent, false);
	if (!en)
		en = next_en;

	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
	while (en && en->ei.fofs < end) {
		unsigned int org_end;
		int parts = 0;	/* # of parts current extent split into */

		next_en = en1 = NULL;

		dei = en->ei;
		org_end = dei.fofs + dei.len;
		f2fs_bug_on(sbi, pos >= org_end);

		if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			en->ei.len = pos - en->ei.fofs;
			prev_en = en;
			parts = 1;
		}

		if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
			if (parts) {
				set_extent_info(&ei, end,
						end - dei.fofs + dei.blk,
						org_end - end);
				en1 = __insert_extent_tree(inode, et, &ei,
							NULL, NULL);
				next_en = en1;
			} else {
				en->ei.fofs = end;
				en->ei.blk += end - dei.fofs;
				en->ei.len -= end - dei.fofs;
				next_en = en;
			}
			parts++;
		}

		if (!next_en) {
			struct rb_node *node = rb_next(&en->rb_node);

			next_en = rb_entry_safe(node, struct extent_node,
						rb_node);
		}

		if (parts)
			__try_update_largest_extent(inode, et, en);
		else
			__release_extent_node(sbi, et, en);

		/*
		 * if the original extent is split into zero or two parts,
		 * the extent tree has been altered by deletion or insertion,
		 * therefore invalidate pointers related to the tree.
		 */
		if (parts != 1) {
			insert_p = NULL;
			insert_parent = NULL;
		}
		en = next_en;
	}

	/* 3. update extent in extent cache */
	if (blkaddr) {
		set_extent_info(&ei, fofs, blkaddr, len);
		if (!__try_merge_extent_node(inode, et, &ei, prev_en, next_en))
			__insert_extent_tree(inode, et, &ei,
						insert_p, insert_parent);

		/* give up extent_cache, if split and small updates happen */
		if (dei.len >= 1 &&
				prev.len < F2FS_MIN_EXTENT_LEN &&
				et->largest.len < F2FS_MIN_EXTENT_LEN) {
			__drop_largest_extent(inode, 0, UINT_MAX);
			set_inode_flag(inode, FI_NO_EXTENT);
		}
	}

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		__free_extent_tree(sbi, et);

	write_unlock(&et->lock);
}
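
/*
 * Shrinker entry point.  Phase 1 reclaims whole zombie trees (inodes
 * already evicted); phase 2 frees the coldest nodes from the head of
 * the global LRU extent_list until @nr_shrink entries are gone.  Both
 * phases use trylocks, so reclaim backs off instead of stalling on
 * contended locks.
 */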
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct extent_tree *et, *next;
	struct extent_node *en;
	unsigned int node_cnt = 0, tree_cnt = 0;
	int remained;

	if (!test_opt(sbi, EXTENT_CACHE))
		return 0;

	if (!atomic_read(&sbi->total_zombie_tree))
		goto free_node;

	if (!mutex_trylock(&sbi->extent_tree_lock))
		goto out;

	/* 1. remove unreferenced extent tree */
	list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
		if (atomic_read(&et->node_cnt)) {
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et);
			write_unlock(&et->lock);
		}
		f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
		list_del_init(&et->list);
		radix_tree_delete(&sbi->extent_tree_root, et->ino);
		kmem_cache_free(extent_tree_slab, et);
		atomic_dec(&sbi->total_ext_tree);
		atomic_dec(&sbi->total_zombie_tree);
		tree_cnt++;

		if (node_cnt + tree_cnt >= nr_shrink)
			goto unlock_out;
		cond_resched();
	}
	mutex_unlock(&sbi->extent_tree_lock);

free_node:
	/* 2. remove LRU extent entries */
	if (!mutex_trylock(&sbi->extent_tree_lock))
		goto out;

	remained = nr_shrink - (node_cnt + tree_cnt);

	spin_lock(&sbi->extent_lock);
	for (; remained > 0; remained--) {
		if (list_empty(&sbi->extent_list))
			break;
		en = list_first_entry(&sbi->extent_list,
					struct extent_node, list);
		et = en->et;
		if (!write_trylock(&et->lock)) {
			/* refresh this extent node's position in extent list */
			list_move_tail(&en->list, &sbi->extent_list);
			continue;
		}

		list_del_init(&en->list);
		spin_unlock(&sbi->extent_lock);

		__detach_extent_node(sbi, et, en);

		write_unlock(&et->lock);
		node_cnt++;
		spin_lock(&sbi->extent_lock);
	}
	spin_unlock(&sbi->extent_lock);

unlock_out:
	mutex_unlock(&sbi->extent_tree_lock);
out:
	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);

	return node_cnt + tree_cnt;
}

unsigned int f2fs_destroy_extent_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et || !atomic_read(&et->node_cnt))
		return 0;

	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et);
	write_unlock(&et->lock);

	return node_cnt;
}

void f2fs_drop_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;

	set_inode_flag(inode, FI_NO_EXTENT);

	write_lock(&et->lock);
	__free_extent_tree(sbi, et);
	__drop_largest_extent(inode, 0, UINT_MAX);
	write_unlock(&et->lock);
}

void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et)
		return;

	if (inode->i_nlink && !is_bad_inode(inode) &&
					atomic_read(&et->node_cnt)) {
		mutex_lock(&sbi->extent_tree_lock);
		list_add_tail(&et->list, &sbi->zombie_list);
		atomic_inc(&sbi->total_zombie_tree);
		mutex_unlock(&sbi->extent_tree_lock);
		return;
	}

	/* free all extent info belonging to this extent tree */
	node_cnt = f2fs_destroy_extent_node(inode);

	/* delete extent tree entry in radix tree */
	mutex_lock(&sbi->extent_tree_lock);
	f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	atomic_dec(&sbi->total_ext_tree);
	mutex_unlock(&sbi->extent_tree_lock);

	F2FS_I(inode)->extent_tree = NULL;

	trace_f2fs_destroy_extent_tree(inode, node_cnt);
}
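
/*
 * Public wrappers: each one gates on f2fs_may_extent_tree() so inodes
 * that do not use the extent cache never touch the tree structures.
 */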
bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct extent_info *ei)
{
	if (!f2fs_may_extent_tree(inode))
		return false;

	return f2fs_lookup_extent_tree(inode, pgofs, ei);
}

void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
	pgoff_t fofs;
	block_t blkaddr;

	if (!f2fs_may_extent_tree(dn->inode))
		return;

	if (dn->data_blkaddr == NEW_ADDR)
		blkaddr = NULL_ADDR;
	else
		blkaddr = dn->data_blkaddr;

	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
								dn->ofs_in_node;
	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
}

void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
				pgoff_t fofs, block_t blkaddr, unsigned int len)
{
	if (!f2fs_may_extent_tree(dn->inode))
		return;

	f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
}

void init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
	mutex_init(&sbi->extent_tree_lock);
	INIT_LIST_HEAD(&sbi->extent_list);
	spin_lock_init(&sbi->extent_lock);
	atomic_set(&sbi->total_ext_tree, 0);
	INIT_LIST_HEAD(&sbi->zombie_list);
	atomic_set(&sbi->total_zombie_tree, 0);
	atomic_set(&sbi->total_ext_node, 0);
}

int __init create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}