// SPDX-License-Identifier: GPL-2.0
/*
 *  fs/ext4/extents_status.c
 *
 * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
 * Modified by
 *	Allison Henderson <achender@linux.vnet.ibm.com>
 *	Hugh Dickins <hughd@google.com>
 *	Zheng Liu <wenqing.lz@taobao.com>
 *
 * Ext4 extents status tree core functions.
 */
#include <linux/list_sort.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "ext4.h"
#include <trace/events/ext4.h>

/*
 * According to previous discussion in the Ext4 Developer Workshop, we
 * will introduce a new structure called io tree to track all extent
 * status in order to solve some problems that we have met
 * (e.g. reservation space warning), and to provide extent-level locking.
 * The delay extent tree is the first step towards this goal.  It was
 * originally built by Yongqiang Yang.  At that time it was called the
 * delay extent tree, whose only goal was to track delayed extents in
 * memory to simplify the implementation of fiemap and bigalloc, and to
 * introduce lseek SEEK_DATA/SEEK_HOLE support.  That is why it was still
 * called the delay extent tree in the first commit.  But to better
 * describe what it does, it has been renamed to the extent status tree.
 *
 * Step1:
 *	Currently the first step has been done.  All delayed extents are
 *	tracked in the tree.  The tree maintains a delayed extent from the
 *	time a delayed allocation is issued until the delayed extent is
 *	written out or invalidated.  Therefore the implementations of fiemap
 *	and bigalloc are simplified, and SEEK_DATA/SEEK_HOLE are introduced.
 *
 * The following comment describes the implementation of the extent
 * status tree and future work.
 *
 * Step2:
 *	In this step all extent status is tracked by the extent status tree.
 *	Thus, we can first try to look up a block mapping in this tree before
 *	searching the extent tree.  Hence, the single extent cache can be
 *	removed because the extent status tree can do a better job.  Extents
 *	in the status tree are loaded on-demand.  Therefore, the extent
 *	status tree may not contain all of the extents in a file.  Meanwhile
 *	we define a shrinker to reclaim memory from the extent status tree
 *	because a fragmented extent tree will make the status tree cost too
 *	much memory.  Written/unwritten/hole extents in the tree will be
 *	reclaimed by this shrinker when we are under high memory pressure.
 *	Delayed extents will not be reclaimed because fiemap, bigalloc, and
 *	seek_data/hole need them.
 */
/*
 * Extent status tree implementation for ext4.
 *
 *
 * ==========================================================================
 * Extent status tree tracks all extent status.
 *
 * 1. Why do we need to implement an extent status tree?
 *
 * Without an extent status tree, ext4 identifies a delayed extent by
 * looking up the page cache, which results in complicated, buggy,
 * and inefficient code.
 *
 * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know if a
 * block or a range of blocks belongs to a delayed extent.
 *
 * Let us have a look at how they work without an extent status tree.
 *
 *   --	FIEMAP
 *	FIEMAP looks up the page cache to identify delayed allocations
 *	from holes.
 *
 *   --	SEEK_HOLE/DATA
 *	SEEK_HOLE/DATA has the same problem as FIEMAP.
 *
 *   --	bigalloc
 *	bigalloc looks up the page cache to figure out if a block is
 *	already under delayed allocation or not, to determine whether
 *	a quota reservation is needed for the cluster.
 *
 *   --	writeout
 *	Writeout looks up the whole page cache to see if a buffer is
 *	mapped.  If there are not very many delayed buffers, this is
 *	time consuming.
 *
 * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA,
 * bigalloc and writeout can figure out if a block or a range of
 * blocks is under delayed allocation (belongs to a delayed extent) or
 * not by searching the extent status tree.
 *
 *
 * ==========================================================================
 * 2. Ext4 extent status tree implementation
 *
 *   --	extent
 *	An extent is a range of blocks which are contiguous logically and
 *	physically.  Unlike an extent in the extent tree, this extent is
 *	an in-memory struct; there is no corresponding on-disk data.  There
 *	is no limit on the length of an extent, so an extent can contain as
 *	many blocks as are contiguous logically and physically.
 *
 *   --	extent status tree
 *	Every inode has an extent status tree and all allocated blocks
 *	are added to the tree with different status.  The extents in the
 *	tree are ordered by logical block number.
 *
 *   --	operations on an extent status tree
 *	There are three important operations on an extent status tree:
 *	finding the next extent, adding an extent (a range of blocks) and
 *	removing an extent.
 *
 *   --	races on an extent status tree
 *	The extent status tree is protected by inode->i_es_lock.
 *
 *   --	memory consumption
 *	A fragmented extent tree will make the extent status tree cost too
 *	much memory.  Hence, we will reclaim written/unwritten/hole extents
 *	from the tree under heavy memory pressure.
 *
 *
 * ==========================================================================
 * 3. Performance analysis
 *
 *	1. There is a cached extent for write access, so if writes are
 *	   not very random, adding-space operations take O(1) time.
 *
 *	2. Code is much simpler, more readable, more maintainable and
 *	   easier to test.
 *
 *
 * ==========================================================================
 * 4. TODO list
 *
 *   -- Refactor delayed space reservation
 *
 *   -- Extent-level locking
 */
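
/*
 * Illustrative sketch (not part of the build): how a caller typically
 * consults the status tree before falling back to the on-disk extent
 * tree.  The helper name es_sketch_block_mapped() is hypothetical; the
 * ext4_es_* calls are the real APIs implemented below.
 */
#if 0
static bool es_sketch_block_mapped(struct inode *inode, ext4_lblk_t lblk)
{
	struct extent_status es;

	/* O(log n) in-memory lookup, no disk I/O */
	if (!ext4_es_lookup_extent(inode, lblk, NULL, &es))
		return false;	/* not cached; caller reads the extent tree */

	/* delayed and hole extents have no physical block yet */
	return ext4_es_is_written(&es) || ext4_es_is_unwritten(&es);
}
#endif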

static struct kmem_cache *ext4_es_cachep;
static struct kmem_cache *ext4_pending_cachep;

static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end, int *reserved);
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei);
static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
			     ext4_lblk_t len);
int __init ext4_init_es(void)
{
	ext4_es_cachep = kmem_cache_create("ext4_extent_status",
					   sizeof(struct extent_status),
					   0, (SLAB_RECLAIM_ACCOUNT), NULL);
	if (ext4_es_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_es(void)
{
	kmem_cache_destroy(ext4_es_cachep);
}

void ext4_es_init_tree(struct ext4_es_tree *tree)
{
	tree->root = RB_ROOT;
	tree->cache_es = NULL;
}

#ifdef ES_DEBUG__
static void ext4_es_print_tree(struct inode *inode)
{
	struct ext4_es_tree *tree;
	struct rb_node *node;

	printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_es_tree;
	node = rb_first(&tree->root);
	while (node) {
		struct extent_status *es;

		es = rb_entry(node, struct extent_status, rb_node);
		printk(KERN_DEBUG " [%u/%u) %llu %x",
		       es->es_lblk, es->es_len,
		       ext4_es_pblock(es), ext4_es_status(es));
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_es_print_tree(inode)
#endif

static inline ext4_lblk_t ext4_es_end(struct extent_status *es)
{
	BUG_ON(es->es_lblk + es->es_len < es->es_lblk);
	return es->es_lblk + es->es_len - 1;
}

/*
 * Search through the tree for a delayed extent with a given offset.  If
 * it can't be found, try to find the next extent.
 */
static struct extent_status *__es_tree_search(struct rb_root *root,
					      ext4_lblk_t lblk)
{
	struct rb_node *node = root->rb_node;
	struct extent_status *es = NULL;

	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es))
			node = node->rb_right;
		else
			return es;
	}

	if (es && lblk < es->es_lblk)
		return es;

	if (es && lblk > ext4_es_end(es)) {
		node = rb_next(&es->rb_node);
		return node ? rb_entry(node, struct extent_status, rb_node) :
			      NULL;
	}

	return NULL;
}

/*
 * ext4_es_find_extent_range - find extent with specified status within block
 *                             range or next extent following block range in
 *                             extents status tree
 *
 * @inode - file containing the range
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block defining start of range
 * @end - logical block defining end of range
 * @es - extent found, if any
 *
 * Find the first extent within the block range specified by @lblk and @end
 * in the extents status tree that satisfies @matching_fn.  If a match
 * is found, it's returned in @es.  If not, and a matching extent is found
 * beyond the block range, it's returned in @es.  If no match is found, an
 * extent is returned in @es whose es_lblk, es_len, and es_pblk components
 * are 0.
 */
static void __es_find_extent_range(struct inode *inode,
				   int (*matching_fn)(struct extent_status *es),
				   ext4_lblk_t lblk, ext4_lblk_t end,
				   struct extent_status *es)
{
	struct ext4_es_tree *tree = NULL;
	struct extent_status *es1 = NULL;
	struct rb_node *node;

	WARN_ON(es == NULL);
	WARN_ON(end < lblk);

	tree = &EXT4_I(inode)->i_es_tree;

	/* see if the extent has been cached */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u) %llu %x\n",
				 lblk, es1->es_lblk, es1->es_len,
				 ext4_es_pblock(es1), ext4_es_status(es1));
			goto out;
		}
	}

	es1 = __es_tree_search(&tree->root, lblk);

out:
	if (es1 && !matching_fn(es1)) {
		while ((node = rb_next(&es1->rb_node)) != NULL) {
			es1 = rb_entry(node, struct extent_status, rb_node);
			if (es1->es_lblk > end) {
				es1 = NULL;
				break;
			}
			if (matching_fn(es1))
				break;
		}
	}

	if (es1 && matching_fn(es1)) {
		tree->cache_es = es1;
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
	}
}

/*
 * Locking for __es_find_extent_range() for external use
 */
void ext4_es_find_extent_range(struct inode *inode,
			       int (*matching_fn)(struct extent_status *es),
			       ext4_lblk_t lblk, ext4_lblk_t end,
			       struct extent_status *es)
{
	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return;

	trace_ext4_es_find_extent_range_enter(inode, lblk);

	read_lock(&EXT4_I(inode)->i_es_lock);
	__es_find_extent_range(inode, matching_fn, lblk, end, es);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_find_extent_range_exit(inode, es);
}

/*
 * __es_scan_range - search block range for block with specified status
 *                   in extents status tree
 *
 * @inode - file containing the range
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block defining start of range
 * @end - logical block defining end of range
 *
 * Returns true if at least one block in the specified block range satisfies
 * the criterion specified by @matching_fn, and false if not.  If at least
 * one extent has the specified status, then there is at least one block
 * in the range with that status.  Should only be called by code that has
 * taken i_es_lock.
 */
static bool __es_scan_range(struct inode *inode,
			    int (*matching_fn)(struct extent_status *es),
			    ext4_lblk_t start, ext4_lblk_t end)
{
	struct extent_status es;

	__es_find_extent_range(inode, matching_fn, start, end, &es);
	if (es.es_len == 0)
		return false;   /* no matching extent in the tree */
	else if (es.es_lblk <= start &&
		 start < es.es_lblk + es.es_len)
		return true;
	else if (start <= es.es_lblk && es.es_lblk <= end)
		return true;
	else
		return false;
}

/*
 * Locking for __es_scan_range() for external use
 */
bool ext4_es_scan_range(struct inode *inode,
			int (*matching_fn)(struct extent_status *es),
			ext4_lblk_t lblk, ext4_lblk_t end)
{
	bool ret;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return false;

	read_lock(&EXT4_I(inode)->i_es_lock);
	ret = __es_scan_range(inode, matching_fn, lblk, end);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	return ret;
}

/*
 * __es_scan_clu - search cluster for block with specified status in
 *                 extents status tree
 *
 * @inode - file containing the cluster
 * @matching_fn - pointer to function that matches extents with desired status
 * @lblk - logical block in cluster to be searched
 *
 * Returns true if at least one extent in the cluster containing @lblk
 * satisfies the criterion specified by @matching_fn, and false if not.  If at
 * least one extent has the specified status, then there is at least one block
 * in the cluster with that status.  Should only be called by code that has
 * taken i_es_lock.
 */
static bool __es_scan_clu(struct inode *inode,
			  int (*matching_fn)(struct extent_status *es),
			  ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t lblk_start, lblk_end;

	lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;

	return __es_scan_range(inode, matching_fn, lblk_start, lblk_end);
}
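
/*
 * Worked example (illustrative only, assuming a bigalloc file system
 * with s_cluster_ratio == 16): for lblk == 35, EXT4_LBLK_CMASK() rounds
 * down to the cluster boundary and the scan covers the whole cluster:
 *
 *	lblk_start = 35 & ~15 = 32
 *	lblk_end   = 32 + 16 - 1 = 47
 *
 * so __es_scan_clu(inode, fn, 35) is equivalent to
 * __es_scan_range(inode, fn, 32, 47).
 */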

/*
 * Locking for __es_scan_clu() for external use
 */
bool ext4_es_scan_clu(struct inode *inode,
		      int (*matching_fn)(struct extent_status *es),
		      ext4_lblk_t lblk)
{
	bool ret;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return false;

	read_lock(&EXT4_I(inode)->i_es_lock);
	ret = __es_scan_clu(inode, matching_fn, lblk);
	read_unlock(&EXT4_I(inode)->i_es_lock);

	return ret;
}

static void ext4_es_list_add(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	if (!list_empty(&ei->i_es_list))
		return;

	spin_lock(&sbi->s_es_lock);
	if (list_empty(&ei->i_es_list)) {
		list_add_tail(&ei->i_es_list, &sbi->s_es_list);
		sbi->s_es_nr_inode++;
	}
	spin_unlock(&sbi->s_es_lock);
}

static void ext4_es_list_del(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	spin_lock(&sbi->s_es_lock);
	if (!list_empty(&ei->i_es_list)) {
		list_del_init(&ei->i_es_list);
		sbi->s_es_nr_inode--;
		WARN_ON_ONCE(sbi->s_es_nr_inode < 0);
	}
	spin_unlock(&sbi->s_es_lock);
}

static struct extent_status *
ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
		     ext4_fsblk_t pblk)
{
	struct extent_status *es;

	es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
	if (es == NULL)
		return NULL;
	es->es_lblk = lblk;
	es->es_len = len;
	es->es_pblk = pblk;

	/*
	 * We don't count delayed extents because we never try to reclaim them
	 */
	if (!ext4_es_is_delayed(es)) {
		if (!EXT4_I(inode)->i_es_shk_nr++)
			ext4_es_list_add(inode);
		percpu_counter_inc(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	EXT4_I(inode)->i_es_all_nr++;
	percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	return es;
}

static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
{
	EXT4_I(inode)->i_es_all_nr--;
	percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

	/* Decrease the shrink counter when this es is not delayed */
	if (!ext4_es_is_delayed(es)) {
		BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
		if (!--EXT4_I(inode)->i_es_shk_nr)
			ext4_es_list_del(inode);
		percpu_counter_dec(&EXT4_SB(inode->i_sb)->
					s_es_stats.es_stats_shk_cnt);
	}

	kmem_cache_free(ext4_es_cachep, es);
}

/*
 * Check whether or not two extents can be merged
 * Condition:
 *  - logical block number is contiguous
 *  - physical block number is contiguous
 *  - status is equal
 */
static int ext4_es_can_be_merged(struct extent_status *es1,
				 struct extent_status *es2)
{
	if (ext4_es_type(es1) != ext4_es_type(es2))
		return 0;

	if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) {
		pr_warn("ES assertion failed when merging extents. "
			"The sum of lengths of es1 (%d) and es2 (%d) "
			"is bigger than allowed file size (%d)\n",
			es1->es_len, es2->es_len, EXT_MAX_BLOCKS);
		WARN_ON(1);
		return 0;
	}

	if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
		return 0;

	if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
	    (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2)))
		return 1;

	if (ext4_es_is_hole(es1))
		return 1;

	/* we need to check that a delayed extent doesn't have unwritten status */
	if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
		return 1;

	return 0;
}
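
/*
 * Worked example (illustrative only): a written extent [0/8) at
 * physical block 100 and a written extent [8/4) at physical block 108
 * satisfy all three conditions (same type, 0 + 8 == 8 logically,
 * 100 + 8 == 108 physically), so they merge into [0/12) at 100.  If
 * the second extent instead started at physical block 200, the
 * physical-contiguity test would fail and both extents would be kept.
 */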

static struct extent_status *
ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_prev(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es1, es)) {
		es1->es_len += es->es_len;
		if (ext4_es_is_referenced(es))
			ext4_es_set_referenced(es1);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		es = es1;
	}

	return es;
}

static struct extent_status *
ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es1;
	struct rb_node *node;

	node = rb_next(&es->rb_node);
	if (!node)
		return es;

	es1 = rb_entry(node, struct extent_status, rb_node);
	if (ext4_es_can_be_merged(es, es1)) {
		es->es_len += es1->es_len;
		if (ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es);
		rb_erase(node, &tree->root);
		ext4_es_free_extent(inode, es1);
	}

	return es;
}

#ifdef ES_AGGRESSIVE_TEST
#include "ext4_extents.h"	/* Needed when ES_AGGRESSIVE_TEST is defined */

static void ext4_es_insert_extent_ext_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	ext4_fsblk_t ee_start;
	unsigned short ee_len;
	int depth, ee_status, es_status;

	path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE);
	if (IS_ERR(path))
		return;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;

	if (ex) {
		ee_block = le32_to_cpu(ex->ee_block);
		ee_start = ext4_ext_pblock(ex);
		ee_len = ext4_ext_get_actual_len(ex);

		ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0;
		es_status = ext4_es_is_unwritten(es) ? 1 : 0;

		/*
		 * Make sure ex and es do not overlap when we try to insert
		 * a delayed/hole extent.
		 */
		if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
			if (in_range(es->es_lblk, ee_block, ee_len)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu we can find an extent "
					"at block [%d/%d/%llu/%c], but we "
					"want to add a delayed/hole extent "
					"[%d/%d/%llu/%x]\n",
					inode->i_ino, ee_block, ee_len,
					ee_start, ee_status ? 'u' : 'w',
					es->es_lblk, es->es_len,
					ext4_es_pblock(es), ext4_es_status(es));
			}
			goto out;
		}

		/*
		 * We don't check ee_block == es->es_lblk, etc. because es
		 * might be a part of a whole extent, and vice versa.
		 */
		if (es->es_lblk < ee_block ||
		    ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
			goto out;
		}

		if (ee_status ^ es_status) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"ex_status [%d/%d/%llu/%c] != "
				"es_status [%d/%d/%llu/%c]\n", inode->i_ino,
				ee_block, ee_len, ee_start,
				ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
				ext4_es_pblock(es), es_status ? 'u' : 'w');
		}
	} else {
		/*
		 * We can't find an extent on disk.  So we need to make sure
		 * that we don't want to add a written/unwritten extent.
		 */
		if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"can't find an extent at block %d but we want "
				"to add a written/unwritten extent "
				"[%d/%d/%llu/%x]\n", inode->i_ino,
				es->es_lblk, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
}

static void ext4_es_insert_extent_ind_check(struct inode *inode,
					    struct extent_status *es)
{
	struct ext4_map_blocks map;
	int retval;

	/*
	 * Here we call ext4_ind_map_blocks to look up a block mapping
	 * because the 'Indirect' structure is defined in indirect.c, so we
	 * cannot access the direct/indirect tree from outside it.  It would
	 * be too ugly to define this function in indirect.c.
	 */
	map.m_lblk = es->es_lblk;
	map.m_len = es->es_len;

	retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
	if (retval > 0) {
		if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
			/*
			 * We want to add a delayed/hole extent but this
			 * block has been allocated.
			 */
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can find blocks but we want to add a "
				"delayed/hole extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		} else if (ext4_es_is_written(es)) {
			if (retval != es->es_len) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu retval %d != es_len %d\n",
					inode->i_ino, retval, es->es_len);
				return;
			}
			if (map.m_pblk != ext4_es_pblock(es)) {
				pr_warn("ES insert assertion failed for "
					"inode: %lu m_pblk %llu != "
					"es_pblk %llu\n",
					inode->i_ino, map.m_pblk,
					ext4_es_pblock(es));
				return;
			}
		} else {
			/*
			 * We don't need to check unwritten extents because
			 * indirect-based files don't have them.
			 */
			BUG();
		}
	} else if (retval == 0) {
		if (ext4_es_is_written(es)) {
			pr_warn("ES insert assertion failed for inode: %lu "
				"We can't find the block but we want to add "
				"a written extent [%d/%d/%llu/%x]\n",
				inode->i_ino, es->es_lblk, es->es_len,
				ext4_es_pblock(es), ext4_es_status(es));
			return;
		}
	}
}

static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
	/*
	 * We don't need to worry about the race condition because the
	 * caller holds i_data_sem.
	 */
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_es_insert_extent_ext_check(inode, es);
	else
		ext4_es_insert_extent_ind_check(inode, es);
}
#else
static inline void ext4_es_insert_extent_check(struct inode *inode,
					       struct extent_status *es)
{
}
#endif

static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_status *es;

	while (*p) {
		parent = *p;
		es = rb_entry(parent, struct extent_status, rb_node);

		if (newes->es_lblk < es->es_lblk) {
			if (ext4_es_can_be_merged(newes, es)) {
				/*
				 * Here we can modify es_lblk directly
				 * because the ranges don't overlap.
				 */
				es->es_lblk = newes->es_lblk;
				es->es_len += newes->es_len;
				if (ext4_es_is_written(es) ||
				    ext4_es_is_unwritten(es))
					ext4_es_store_pblock(es,
							     newes->es_pblk);
				es = ext4_es_try_to_merge_left(inode, es);
				goto out;
			}
			p = &(*p)->rb_left;
		} else if (newes->es_lblk > ext4_es_end(es)) {
			if (ext4_es_can_be_merged(es, newes)) {
				es->es_len += newes->es_len;
				es = ext4_es_try_to_merge_right(inode, es);
				goto out;
			}
			p = &(*p)->rb_right;
		} else {
			BUG();
			return -EINVAL;
		}
	}

	es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
				  newes->es_pblk);
	if (!es)
		return -ENOMEM;
	rb_link_node(&es->rb_node, parent, p);
	rb_insert_color(&es->rb_node, &tree->root);

out:
	tree->cache_es = es;
	return 0;
}

/*
 * ext4_es_insert_extent() adds information to an inode's extent
 * status tree.
 *
 * Return 0 on success, error code on failure.
 */
int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;
	int err = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return 0;

	es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
		 lblk, len, pblk, status, inode->i_ino);

	if (!len)
		return 0;

	BUG_ON(end < lblk);

	if ((status & EXTENT_STATUS_DELAYED) &&
	    (status & EXTENT_STATUS_WRITTEN)) {
		ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
				"delayed and written, which can potentially "
				"cause data loss.", lblk, len);
		WARN_ON(1);
	}

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_insert_extent(inode, &newes);

	ext4_es_insert_extent_check(inode, &newes);

	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end, NULL);
	if (err != 0)
		goto error;
retry:
	err = __es_insert_extent(inode, &newes);
	if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
					  128, EXT4_I(inode)))
		goto retry;
	if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
		err = 0;

	if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
	    (status & EXTENT_STATUS_WRITTEN ||
	     status & EXTENT_STATUS_UNWRITTEN))
		__revise_pending(inode, lblk, len);

error:
	write_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_print_tree(inode);

	return err;
}
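
/*
 * Illustrative sketch (not part of the build): a typical call after a
 * block allocation, in the style of ext4_map_blocks().  The numbers
 * are hypothetical.
 */
#if 0
	/* record a freshly written mapping: 8 blocks at lblk 100, pblk 5000 */
	err = ext4_es_insert_extent(inode, 100, 8, 5000,
				    EXTENT_STATUS_WRITTEN);
	/*
	 * Any status previously recorded for [100, 107] (e.g. a hole or a
	 * delayed extent) is removed first, so the tree never holds two
	 * extents covering the same block.
	 */
#endif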

/*
 * ext4_es_cache_extent() inserts information into the extent status
 * tree if and only if there isn't information about the range in
 * question already.
 */
void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len, ext4_fsblk_t pblk,
			  unsigned int status)
{
	struct extent_status *es;
	struct extent_status newes;
	ext4_lblk_t end = lblk + len - 1;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return;

	newes.es_lblk = lblk;
	newes.es_len = len;
	ext4_es_store_pblock_status(&newes, pblk, status);
	trace_ext4_es_cache_extent(inode, &newes);

	if (!len)
		return;

	BUG_ON(end < lblk);

	write_lock(&EXT4_I(inode)->i_es_lock);

	es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
	if (!es || es->es_lblk > end)
		__es_insert_extent(inode, &newes);
	write_unlock(&EXT4_I(inode)->i_es_lock);
}

/*
 * ext4_es_lookup_extent() looks up an extent in the extent status tree.
 *
 * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
 *
 * Return: 1 if found, 0 if not
 */
int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t *next_lblk,
			  struct extent_status *es)
{
	struct ext4_es_tree *tree;
	struct ext4_es_stats *stats;
	struct extent_status *es1 = NULL;
	struct rb_node *node;
	int found = 0;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return 0;

	trace_ext4_es_lookup_extent_enter(inode, lblk);
	es_debug("lookup extent in block %u\n", lblk);

	tree = &EXT4_I(inode)->i_es_tree;
	read_lock(&EXT4_I(inode)->i_es_lock);

	/* find the extent in the cache first */
	es->es_lblk = es->es_len = es->es_pblk = 0;
	if (tree->cache_es) {
		es1 = tree->cache_es;
		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
			es_debug("%u cached by [%u/%u)\n",
				 lblk, es1->es_lblk, es1->es_len);
			found = 1;
			goto out;
		}
	}

	node = tree->root.rb_node;
	while (node) {
		es1 = rb_entry(node, struct extent_status, rb_node);
		if (lblk < es1->es_lblk)
			node = node->rb_left;
		else if (lblk > ext4_es_end(es1))
			node = node->rb_right;
		else {
			found = 1;
			break;
		}
	}

out:
	stats = &EXT4_SB(inode->i_sb)->s_es_stats;
	if (found) {
		BUG_ON(!es1);
		es->es_lblk = es1->es_lblk;
		es->es_len = es1->es_len;
		es->es_pblk = es1->es_pblk;
		if (!ext4_es_is_referenced(es1))
			ext4_es_set_referenced(es1);
		percpu_counter_inc(&stats->es_stats_cache_hits);
		if (next_lblk) {
			node = rb_next(&es1->rb_node);
			if (node) {
				es1 = rb_entry(node, struct extent_status,
					       rb_node);
				*next_lblk = es1->es_lblk;
			} else
				*next_lblk = 0;
		}
	} else {
		percpu_counter_inc(&stats->es_stats_cache_misses);
	}

	read_unlock(&EXT4_I(inode)->i_es_lock);

	trace_ext4_es_lookup_extent_exit(inode, es, found);
	return found;
}

struct rsvd_count {
	int ndelonly;
	bool first_do_lblk_found;
	ext4_lblk_t first_do_lblk;
	ext4_lblk_t last_do_lblk;
	struct extent_status *left_es;
	bool partial;
	ext4_lblk_t lclu;
};

/*
 * init_rsvd - initialize reserved count data before removing block range
 *	       in file from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @es - pointer to first extent in range
 * @rc - pointer to reserved count data
 *
 * Assumes es is not NULL
 */
static void init_rsvd(struct inode *inode, ext4_lblk_t lblk,
		      struct extent_status *es, struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct rb_node *node;

	rc->ndelonly = 0;

	/*
	 * for bigalloc, note the first delonly block in the range has not
	 * been found, record the extent containing the block to the left of
	 * the region to be removed, if any, and note that there's no partial
	 * cluster to track
	 */
	if (sbi->s_cluster_ratio > 1) {
		rc->first_do_lblk_found = false;
		if (lblk > es->es_lblk) {
			rc->left_es = es;
		} else {
			node = rb_prev(&es->rb_node);
			rc->left_es = node ? rb_entry(node,
						      struct extent_status,
						      rb_node) : NULL;
		}
		rc->partial = false;
	}
}

/*
 * count_rsvd - count the clusters containing delayed and not unwritten
 *		(delonly) blocks in a range within an extent and add to
 *		the running tally in rsvd_count
 *
 * @inode - file containing extent
 * @lblk - first block in range
 * @len - length of range in blocks
 * @es - pointer to extent containing clusters to be counted
 * @rc - pointer to reserved count data
 *
 * Tracks partial clusters found at the beginning and end of extents so
 * they aren't overcounted when they span adjacent extents
 */
static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
		       struct extent_status *es, struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t i, end, nclu;

	if (!ext4_es_is_delonly(es))
		return;

	WARN_ON(len <= 0);

	if (sbi->s_cluster_ratio == 1) {
		rc->ndelonly += (int) len;
		return;
	}

	/* bigalloc */

	i = (lblk < es->es_lblk) ? es->es_lblk : lblk;
	end = lblk + (ext4_lblk_t) len - 1;
	end = (end > ext4_es_end(es)) ? ext4_es_end(es) : end;

	/* record the first block of the first delonly extent seen */
	if (!rc->first_do_lblk_found) {
		rc->first_do_lblk = i;
		rc->first_do_lblk_found = true;
	}

	/* update the last lblk in the region seen so far */
	rc->last_do_lblk = end;

	/*
	 * if we're tracking a partial cluster and the current extent
	 * doesn't start with it, count it and stop tracking
	 */
	if (rc->partial && (rc->lclu != EXT4_B2C(sbi, i))) {
		rc->ndelonly++;
		rc->partial = false;
	}

	/*
	 * if the first cluster doesn't start on a cluster boundary but
	 * ends on one, count it
	 */
	if (EXT4_LBLK_COFF(sbi, i) != 0) {
		if (end >= EXT4_LBLK_CFILL(sbi, i)) {
			rc->ndelonly++;
			rc->partial = false;
			i = EXT4_LBLK_CFILL(sbi, i) + 1;
		}
	}

	/*
	 * if the current cluster starts on a cluster boundary, count the
	 * number of whole delonly clusters in the extent
	 */
	if ((i + sbi->s_cluster_ratio - 1) <= end) {
		nclu = (end - i + 1) >> sbi->s_cluster_bits;
		rc->ndelonly += nclu;
		i += nclu << sbi->s_cluster_bits;
	}

	/*
	 * start tracking a partial cluster if there's a partial at the end
	 * of the current extent and we're not already tracking one
	 */
	if (!rc->partial && i <= end) {
		rc->partial = true;
		rc->lclu = EXT4_B2C(sbi, i);
	}
}
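
/*
 * Worked example (illustrative only, s_cluster_ratio == 4): removing a
 * delonly extent covering blocks 2..9 spans clusters 0..2.  Cluster 0
 * is entered mid-cluster at block 2 but ends on a boundary at block 3,
 * so it's counted by the CFILL test; blocks 4..7 form one whole
 * cluster; blocks 8..9 leave a trailing partial, so rc->partial is set
 * with rc->lclu == 2 and that partial is resolved by a later extent or
 * by get_rsvd().
 */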

/*
 * __pr_tree_search - search for a pending cluster reservation
 *
 * @root - root of pending reservation tree
 * @lclu - logical cluster to search for
 *
 * Returns the pending reservation for the cluster identified by @lclu
 * if found.  If not, returns a reservation for the next cluster if any,
 * and if not, returns NULL.
 */
static struct pending_reservation *__pr_tree_search(struct rb_root *root,
						    ext4_lblk_t lclu)
{
	struct rb_node *node = root->rb_node;
	struct pending_reservation *pr = NULL;

	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		if (lclu < pr->lclu)
			node = node->rb_left;
		else if (lclu > pr->lclu)
			node = node->rb_right;
		else
			return pr;
	}
	if (pr && lclu < pr->lclu)
		return pr;
	if (pr && lclu > pr->lclu) {
		node = rb_next(&pr->rb_node);
		return node ? rb_entry(node, struct pending_reservation,
				       rb_node) : NULL;
	}
	return NULL;
}

/*
 * get_rsvd - calculates and returns the number of cluster reservations to be
 *	      released when removing a block range from the extent status tree
 *	      and releases any pending reservations within the range
 *
 * @inode - file containing block range
 * @end - last block in range
 * @right_es - pointer to extent containing next block beyond end or NULL
 * @rc - pointer to reserved count data
 *
 * The number of reservations to be released is equal to the number of
 * clusters containing delayed and not unwritten (delonly) blocks within
 * the range, minus the number of clusters still containing delonly blocks
 * at the ends of the range, and minus the number of pending reservations
 * within the range.
 */
static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
			     struct extent_status *right_es,
			     struct rsvd_count *rc)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct pending_reservation *pr;
	struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
	struct rb_node *node;
	ext4_lblk_t first_lclu, last_lclu;
	bool left_delonly, right_delonly, count_pending;
	struct extent_status *es;

	if (sbi->s_cluster_ratio > 1) {
		/* count any remaining partial cluster */
		if (rc->partial)
			rc->ndelonly++;

		if (rc->ndelonly == 0)
			return 0;

		first_lclu = EXT4_B2C(sbi, rc->first_do_lblk);
		last_lclu = EXT4_B2C(sbi, rc->last_do_lblk);

		/*
		 * decrease the delonly count by the number of clusters at the
		 * ends of the range that still contain delonly blocks -
		 * these clusters still need to be reserved
		 */
		left_delonly = right_delonly = false;

		es = rc->left_es;
		while (es && ext4_es_end(es) >=
		       EXT4_LBLK_CMASK(sbi, rc->first_do_lblk)) {
			if (ext4_es_is_delonly(es)) {
				rc->ndelonly--;
				left_delonly = true;
				break;
			}
			node = rb_prev(&es->rb_node);
			if (!node)
				break;
			es = rb_entry(node, struct extent_status, rb_node);
		}
		if (right_es && (!left_delonly || first_lclu != last_lclu)) {
			if (end < ext4_es_end(right_es)) {
				es = right_es;
			} else {
				node = rb_next(&right_es->rb_node);
				es = node ? rb_entry(node, struct extent_status,
						     rb_node) : NULL;
			}
			while (es && es->es_lblk <=
			       EXT4_LBLK_CFILL(sbi, rc->last_do_lblk)) {
				if (ext4_es_is_delonly(es)) {
					rc->ndelonly--;
					right_delonly = true;
					break;
				}
				node = rb_next(&es->rb_node);
				if (!node)
					break;
				es = rb_entry(node, struct extent_status,
					      rb_node);
			}
		}

		/*
		 * Determine the block range that should be searched for
		 * pending reservations, if any.  Clusters on the ends of the
		 * original removed range containing delonly blocks are
		 * excluded.  They've already been accounted for and it's not
		 * possible to determine if an associated pending reservation
		 * should be released with the information available in the
		 * extents status tree.
		 */
		if (first_lclu == last_lclu) {
			if (left_delonly | right_delonly)
				count_pending = false;
			else
				count_pending = true;
		} else {
			if (left_delonly)
				first_lclu++;
			if (right_delonly)
				last_lclu--;
			if (first_lclu <= last_lclu)
				count_pending = true;
			else
				count_pending = false;
		}

		/*
		 * a pending reservation found between first_lclu and last_lclu
		 * represents an allocated cluster that contained at least one
		 * delonly block, so the delonly total must be reduced by one
		 * for each pending reservation found and released
		 */
		if (count_pending) {
			pr = __pr_tree_search(&tree->root, first_lclu);
			while (pr && pr->lclu <= last_lclu) {
				rc->ndelonly--;
				node = rb_next(&pr->rb_node);
				rb_erase(&pr->rb_node, &tree->root);
				kmem_cache_free(ext4_pending_cachep, pr);
				if (!node)
					break;
				pr = rb_entry(node, struct pending_reservation,
					      rb_node);
			}
		}
	}
	return rc->ndelonly;
}
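
/*
 * Worked example (illustrative only, s_cluster_ratio == 4): removing
 * delonly blocks 5..10 tallies clusters 1 and 2 in rc->ndelonly.  If
 * block 4 is still delonly after the removal, cluster 1 must keep its
 * reservation, so ndelonly is decremented and the cluster is excluded
 * from the pending-reservation search; block 11 protects cluster 2 on
 * the right the same way.  Each pending reservation found in the
 * remaining cluster range marks an already-allocated cluster, so it is
 * released and ndelonly is decremented once more for it.
 */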

/*
 * __es_remove_extent - removes block range from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @end - last block in range
 * @reserved - number of cluster reservations released
 *
 * If @reserved is not NULL and delayed allocation is enabled, counts
 * block/cluster reservations freed by removing range and if bigalloc
 * enabled cancels pending reservations as needed. Returns 0 on success,
 * error code on failure.
 */
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			      ext4_lblk_t end, int *reserved)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	struct extent_status orig_es;
	ext4_lblk_t len1, len2;
	ext4_fsblk_t block;
	int err = 0;
	bool count_reserved = true;
	struct rsvd_count rc;

	if (reserved == NULL || !test_opt(inode->i_sb, DELALLOC))
		count_reserved = false;
retry:
	err = 0;

	es = __es_tree_search(&tree->root, lblk);
	if (!es)
		goto out;
	if (es->es_lblk > end)
		goto out;

	/* Simply invalidate cache_es. */
	tree->cache_es = NULL;
	if (count_reserved)
		init_rsvd(inode, lblk, es, &rc);

	orig_es.es_lblk = es->es_lblk;
	orig_es.es_len = es->es_len;
	orig_es.es_pblk = es->es_pblk;

	len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0;
	len2 = ext4_es_end(es) > end ? ext4_es_end(es) - end : 0;
	if (len1 > 0)
		es->es_len = len1;
	if (len2 > 0) {
		if (len1 > 0) {
			struct extent_status newes;

			newes.es_lblk = end + 1;
			newes.es_len = len2;
			block = 0x7FDEADBEEFULL;
			if (ext4_es_is_written(&orig_es) ||
			    ext4_es_is_unwritten(&orig_es))
				block = ext4_es_pblock(&orig_es) +
					orig_es.es_len - len2;
			ext4_es_store_pblock_status(&newes, block,
						    ext4_es_status(&orig_es));
			err = __es_insert_extent(inode, &newes);
			if (err) {
				es->es_lblk = orig_es.es_lblk;
				es->es_len = orig_es.es_len;
				if ((err == -ENOMEM) &&
				    __es_shrink(EXT4_SB(inode->i_sb),
							128, EXT4_I(inode)))
					goto retry;
				goto out;
			}
		} else {
			es->es_lblk = end + 1;
			es->es_len = len2;
			if (ext4_es_is_written(es) ||
			    ext4_es_is_unwritten(es)) {
				block = orig_es.es_pblk + orig_es.es_len - len2;
				ext4_es_store_pblock(es, block);
			}
		}
		if (count_reserved)
			count_rsvd(inode, lblk, orig_es.es_len - len1 - len2,
				   &orig_es, &rc);
		goto out;
	}

	if (len1 > 0) {
		if (count_reserved)
			count_rsvd(inode, lblk, orig_es.es_len - len1,
				   &orig_es, &rc);
		node = rb_next(&es->rb_node);
		if (node)
			es = rb_entry(node, struct extent_status, rb_node);
		else
			es = NULL;
	}

	while (es && ext4_es_end(es) <= end) {
		if (count_reserved)
			count_rsvd(inode, es->es_lblk, es->es_len, es, &rc);
		node = rb_next(&es->rb_node);
		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		if (!node) {
			es = NULL;
			break;
		}
		es = rb_entry(node, struct extent_status, rb_node);
	}

	if (es && es->es_lblk < end + 1) {
		ext4_lblk_t orig_len = es->es_len;

		len1 = ext4_es_end(es) - end;
		if (count_reserved)
			count_rsvd(inode, es->es_lblk, orig_len - len1,
				   es, &rc);
		es->es_lblk = end + 1;
		es->es_len = len1;
		if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
			block = es->es_pblk + orig_len - len1;
			ext4_es_store_pblock(es, block);
		}
	}

	if (count_reserved)
		*reserved = get_rsvd(inode, end, es, &rc);
out:
	return err;
}

/*
 * ext4_es_remove_extent - removes block range from extent status tree
 *
 * @inode - file containing range
 * @lblk - first block in range
 * @len - number of blocks to remove
 *
 * Reduces block/cluster reservation count and for bigalloc cancels pending
 * reservations as needed. Returns 0 on success, error code on failure.
 */
int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
			  ext4_lblk_t len)
{
	ext4_lblk_t end;
	int err = 0;
	int reserved = 0;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return 0;

	trace_ext4_es_remove_extent(inode, lblk, len);
	es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
		 lblk, len, inode->i_ino);

	if (!len)
		return err;

	end = lblk + len - 1;
	BUG_ON(end < lblk);

	/*
	 * ext4_clear_inode() depends on us taking i_es_lock unconditionally
	 * so that we are sure __es_shrink() is done with the inode before it
	 * is reclaimed.
	 */
	write_lock(&EXT4_I(inode)->i_es_lock);
	err = __es_remove_extent(inode, lblk, end, &reserved);
	write_unlock(&EXT4_I(inode)->i_es_lock);
	ext4_es_print_tree(inode);
	ext4_da_release_space(inode, reserved);
	return err;
}

static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
		       struct ext4_inode_info *locked_ei)
{
	struct ext4_inode_info *ei;
	struct ext4_es_stats *es_stats;
	ktime_t start_time;
	u64 scan_time;
	int nr_to_walk;
	int nr_shrunk = 0;
	int retried = 0, nr_skipped = 0;

	es_stats = &sbi->s_es_stats;
	start_time = ktime_get();

retry:
	spin_lock(&sbi->s_es_lock);
	nr_to_walk = sbi->s_es_nr_inode;
	while (nr_to_walk-- > 0) {
		if (list_empty(&sbi->s_es_list)) {
			spin_unlock(&sbi->s_es_lock);
			goto out;
		}
		ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info,
				      i_es_list);
		/* Move the inode to the tail */
		list_move_tail(&ei->i_es_list, &sbi->s_es_list);

		/*
		 * Normally we try hard to avoid shrinking precached inodes,
		 * but we will as a last resort.
		 */
		if (!retried && ext4_test_inode_state(&ei->vfs_inode,
						EXT4_STATE_EXT_PRECACHED)) {
			nr_skipped++;
			continue;
		}

		if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) {
			nr_skipped++;
			continue;
		}
		/*
		 * Now we hold i_es_lock which protects us from inode reclaim
		 * freeing the inode under us
		 */
		spin_unlock(&sbi->s_es_lock);

		nr_shrunk += es_reclaim_extents(ei, &nr_to_scan);
		write_unlock(&ei->i_es_lock);

		if (nr_to_scan <= 0)
			goto out;
		spin_lock(&sbi->s_es_lock);
	}
	spin_unlock(&sbi->s_es_lock);

	/*
	 * If we skipped any inodes, and we weren't able to make any
	 * forward progress, try again to scan precached inodes.
	 */
	if ((nr_shrunk == 0) && nr_skipped && !retried) {
		retried++;
		goto retry;
	}

	if (locked_ei && nr_shrunk == 0)
		nr_shrunk = es_reclaim_extents(locked_ei, &nr_to_scan);

out:
	scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
	if (likely(es_stats->es_stats_scan_time))
		es_stats->es_stats_scan_time = (scan_time +
				es_stats->es_stats_scan_time*3) / 4;
	else
		es_stats->es_stats_scan_time = scan_time;
	if (scan_time > es_stats->es_stats_max_scan_time)
		es_stats->es_stats_max_scan_time = scan_time;
	if (likely(es_stats->es_stats_shrunk))
		es_stats->es_stats_shrunk = (nr_shrunk +
				es_stats->es_stats_shrunk*3) / 4;
	else
		es_stats->es_stats_shrunk = nr_shrunk;

	trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time,
			     nr_skipped, retried);
	return nr_shrunk;
}
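
/*
 * Note on the statistics above: both es_stats_scan_time and
 * es_stats_shrunk are kept as exponentially weighted moving averages
 * with a weight of 1/4 on the newest sample:
 *
 *	avg' = (sample + 3 * avg) / 4
 *
 * so a single slow scan only nudges the reported average, while a
 * sustained change converges within a few scans.
 */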

static unsigned long ext4_es_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	unsigned long nr;
	struct ext4_sb_info *sbi;

	sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
	nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
	return nr;
}

static unsigned long ext4_es_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct ext4_sb_info *sbi = container_of(shrink,
					struct ext4_sb_info, s_es_shrinker);
	int nr_to_scan = sc->nr_to_scan;
	int ret, nr_shrunk;

	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);

	nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);

	ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
	trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
	return nr_shrunk;
}

int ext4_seq_es_shrinker_info_show(struct seq_file *seq, void *v)
{
	struct ext4_sb_info *sbi = EXT4_SB((struct super_block *) seq->private);
	struct ext4_es_stats *es_stats = &sbi->s_es_stats;
	struct ext4_inode_info *ei, *max = NULL;
	unsigned int inode_cnt = 0;

	if (v != SEQ_START_TOKEN)
		return 0;

	/* here we just find an inode that has the max nr. of objects */
	spin_lock(&sbi->s_es_lock);
	list_for_each_entry(ei, &sbi->s_es_list, i_es_list) {
		inode_cnt++;
		if (max && max->i_es_all_nr < ei->i_es_all_nr)
			max = ei;
		else if (!max)
			max = ei;
	}
	spin_unlock(&sbi->s_es_lock);

	seq_printf(seq, "stats:\n  %lld objects\n  %lld reclaimable objects\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
		   percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
	seq_printf(seq, "  %lld/%lld cache hits/misses\n",
		   percpu_counter_sum_positive(&es_stats->es_stats_cache_hits),
		   percpu_counter_sum_positive(&es_stats->es_stats_cache_misses));
	if (inode_cnt)
		seq_printf(seq, "  %d inodes on list\n", inode_cnt);

	seq_printf(seq, "average:\n  %llu us scan time\n",
		   div_u64(es_stats->es_stats_scan_time, 1000));
	seq_printf(seq, "  %lu shrunk objects\n", es_stats->es_stats_shrunk);
	if (inode_cnt)
		seq_printf(seq,
			"maximum:\n  %lu inode (%u objects, %u reclaimable)\n"
			"  %llu us max scan time\n",
			max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_shk_nr,
			div_u64(es_stats->es_stats_max_scan_time, 1000));

	return 0;
}

int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
{
	int err;

	/* Make sure we have enough bits for physical block number */
	BUILD_BUG_ON(ES_SHIFT < 48);
	INIT_LIST_HEAD(&sbi->s_es_list);
	sbi->s_es_nr_inode = 0;
	spin_lock_init(&sbi->s_es_lock);
	sbi->s_es_stats.es_stats_shrunk = 0;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_hits, 0,
				  GFP_KERNEL);
	if (err)
		return err;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_misses, 0,
				  GFP_KERNEL);
	if (err)
		goto err1;
	sbi->s_es_stats.es_stats_scan_time = 0;
	sbi->s_es_stats.es_stats_max_scan_time = 0;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL);
	if (err)
		goto err2;
	err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL);
	if (err)
		goto err3;

	sbi->s_es_shrinker.scan_objects = ext4_es_scan;
	sbi->s_es_shrinker.count_objects = ext4_es_count;
	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
	err = register_shrinker(&sbi->s_es_shrinker);
	if (err)
		goto err4;

	return 0;
err4:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
err3:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
err2:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
err1:
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
	return err;
}

void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
	unregister_shrinker(&sbi->s_es_shrinker);
}

/*
 * Shrink extents in given inode from ei->i_es_shrink_lblk till end. Scan at
 * most *nr_to_scan extents, update *nr_to_scan accordingly.
 *
 * Return 0 if we hit end of tree / interval, 1 if we exhausted nr_to_scan.
 * Increment *nr_shrunk by the number of reclaimed extents. Also update
 * ei->i_es_shrink_lblk to where we should continue scanning.
 */
static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
				 int *nr_to_scan, int *nr_shrunk)
{
	struct inode *inode = &ei->vfs_inode;
	struct ext4_es_tree *tree = &ei->i_es_tree;
	struct extent_status *es;
	struct rb_node *node;

	es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk);
	if (!es)
		goto out_wrap;

	while (*nr_to_scan > 0) {
		if (es->es_lblk > end) {
			ei->i_es_shrink_lblk = end + 1;
			return 0;
		}

		(*nr_to_scan)--;
		node = rb_next(&es->rb_node);
		/*
		 * We can't reclaim delayed extents from the status tree
		 * because fiemap, bigalloc, and seek_data/hole need them.
		 */
		if (ext4_es_is_delayed(es))
			goto next;
		if (ext4_es_is_referenced(es)) {
			ext4_es_clear_referenced(es);
			goto next;
		}

		rb_erase(&es->rb_node, &tree->root);
		ext4_es_free_extent(inode, es);
		(*nr_shrunk)++;
next:
		if (!node)
			goto out_wrap;
		es = rb_entry(node, struct extent_status, rb_node);
	}
	ei->i_es_shrink_lblk = es->es_lblk;
	return 1;
out_wrap:
	ei->i_es_shrink_lblk = 0;
	return 0;
}

static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan)
{
	struct inode *inode = &ei->vfs_inode;
	int nr_shrunk = 0;
	ext4_lblk_t start = ei->i_es_shrink_lblk;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (ei->i_es_shk_nr == 0)
		return 0;

	if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
	    __ratelimit(&_rs))
		ext4_warning(inode->i_sb, "forced shrink of precached extents");

	if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) &&
	    start != 0)
		es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk);

	ei->i_es_tree.cache_es = NULL;
	return nr_shrunk;
}

/*
 * Called to support EXT4_IOC_CLEAR_ES_CACHE.  We can only remove
 * discretionary entries from the extent status cache.  (Some entries
 * must be present for proper operations.)
 */
void ext4_clear_inode_es(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct extent_status *es;
	struct ext4_es_tree *tree;
	struct rb_node *node;

	write_lock(&ei->i_es_lock);
	tree = &EXT4_I(inode)->i_es_tree;
	tree->cache_es = NULL;
	node = rb_first(&tree->root);
	while (node) {
		es = rb_entry(node, struct extent_status, rb_node);
		node = rb_next(node);
		if (!ext4_es_is_delayed(es)) {
			rb_erase(&es->rb_node, &tree->root);
			ext4_es_free_extent(inode, es);
		}
	}
	ext4_clear_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
	write_unlock(&ei->i_es_lock);
}

#ifdef ES_DEBUG__
static void ext4_print_pending_tree(struct inode *inode)
{
	struct ext4_pending_tree *tree;
	struct rb_node *node;
	struct pending_reservation *pr;

	printk(KERN_DEBUG "pending reservations for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_pending_tree;
	node = rb_first(&tree->root);
	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		printk(KERN_DEBUG " %u", pr->lclu);
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
#else
#define ext4_print_pending_tree(inode)
#endif

int __init ext4_init_pending(void)
{
	ext4_pending_cachep = kmem_cache_create("ext4_pending_reservation",
					   sizeof(struct pending_reservation),
					   0, (SLAB_RECLAIM_ACCOUNT), NULL);
	if (ext4_pending_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_pending(void)
{
	kmem_cache_destroy(ext4_pending_cachep);
}

void ext4_init_pending_tree(struct ext4_pending_tree *tree)
{
	tree->root = RB_ROOT;
}

/*
 * __get_pending - retrieve a pointer to a pending reservation
 *
 * @inode - file containing the pending cluster reservation
 * @lclu - logical cluster of interest
 *
 * Returns a pointer to a pending reservation if it's a member of
 * the set, and NULL if not.  Must be called holding i_es_lock.
 */
static struct pending_reservation *__get_pending(struct inode *inode,
						 ext4_lblk_t lclu)
{
	struct ext4_pending_tree *tree;
	struct rb_node *node;
	struct pending_reservation *pr = NULL;

	tree = &EXT4_I(inode)->i_pending_tree;
	node = (&tree->root)->rb_node;

	while (node) {
		pr = rb_entry(node, struct pending_reservation, rb_node);
		if (lclu < pr->lclu)
			node = node->rb_left;
		else if (lclu > pr->lclu)
			node = node->rb_right;
		else if (lclu == pr->lclu)
			return pr;
	}
	return NULL;
}

/*
 * __insert_pending - adds a pending cluster reservation to the set of
 *                    pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the cluster to be added
 *
 * Returns 0 on successful insertion and -ENOMEM on failure.  If the
 * pending reservation is already in the set, returns successfully.
 */
static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct pending_reservation *pr;
	ext4_lblk_t lclu;
	int ret = 0;

	lclu = EXT4_B2C(sbi, lblk);
	/* search to find parent for insertion */
	while (*p) {
		parent = *p;
		pr = rb_entry(parent, struct pending_reservation, rb_node);

		if (lclu < pr->lclu) {
			p = &(*p)->rb_left;
		} else if (lclu > pr->lclu) {
			p = &(*p)->rb_right;
		} else {
			/* pending reservation already inserted */
			goto out;
		}
	}

	pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
	if (pr == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	pr->lclu = lclu;

	rb_link_node(&pr->rb_node, parent, p);
	rb_insert_color(&pr->rb_node, &tree->root);

out:
	return ret;
}

/*
 * __remove_pending - removes a pending cluster reservation from the set
 *                    of pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the pending cluster reservation to be removed
 *
 * Returns successfully if pending reservation is not a member of the set.
 */
static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct pending_reservation *pr;
	struct ext4_pending_tree *tree;

	pr = __get_pending(inode, EXT4_B2C(sbi, lblk));
	if (pr != NULL) {
		tree = &EXT4_I(inode)->i_pending_tree;
		rb_erase(&pr->rb_node, &tree->root);
		kmem_cache_free(ext4_pending_cachep, pr);
	}
}

/*
 * ext4_remove_pending - removes a pending cluster reservation from the set
 *                       of pending reservations
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the pending cluster reservation to be removed
 *
 * Locking for external use of __remove_pending.
 */
void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	write_lock(&ei->i_es_lock);
	__remove_pending(inode, lblk);
	write_unlock(&ei->i_es_lock);
}

/*
 * ext4_is_pending - determine whether a cluster has a pending reservation
 *                   on it
 *
 * @inode - file containing the cluster
 * @lblk - logical block in the cluster
 *
 * Returns true if there's a pending reservation for the cluster in the
 * set of pending reservations, and false if not.
 */
bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	bool ret;

	read_lock(&ei->i_es_lock);
	ret = (bool)(__get_pending(inode, EXT4_B2C(sbi, lblk)) != NULL);
	read_unlock(&ei->i_es_lock);

	return ret;
}

/*
 * ext4_es_insert_delayed_block - adds a delayed block to the extents status
 *                                tree, adding a pending reservation where
 *                                needed
 *
 * @inode - file containing the newly added block
 * @lblk - logical block to be added
 * @allocated - indicates whether a physical cluster has been allocated for
 *              the logical cluster that contains the block
 *
 * Returns 0 on success, negative error code on failure.
 */
int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
				 bool allocated)
{
	struct extent_status newes;
	int err = 0;

	if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		return 0;

	es_debug("add [%u/1) delayed to extent status tree of inode %lu\n",
		 lblk, inode->i_ino);

	newes.es_lblk = lblk;
	newes.es_len = 1;
	ext4_es_store_pblock_status(&newes, ~0, EXTENT_STATUS_DELAYED);
	trace_ext4_es_insert_delayed_block(inode, &newes, allocated);

	ext4_es_insert_extent_check(inode, &newes);

	write_lock(&EXT4_I(inode)->i_es_lock);

	err = __es_remove_extent(inode, lblk, lblk, NULL);
	if (err != 0)
		goto error;
retry:
	err = __es_insert_extent(inode, &newes);
	if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
					  128, EXT4_I(inode)))
		goto retry;
	if (err != 0)
		goto error;

	if (allocated)
		__insert_pending(inode, lblk);

error:
	write_unlock(&EXT4_I(inode)->i_es_lock);

	ext4_es_print_tree(inode);
	ext4_print_pending_tree(inode);

	return err;
}

/*
 * __es_delayed_clu - count number of clusters containing blocks that
 *                    are delayed only
 *
 * @inode - file containing block range
 * @start - logical block defining start of range
 * @end - logical block defining end of range
 *
 * Returns the number of clusters containing only delayed (not delayed
 * and unwritten) blocks in the range specified by @start and @end.  Any
 * cluster or part of a cluster within the range and containing a delayed
 * and not unwritten block within the range is counted as a whole cluster.
 */
static unsigned int __es_delayed_clu(struct inode *inode, ext4_lblk_t start,
				     ext4_lblk_t end)
{
	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
	struct extent_status *es;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct rb_node *node;
	ext4_lblk_t first_lclu, last_lclu;
	unsigned long long last_counted_lclu;
	unsigned int n = 0;

	/* guaranteed to be unequal to any ext4_lblk_t value */
	last_counted_lclu = ~0ULL;

	es = __es_tree_search(&tree->root, start);

	while (es && (es->es_lblk <= end)) {
		if (ext4_es_is_delonly(es)) {
			if (es->es_lblk <= start)
				first_lclu = EXT4_B2C(sbi, start);
			else
				first_lclu = EXT4_B2C(sbi, es->es_lblk);

			if (ext4_es_end(es) >= end)
				last_lclu = EXT4_B2C(sbi, end);
			else
				last_lclu = EXT4_B2C(sbi, ext4_es_end(es));

			if (first_lclu == last_counted_lclu)
				n += last_lclu - first_lclu;
			else
				n += last_lclu - first_lclu + 1;
			last_counted_lclu = last_lclu;
		}
		node = rb_next(&es->rb_node);
		if (!node)
			break;
		es = rb_entry(node, struct extent_status, rb_node);
	}

	return n;
}

/*
 * ext4_es_delayed_clu - count number of clusters containing blocks that
 *                       are delayed and not unwritten (delonly)
 *
 * @inode - file containing block range
 * @lblk - logical block defining start of range
 * @len - number of blocks in range
 *
 * Locking for external use of __es_delayed_clu().
 */
unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
				 ext4_lblk_t len)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_lblk_t end;
	unsigned int n;

	if (len == 0)
		return 0;

	end = lblk + len - 1;
	WARN_ON(end < lblk);

	read_lock(&ei->i_es_lock);

	n = __es_delayed_clu(inode, lblk, end);

	read_unlock(&ei->i_es_lock);

	return n;
}

/*
 * __revise_pending - makes, cancels, or leaves unchanged pending cluster
 *                    reservations for a specified block range depending
 *                    upon the presence or absence of delayed blocks
 *                    outside the range within clusters at the ends of the
 *                    range
 *
 * @inode - file containing the range
 * @lblk - logical block defining the start of range
 * @len  - length of range in blocks
 *
 * Used after a newly allocated extent is added to the extents status tree.
 * Requires that the extents in the range have either written or unwritten
 * status.  Must be called while holding i_es_lock.
 */
static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
			     ext4_lblk_t len)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t end = lblk + len - 1;
	ext4_lblk_t first, last;
	bool f_del = false, l_del = false;

	if (len == 0)
		return;

	/*
	 * Two cases - block range within single cluster and block range
	 * spanning two or more clusters.  Note that a cluster belonging
	 * to a range starting and/or ending on a cluster boundary is treated
	 * as if it does not contain a delayed extent.  The new range may
	 * have allocated space for previously delayed blocks out to the
	 * cluster boundary, requiring that any pre-existing pending
	 * reservation be canceled.  Because this code only looks at blocks
	 * outside the range, it should revise pending reservations
	 * correctly even if the extent represented by the range can't be
	 * inserted in the extents status tree due to ENOSPC.
	 */

	if (EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) {
		first = EXT4_LBLK_CMASK(sbi, lblk);
		if (first != lblk)
			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
						first, lblk - 1);
		if (f_del) {
			__insert_pending(inode, first);
		} else {
			last = EXT4_LBLK_CMASK(sbi, end) +
			       sbi->s_cluster_ratio - 1;
			if (last != end)
				l_del = __es_scan_range(inode,
							&ext4_es_is_delonly,
							end + 1, last);
			if (l_del)
				__insert_pending(inode, last);
			else
				__remove_pending(inode, last);
		}
	} else {
		first = EXT4_LBLK_CMASK(sbi, lblk);
		if (first != lblk)
			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
						first, lblk - 1);
		if (f_del)
			__insert_pending(inode, first);
		else
			__remove_pending(inode, first);

		last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
		if (last != end)
			l_del = __es_scan_range(inode, &ext4_es_is_delonly,
						end + 1, last);
		if (l_del)
			__insert_pending(inode, last);
		else
			__remove_pending(inode, last);
	}
}