// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/pagevec.h>

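/*
 * Pop a page from the caller-supplied pool of preallocated pages if one
 * is available; otherwise fall back to a fresh alloc_page() call.
 */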
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
{
	struct page *page;

	if (!list_empty(pool)) {
		page = lru_to_page(pool);
		DBG_BUGON(page_ref_count(page) != 1);
		list_del(&page->lru);
	} else {
		page = alloc_page(gfp);
	}
	return page;
}

#ifdef CONFIG_EROFS_FS_ZIP
/* global shrink count (for all mounted EROFS instances) */
static atomic_long_t erofs_global_shrink_cnt;

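/*
 * Try to take a reference on a workgroup: wait until any freezer is done,
 * then bump ->refcount with cmpxchg so a concurrent freeze cannot be
 * missed.  Returns 0 on success or -1 if the workgroup is going away.
 */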
static int erofs_workgroup_get(struct erofs_workgroup *grp)
{
	int o;

repeat:
	o = erofs_wait_on_workgroup_freezed(grp);
	if (o <= 0)
		return -1;

	if (atomic_cmpxchg(&grp->refcount, o, o + 1) != o)
		goto repeat;

	/* decrease the global shrink count, paired with erofs_workgroup_put */
	if (o == 1)
		atomic_long_dec(&erofs_global_shrink_cnt);
	return 0;
}

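/*
 * Look up a workgroup by its index under RCU and take a reference on it.
 * If the reference cannot be taken (the workgroup is frozen or dying),
 * leave the RCU read side and retry the lookup.
 */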
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
					     pgoff_t index)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_workgroup *grp;

repeat:
	rcu_read_lock();
	grp = xa_load(&sbi->managed_pslots, index);
	if (grp) {
		if (erofs_workgroup_get(grp)) {
			/* prefer to relax rcu read side */
			rcu_read_unlock();
			goto repeat;
		}

		DBG_BUGON(index != grp->index);
	}
	rcu_read_unlock();
	return grp;
}

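/*
 * Insert a new workgroup into the managed XArray, or hand back the
 * existing in-tree one (with a reference taken) if another thread
 * inserted it first.
 */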
struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
					       struct erofs_workgroup *grp)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	struct erofs_workgroup *pre;

	/*
	 * Bump up the reference count before making this workgroup
	 * visible to others in the XArray, in order to avoid a
	 * potential UAF when lookups are not serialized by xa_lock.
	 */
	atomic_inc(&grp->refcount);

repeat:
	xa_lock(&sbi->managed_pslots);
	pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
			   NULL, grp, GFP_NOFS);
	if (pre) {
		if (xa_is_err(pre)) {
			pre = ERR_PTR(xa_err(pre));
		} else if (erofs_workgroup_get(pre)) {
			/* try to legitimize the current in-tree one */
			xa_unlock(&sbi->managed_pslots);
			cond_resched();
			goto repeat;
		}
		atomic_dec(&grp->refcount);
		grp = pre;
	}
	xa_unlock(&sbi->managed_pslots);
	return grp;
}

static void __erofs_workgroup_free(struct erofs_workgroup *grp)
{
	atomic_long_dec(&erofs_global_shrink_cnt);
	erofs_workgroup_free_rcu(grp);
}

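/*
 * Drop one reference.  When only the managed-pslot reference is left the
 * workgroup becomes reclaimable and the global shrink count is bumped;
 * when the last reference goes away the workgroup is freed via RCU.
 */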
int erofs_workgroup_put(struct erofs_workgroup *grp)
{
	int count = atomic_dec_return(&grp->refcount);

	if (count == 1)
		atomic_long_inc(&erofs_global_shrink_cnt);
	else if (!count)
		__erofs_workgroup_free(grp);
	return count;
}

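/*
 * Try to reclaim one workgroup: freeze it, release its cached pages,
 * erase it from the XArray and free it.  Returns false if the workgroup
 * is still in use or its cached pages cannot all be released.
 */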
static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
					   struct erofs_workgroup *grp)
{
	/*
	 * If managed cache is on, the refcount of workgroups themselves
	 * could be < 0 (frozen). In other words, there is no guarantee
	 * that all refcounts are > 0.
	 */
	if (!erofs_workgroup_try_to_freeze(grp, 1))
		return false;

	/*
	 * Note that all cached pages should be detached before the
	 * workgroup is deleted from the XArray. Otherwise some cached
	 * pages could still be attached to the orphaned old workgroup
	 * when the new one becomes available in the tree.
	 */
	if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
		erofs_workgroup_unfreeze(grp, 1);
		return false;
	}

	/*
	 * It's impossible to fail after the workgroup is frozen, but in
	 * order to catch unexpected races, add a DBG_BUGON to observe
	 * this in advance.
	 */
	DBG_BUGON(xa_erase(&sbi->managed_pslots, grp->index) != grp);

	/* the last refcount should be connected with its managed pslot. */
	erofs_workgroup_unfreeze(grp, 0);
	__erofs_workgroup_free(grp);
	return true;
}

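/*
 * Walk the managed XArray of one mounted instance and release up to
 * nr_shrink reclaimable workgroups, returning the number actually freed.
 */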
static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
					      unsigned long nr_shrink)
{
	struct erofs_workgroup *grp;
	unsigned int freed = 0;
	unsigned long index;

	xa_for_each(&sbi->managed_pslots, index, grp) {
		/* try to shrink each valid workgroup */
		if (!erofs_try_to_release_workgroup(sbi, grp))
			continue;
		++freed;
		if (!--nr_shrink)
			break;
	}
	return freed;
}

/* protected by 'erofs_sb_list_lock' */
static unsigned int shrinker_run_no;

/* protects the mounted 'erofs_sb_list' */
static DEFINE_SPINLOCK(erofs_sb_list_lock);
static LIST_HEAD(erofs_sb_list);

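/* Add this mount to the list of superblocks visited by the shrinker. */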
void erofs_shrinker_register(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	mutex_init(&sbi->umount_mutex);

	spin_lock(&erofs_sb_list_lock);
	list_add(&sbi->list, &erofs_sb_list);
	spin_unlock(&erofs_sb_list_lock);
}

void erofs_shrinker_unregister(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	mutex_lock(&sbi->umount_mutex);
	/* clean up all remaining workgroups in memory */
	erofs_shrink_workstation(sbi, ~0UL);

	spin_lock(&erofs_sb_list_lock);
	list_del(&sbi->list);
	spin_unlock(&erofs_sb_list_lock);
	mutex_unlock(&sbi->umount_mutex);
}

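/* Report the number of reclaimable workgroups across all mounted instances. */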
static unsigned long erofs_shrink_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	return atomic_long_read(&erofs_global_shrink_cnt);
}

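/*
 * Round-robin over all mounted EROFS instances, shrinking each in turn
 * until nr_to_scan objects have been freed; visited superblocks are moved
 * to the tail of erofs_sb_list so repeated scans stay fair.
 */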
static unsigned long erofs_shrink_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct erofs_sb_info *sbi;
	struct list_head *p;

	unsigned long nr = sc->nr_to_scan;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&erofs_sb_list_lock);
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);

	/* Iterate over all mounted superblocks and try to shrink them */
	p = erofs_sb_list.next;
	while (p != &erofs_sb_list) {
		sbi = list_entry(p, struct erofs_sb_info, list);

		/*
		 * We move the ones we do to the end of the list, so we stop
		 * when we see one we have already done.
		 */
		if (sbi->shrinker_run_no == run_no)
			break;

		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}

		spin_unlock(&erofs_sb_list_lock);
		sbi->shrinker_run_no = run_no;

		freed += erofs_shrink_workstation(sbi, nr - freed);

		spin_lock(&erofs_sb_list_lock);
		/* Get the next list element before we move this one */
		p = p->next;

		/*
		 * Move this one to the end of the list to provide some
		 * fairness.
		 */
		list_move_tail(&sbi->list, &erofs_sb_list);
		mutex_unlock(&sbi->umount_mutex);

		if (freed >= nr)
			break;
	}
	spin_unlock(&erofs_sb_list_lock);
	return freed;
}

static struct shrinker erofs_shrinker_info = {
	.scan_objects = erofs_shrink_scan,
	.count_objects = erofs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};

int __init erofs_init_shrinker(void)
{
	return register_shrinker(&erofs_shrinker_info);
}

void erofs_exit_shrinker(void)
{
	unregister_shrinker(&erofs_shrinker_info);
}

#endif	/* !CONFIG_EROFS_FS_ZIP */