1 | /* SPDX-License-Identifier: GPL-2.0 */ | |
2 | #ifndef _LINUX_SHRINKER_H | |
3 | #define _LINUX_SHRINKER_H | |
4 | ||
5 | /* | |
6 | * This struct is used to pass information from page reclaim to the shrinkers. | |
7 | * We consolidate the values for easier extention later. | |
8 | * | |
9 | * The 'gfpmask' refers to the allocation we are currently trying to | |
10 | * fulfil. | |
11 | */ | |
12 | struct shrink_control { | |
13 | gfp_t gfp_mask; | |
14 | ||
15 | /* | |
16 | * How many objects scan_objects should scan and try to reclaim. | |
17 | * This is reset before every call, so it is safe for callees | |
18 | * to modify. | |
19 | */ | |
20 | unsigned long nr_to_scan; | |
21 | ||
22 | /* | |
23 | * How many objects did scan_objects process? | |
24 | * This defaults to nr_to_scan before every call, but the callee | |
25 | * should track its actual progress. | |
26 | */ | |
27 | unsigned long nr_scanned; | |
28 | ||
29 | /* current node being shrunk (for NUMA aware shrinkers) */ | |
30 | int nid; | |
31 | ||
32 | /* current memcg being shrunk (for memcg aware shrinkers) */ | |
33 | struct mem_cgroup *memcg; | |
34 | }; | |
35 | ||
36 | #define SHRINK_STOP (~0UL) | |
37 | /* | |
38 | * A callback you can register to apply pressure to ageable caches. | |
39 | * | |
40 | * @count_objects should return the number of freeable items in the cache. If | |
41 | * there are no objects to free or the number of freeable items cannot be | |
42 | * determined, it should return 0. No deadlock checks should be done during the | |
43 | * count callback - the shrinker relies on aggregating scan counts that couldn't | |
44 | * be executed due to potential deadlocks to be run at a later call when the | |
45 | * deadlock condition is no longer pending. | |
46 | * | |
47 | * @scan_objects will only be called if @count_objects returned a non-zero | |
48 | * value for the number of freeable objects. The callout should scan the cache | |
49 | * and attempt to free items from the cache. It should then return the number | |
50 | * of objects freed during the scan, or SHRINK_STOP if progress cannot be made | |
51 | * due to potential deadlocks. If SHRINK_STOP is returned, then no further | |
52 | * attempts to call the @scan_objects will be made from the current reclaim | |
53 | * context. | |
54 | * | |
55 | * @flags determine the shrinker abilities, like numa awareness | |
56 | */ | |
57 | struct shrinker { | |
58 | unsigned long (*count_objects)(struct shrinker *, | |
59 | struct shrink_control *sc); | |
60 | unsigned long (*scan_objects)(struct shrinker *, | |
61 | struct shrink_control *sc); | |
62 | ||
63 | int seeks; /* seeks to recreate an obj */ | |
64 | long batch; /* reclaim batch size, 0 = default */ | |
65 | unsigned long flags; | |
66 | ||
67 | /* These are for internal use */ | |
68 | struct list_head list; | |
69 | /* objs pending delete, per node */ | |
70 | atomic_long_t *nr_deferred; | |
71 | }; | |
72 | #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */ | |
73 | ||
74 | /* Flags */ | |
75 | #define SHRINKER_NUMA_AWARE (1 << 0) | |
76 | #define SHRINKER_MEMCG_AWARE (1 << 1) | |
77 | ||
78 | extern int prealloc_shrinker(struct shrinker *shrinker); | |
79 | extern void register_shrinker_prepared(struct shrinker *shrinker); | |
80 | extern int register_shrinker(struct shrinker *shrinker); | |
81 | extern void unregister_shrinker(struct shrinker *shrinker); | |
82 | extern void free_prealloced_shrinker(struct shrinker *shrinker); | |
83 | #endif |