/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

/*
 * Determines how hard direct compaction should try to succeed.
 * Lower value means higher priority, analogous to reclaim priority.
 */
enum compact_priority {
	COMPACT_PRIO_SYNC_FULL,
	MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_FULL,
	COMPACT_PRIO_SYNC_LIGHT,
	MIN_COMPACT_COSTLY_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
	DEF_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
	COMPACT_PRIO_ASYNC,
	INIT_COMPACT_PRIORITY = COMPACT_PRIO_ASYNC
};
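
/*
 * Illustrative note (not part of the original header): callers start at
 * INIT_COMPACT_PRIORITY and, if a retried allocation keeps failing, may step
 * the numeric value down towards MIN_COMPACT_PRIORITY, i.e. escalate from
 * async to increasingly thorough sync compaction. A minimal sketch, where
 * "prio" and "should_try_harder" are hypothetical caller-side names:
 *
 *	enum compact_priority prio = INIT_COMPACT_PRIORITY;
 *	...
 *	if (should_try_harder && prio > MIN_COMPACT_PRIORITY)
 *		prio--;		(lower value means higher priority)
 */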
18 | ||
19 | /* Return values for compact_zone() and try_to_compact_pages() */ | |
20 | /* When adding new states, please adjust include/trace/events/compaction.h */ | |
21 | enum compact_result { | |
22 | /* For more detailed tracepoint output - internal to compaction */ | |
23 | COMPACT_NOT_SUITABLE_ZONE, | |
24 | /* | |
25 | * compaction didn't start as it was not possible or direct reclaim | |
26 | * was more suitable | |
27 | */ | |
28 | COMPACT_SKIPPED, | |
29 | /* compaction didn't start as it was deferred due to past failures */ | |
30 | COMPACT_DEFERRED, | |
31 | ||
32 | /* compaction not active last round */ | |
33 | COMPACT_INACTIVE = COMPACT_DEFERRED, | |
34 | ||
35 | /* For more detailed tracepoint output - internal to compaction */ | |
36 | COMPACT_NO_SUITABLE_PAGE, | |
37 | /* compaction should continue to another pageblock */ | |
38 | COMPACT_CONTINUE, | |
39 | ||
40 | /* | |
41 | * The full zone was compacted scanned but wasn't successfull to compact | |
42 | * suitable pages. | |
43 | */ | |
44 | COMPACT_COMPLETE, | |
45 | /* | |
46 | * direct compaction has scanned part of the zone but wasn't successfull | |
47 | * to compact suitable pages. | |
48 | */ | |
49 | COMPACT_PARTIAL_SKIPPED, | |
50 | ||
51 | /* compaction terminated prematurely due to lock contentions */ | |
52 | COMPACT_CONTENDED, | |
53 | ||
54 | /* | |
55 | * direct compaction terminated after concluding that the allocation | |
56 | * should now succeed | |
57 | */ | |
58 | COMPACT_SUCCESS, | |
59 | }; | |
60 | ||
61 | struct alloc_context; /* in mm/internal.h */ | |
62 | ||
63 | /* | |
64 | * Number of free order-0 pages that should be available above given watermark | |
65 | * to make sure compaction has reasonable chance of not running out of free | |
66 | * pages that it needs to isolate as migration target during its work. | |
67 | */ | |
68 | static inline unsigned long compact_gap(unsigned int order) | |
69 | { | |
70 | /* | |
71 | * Although all the isolations for migration are temporary, compaction | |
72 | * free scanner may have up to 1 << order pages on its list and then | |
73 | * try to split an (order - 1) free page. At that point, a gap of | |
74 | * 1 << order might not be enough, so it's safer to require twice that | |
75 | * amount. Note that the number of pages on the list is also | |
76 | * effectively limited by COMPACT_CLUSTER_MAX, as that's the maximum | |
77 | * that the migrate scanner can have isolated on migrate list, and free | |
78 | * scanner is only invoked when the number of isolated free pages is | |
79 | * lower than that. But it's not worth to complicate the formula here | |
80 | * as a bigger gap for higher orders than strictly necessary can also | |
81 | * improve chances of compaction success. | |
82 | */ | |
83 | return 2UL << order; | |
84 | } | |
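
/*
 * Worked example (illustrative, not from the original header): for
 * order == 9, e.g. a 2MB THP with 4KB base pages, compact_gap() returns
 * 2UL << 9 == 1024 order-0 pages, i.e. twice the allocation size. A hedged
 * sketch of how a caller might fold it into a watermark check, where
 * "wmark" stands for whichever zone watermark the caller has chosen:
 *
 *	if (!zone_watermark_ok(zone, 0, wmark + compact_gap(order),
 *			       classzone_idx, alloc_flags))
 *		;	(zone likely lacks free pages for migration targets)
 */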
85 | ||
86 | #ifdef CONFIG_COMPACTION | |
87 | extern int sysctl_compact_memory; | |
88 | extern int sysctl_compaction_handler(struct ctl_table *table, int write, | |
89 | void __user *buffer, size_t *length, loff_t *ppos); | |
90 | extern int sysctl_extfrag_threshold; | |
91 | extern int sysctl_extfrag_handler(struct ctl_table *table, int write, | |
92 | void __user *buffer, size_t *length, loff_t *ppos); | |
93 | extern int sysctl_compact_unevictable_allowed; | |
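
/*
 * Usage note (illustrative, not from the original header): these knobs are
 * normally exposed under /proc/sys/vm/; for example, writing to
 * compact_memory triggers compaction of all zones from userspace:
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 */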
94 | ||
95 | extern int fragmentation_index(struct zone *zone, unsigned int order); | |
96 | extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, | |
97 | unsigned int order, unsigned int alloc_flags, | |
98 | const struct alloc_context *ac, enum compact_priority prio); | |
99 | extern void reset_isolation_suitable(pg_data_t *pgdat); | |
100 | extern enum compact_result compaction_suitable(struct zone *zone, int order, | |
101 | unsigned int alloc_flags, int classzone_idx); | |
102 | ||
103 | extern void defer_compaction(struct zone *zone, int order); | |
104 | extern bool compaction_deferred(struct zone *zone, int order); | |
105 | extern void compaction_defer_reset(struct zone *zone, int order, | |
106 | bool alloc_success); | |
107 | extern bool compaction_restarting(struct zone *zone, int order); | |
108 | ||
109 | /* Compaction has made some progress and retrying makes sense */ | |
110 | static inline bool compaction_made_progress(enum compact_result result) | |
111 | { | |
112 | /* | |
113 | * Even though this might sound confusing this in fact tells us | |
114 | * that the compaction successfully isolated and migrated some | |
115 | * pageblocks. | |
116 | */ | |
117 | if (result == COMPACT_SUCCESS) | |
118 | return true; | |
119 | ||
120 | return false; | |
121 | } | |
122 | ||
123 | /* Compaction has failed and it doesn't make much sense to keep retrying. */ | |
124 | static inline bool compaction_failed(enum compact_result result) | |
125 | { | |
126 | /* All zones were scanned completely and still not result. */ | |
127 | if (result == COMPACT_COMPLETE) | |
128 | return true; | |
129 | ||
130 | return false; | |
131 | } | |
132 | ||
133 | /* | |
134 | * Compaction has backed off for some reason. It might be throttling or | |
135 | * lock contention. Retrying is still worthwhile. | |
136 | */ | |
137 | static inline bool compaction_withdrawn(enum compact_result result) | |
138 | { | |
139 | /* | |
140 | * Compaction backed off due to watermark checks for order-0 | |
141 | * so the regular reclaim has to try harder and reclaim something. | |
142 | */ | |
143 | if (result == COMPACT_SKIPPED) | |
144 | return true; | |
145 | ||
146 | /* | |
147 | * If compaction is deferred for high-order allocations, it is | |
148 | * because sync compaction recently failed. If this is the case | |
149 | * and the caller requested a THP allocation, we do not want | |
150 | * to heavily disrupt the system, so we fail the allocation | |
151 | * instead of entering direct reclaim. | |
152 | */ | |
153 | if (result == COMPACT_DEFERRED) | |
154 | return true; | |
155 | ||
156 | /* | |
157 | * If compaction in async mode encounters contention or blocks higher | |
158 | * priority task we back off early rather than cause stalls. | |
159 | */ | |
160 | if (result == COMPACT_CONTENDED) | |
161 | return true; | |
162 | ||
163 | /* | |
164 | * Page scanners have met but we haven't scanned full zones so this | |
165 | * is a back off in fact. | |
166 | */ | |
167 | if (result == COMPACT_PARTIAL_SKIPPED) | |
168 | return true; | |
169 | ||
170 | return false; | |
171 | } | |
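
/*
 * Illustrative sketch (not part of the original header): the three helpers
 * above let an allocation retry path classify a compact_result, roughly:
 *
 *	if (compaction_made_progress(result))
 *		;	(retrying the allocation is worthwhile as-is)
 *	else if (compaction_withdrawn(result))
 *		;	(retry, typically after reclaiming more memory)
 *	else if (compaction_failed(result))
 *		;	(give up, or escalate the compaction priority)
 */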
172 | ||
173 | ||
174 | bool compaction_zonelist_suitable(struct alloc_context *ac, int order, | |
175 | int alloc_flags); | |
176 | ||
177 | extern int kcompactd_run(int nid); | |
178 | extern void kcompactd_stop(int nid); | |
179 | extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx); | |
180 | ||
181 | #else | |
182 | static inline void reset_isolation_suitable(pg_data_t *pgdat) | |
183 | { | |
184 | } | |
185 | ||
186 | static inline enum compact_result compaction_suitable(struct zone *zone, int order, | |
187 | int alloc_flags, int classzone_idx) | |
188 | { | |
189 | return COMPACT_SKIPPED; | |
190 | } | |
191 | ||
192 | static inline void defer_compaction(struct zone *zone, int order) | |
193 | { | |
194 | } | |
195 | ||
196 | static inline bool compaction_deferred(struct zone *zone, int order) | |
197 | { | |
198 | return true; | |
199 | } | |
200 | ||
201 | static inline bool compaction_made_progress(enum compact_result result) | |
202 | { | |
203 | return false; | |
204 | } | |
205 | ||
206 | static inline bool compaction_failed(enum compact_result result) | |
207 | { | |
208 | return false; | |
209 | } | |
210 | ||
211 | static inline bool compaction_withdrawn(enum compact_result result) | |
212 | { | |
213 | return true; | |
214 | } | |
215 | ||
216 | static inline int kcompactd_run(int nid) | |
217 | { | |
218 | return 0; | |
219 | } | |
220 | static inline void kcompactd_stop(int nid) | |
221 | { | |
222 | } | |
223 | ||
224 | static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) | |
225 | { | |
226 | } | |
227 | ||
228 | #endif /* CONFIG_COMPACTION */ | |
229 | ||
230 | #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) | |
231 | struct node; | |
232 | extern int compaction_register_node(struct node *node); | |
233 | extern void compaction_unregister_node(struct node *node); | |
234 | ||
235 | #else | |
236 | ||
237 | static inline int compaction_register_node(struct node *node) | |
238 | { | |
239 | return 0; | |
240 | } | |
241 | ||
242 | static inline void compaction_unregister_node(struct node *node) | |
243 | { | |
244 | } | |
245 | #endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */ | |
246 | ||
247 | #endif /* _LINUX_COMPACTION_H */ |