1 #ifndef _LINUX_COMPACTION_H
2 #define _LINUX_COMPACTION_H
/*
 * Determines how hard direct compaction should try to succeed.
 * Lower value means higher priority, analogous to reclaim priority.
 */
enum compact_priority {
	COMPACT_PRIO_SYNC_FULL,
	MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_FULL,
	COMPACT_PRIO_SYNC_LIGHT,
	MIN_COMPACT_COSTLY_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
	DEF_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
	/* NOTE(review): declarator line was lost in this copy; INIT_COMPACT_PRIORITY
	 * below aliases it, so async must be the lowest (last) priority. */
	COMPACT_PRIO_ASYNC,
	INIT_COMPACT_PRIORITY = COMPACT_PRIO_ASYNC
};
/* Return values for compact_zone() and try_to_compact_pages() */
/* When adding new states, please adjust include/trace/events/compaction.h */
enum compact_result {
	/* For more detailed tracepoint output - internal to compaction */
	COMPACT_NOT_SUITABLE_ZONE,
	/*
	 * compaction didn't start as it was not possible or direct reclaim
	 * was more suitable
	 */
	COMPACT_SKIPPED,
	/* compaction didn't start as it was deferred due to past failures */
	COMPACT_DEFERRED,

	/* compaction not active last round */
	COMPACT_INACTIVE = COMPACT_DEFERRED,

	/* For more detailed tracepoint output - internal to compaction */
	COMPACT_NO_SUITABLE_PAGE,
	/* compaction should continue to another pageblock */
	COMPACT_CONTINUE,

	/*
	 * The full zone was compacted scanned but wasn't successful to compact
	 * suitable pages.
	 */
	COMPACT_COMPLETE,
	/*
	 * direct compaction has scanned part of the zone but wasn't successful
	 * to compact suitable pages.
	 */
	COMPACT_PARTIAL_SKIPPED,

	/* compaction terminated prematurely due to lock contentions */
	COMPACT_CONTENDED,

	/*
	 * direct compaction terminated after concluding that the allocation
	 * should be now satisfied
	 */
	COMPACT_SUCCESS,
};
/* Opaque forward declaration; the full definition is in mm/internal.h. */
struct alloc_context; /* in mm/internal.h */
/*
 * Number of free order-0 pages that should be available above given watermark
 * to make sure compaction has reasonable chance of not running out of free
 * pages that it needs to isolate as migration target during its work.
 */
static inline unsigned long compact_gap(unsigned int order)
{
	/*
	 * Although all the isolations for migration are temporary, compaction
	 * free scanner may have up to 1 << order pages on its list and then
	 * try to split an (order - 1) free page. At that point, a gap of
	 * 1 << order might not be enough, so it's safer to require twice that
	 * amount. Note that the number of pages on the list is also
	 * effectively limited by COMPACT_CLUSTER_MAX, as that's the maximum
	 * that the migrate scanner can have isolated on migrate list, and free
	 * scanner is only invoked when the number of isolated free pages is
	 * lower than that. But it's not worth to complicate the formula here
	 * as a bigger gap for higher orders than strictly necessary can also
	 * improve chances of compaction success.
	 */
	return 2UL << order;
}
85 #ifdef CONFIG_COMPACTION
86 extern int sysctl_compact_memory
;
87 extern int sysctl_compaction_handler(struct ctl_table
*table
, int write
,
88 void __user
*buffer
, size_t *length
, loff_t
*ppos
);
89 extern int sysctl_extfrag_threshold
;
90 extern int sysctl_extfrag_handler(struct ctl_table
*table
, int write
,
91 void __user
*buffer
, size_t *length
, loff_t
*ppos
);
92 extern int sysctl_compact_unevictable_allowed
;
94 extern int fragmentation_index(struct zone
*zone
, unsigned int order
);
95 extern enum compact_result
try_to_compact_pages(gfp_t gfp_mask
,
96 unsigned int order
, unsigned int alloc_flags
,
97 const struct alloc_context
*ac
, enum compact_priority prio
);
98 extern void reset_isolation_suitable(pg_data_t
*pgdat
);
99 extern enum compact_result
compaction_suitable(struct zone
*zone
, int order
,
100 unsigned int alloc_flags
, int classzone_idx
);
102 extern void defer_compaction(struct zone
*zone
, int order
);
103 extern bool compaction_deferred(struct zone
*zone
, int order
);
104 extern void compaction_defer_reset(struct zone
*zone
, int order
,
106 extern bool compaction_restarting(struct zone
*zone
, int order
);
108 /* Compaction has made some progress and retrying makes sense */
109 static inline bool compaction_made_progress(enum compact_result result
)
112 * Even though this might sound confusing this in fact tells us
113 * that the compaction successfully isolated and migrated some
116 if (result
== COMPACT_SUCCESS
)
122 /* Compaction has failed and it doesn't make much sense to keep retrying. */
123 static inline bool compaction_failed(enum compact_result result
)
125 /* All zones were scanned completely and still not result. */
126 if (result
== COMPACT_COMPLETE
)
133 * Compaction has backed off for some reason. It might be throttling or
134 * lock contention. Retrying is still worthwhile.
136 static inline bool compaction_withdrawn(enum compact_result result
)
139 * Compaction backed off due to watermark checks for order-0
140 * so the regular reclaim has to try harder and reclaim something.
142 if (result
== COMPACT_SKIPPED
)
146 * If compaction is deferred for high-order allocations, it is
147 * because sync compaction recently failed. If this is the case
148 * and the caller requested a THP allocation, we do not want
149 * to heavily disrupt the system, so we fail the allocation
150 * instead of entering direct reclaim.
152 if (result
== COMPACT_DEFERRED
)
156 * If compaction in async mode encounters contention or blocks higher
157 * priority task we back off early rather than cause stalls.
159 if (result
== COMPACT_CONTENDED
)
163 * Page scanners have met but we haven't scanned full zones so this
164 * is a back off in fact.
166 if (result
== COMPACT_PARTIAL_SKIPPED
)
173 bool compaction_zonelist_suitable(struct alloc_context
*ac
, int order
,
176 extern int kcompactd_run(int nid
);
177 extern void kcompactd_stop(int nid
);
178 extern void wakeup_kcompactd(pg_data_t
*pgdat
, int order
, int classzone_idx
);
181 static inline void reset_isolation_suitable(pg_data_t
*pgdat
)
185 static inline enum compact_result
compaction_suitable(struct zone
*zone
, int order
,
186 int alloc_flags
, int classzone_idx
)
188 return COMPACT_SKIPPED
;
191 static inline void defer_compaction(struct zone
*zone
, int order
)
195 static inline bool compaction_deferred(struct zone
*zone
, int order
)
200 static inline bool compaction_made_progress(enum compact_result result
)
205 static inline bool compaction_failed(enum compact_result result
)
210 static inline bool compaction_withdrawn(enum compact_result result
)
215 static inline int kcompactd_run(int nid
)
219 static inline void kcompactd_stop(int nid
)
223 static inline void wakeup_kcompactd(pg_data_t
*pgdat
, int order
, int classzone_idx
)
227 #endif /* CONFIG_COMPACTION */
229 #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
231 extern int compaction_register_node(struct node
*node
);
232 extern void compaction_unregister_node(struct node
*node
);
236 static inline int compaction_register_node(struct node
*node
)
241 static inline void compaction_unregister_node(struct node
*node
)
244 #endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */
246 #endif /* _LINUX_COMPACTION_H */