#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/topology.h>

struct vm_area_struct;

/*
 * In case of changes, please don't forget to update
 * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
 */

/* Plain integer GFP bitmasks. Do not use this directly. */
#define ___GFP_DMA		0x01u
#define ___GFP_HIGHMEM		0x02u
#define ___GFP_DMA32		0x04u
#define ___GFP_MOVABLE		0x08u
#define ___GFP_RECLAIMABLE	0x10u
#define ___GFP_HIGH		0x20u
#define ___GFP_IO		0x40u
#define ___GFP_FS		0x80u
#define ___GFP_COLD		0x100u
#define ___GFP_NOWARN		0x200u
#define ___GFP_RETRY_MAYFAIL	0x400u
#define ___GFP_NOFAIL		0x800u
#define ___GFP_NORETRY		0x1000u
#define ___GFP_MEMALLOC		0x2000u
#define ___GFP_COMP		0x4000u
#define ___GFP_ZERO		0x8000u
#define ___GFP_NOMEMALLOC	0x10000u
#define ___GFP_HARDWALL		0x20000u
#define ___GFP_THISNODE		0x40000u
#define ___GFP_ATOMIC		0x80000u
#define ___GFP_ACCOUNT		0x100000u
#define ___GFP_NOTRACK		0x200000u
#define ___GFP_DIRECT_RECLAIM	0x400000u
#define ___GFP_WRITE		0x800000u
#define ___GFP_KSWAPD_RECLAIM	0x1000000u
#ifdef CONFIG_LOCKDEP
#define ___GFP_NOLOCKDEP	0x2000000u
#else
#define ___GFP_NOLOCKDEP	0
#endif
/* If the above are modified, __GFP_BITS_SHIFT may need updating */

/*
 * Physical address zone modifiers (see linux/mmzone.h - low four bits)
 *
 * Do not put any conditional on these. If necessary modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */
#define __GFP_DMA	((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* ZONE_MOVABLE allowed */
#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)

/*
 * Page mobility and placement hints
 *
 * These flags provide hints about how mobile the page is. Pages with similar
 * mobility are placed within the same pageblocks to minimise problems due
 * to external fragmentation.
 *
 * __GFP_MOVABLE (also a zone modifier) indicates that the page can be
 * moved by page migration during memory compaction or can be reclaimed.
 *
 * __GFP_RECLAIMABLE is used for slab allocations that specify
 * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
 *
 * __GFP_WRITE indicates the caller intends to dirty the page. Where possible,
 * these pages will be spread between local zones to avoid all the dirty
 * pages being in one zone (fair zone allocation policy).
 *
 * __GFP_HARDWALL enforces the cpuset memory allocation policy.
 *
 * __GFP_THISNODE forces the allocation to be satisfied from the requested
 * node with no fallbacks or placement policy enforcements.
 *
 * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
 */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
#define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)
#define __GFP_HARDWALL	((__force gfp_t)___GFP_HARDWALL)
#define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE)
#define __GFP_ACCOUNT	((__force gfp_t)___GFP_ACCOUNT)

/*
 * Watermark modifiers -- controls access to emergency reserves
 *
 * __GFP_HIGH indicates that the caller is high-priority and that granting
 * the request is necessary before the system can make forward progress.
 * For example, creating an IO context to clean pages.
 *
 * __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
 * high priority. Users are typically interrupt handlers. This may be
 * used in conjunction with __GFP_HIGH.
 *
 * __GFP_MEMALLOC allows access to all memory. This should only be used when
 * the caller guarantees the allocation will allow more memory to be freed
 * very shortly, e.g. process exiting or swapping. Users should either
 * be the MM or co-ordinate closely with the VM (e.g. swap over NFS).
 *
 * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
 * This takes precedence over the __GFP_MEMALLOC flag if both are set.
 */
#define __GFP_ATOMIC	((__force gfp_t)___GFP_ATOMIC)
#define __GFP_HIGH	((__force gfp_t)___GFP_HIGH)
#define __GFP_MEMALLOC	((__force gfp_t)___GFP_MEMALLOC)
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)

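/*
 * Illustrative sketch (not part of this header): an interrupt handler
 * cannot sleep and must tolerate failure, so it typically uses the
 * GFP_ATOMIC combination defined below rather than raw __GFP_ATOMIC:
 *
 *	void *buf = kmalloc(size, GFP_ATOMIC);
 *	if (!buf)
 *		return -ENOMEM;		// reserves are finite; expect failure
 */
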
/*
 * Reclaim modifiers
 *
 * __GFP_IO can start physical IO.
 *
 * __GFP_FS can call down to the low-level FS. Clearing the flag avoids the
 * allocator recursing into the filesystem which might already be holding
 * locks.
 *
 * __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
 * This flag can be cleared to avoid unnecessary delays when a fallback
 * option is available.
 *
 * __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
 * the low watermark is reached and have it reclaim pages until the high
 * watermark is reached. A caller may wish to clear this flag when fallback
 * options are available and the reclaim is likely to disrupt the system. The
 * canonical example is THP allocation where a fallback is cheap but
 * reclaim/compaction may cause indirect stalls.
 *
 * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
 *
 * The default allocator behavior depends on the request size. We have a
 * concept of so-called costly allocations (with order > PAGE_ALLOC_COSTLY_ORDER).
 * !costly allocations are too essential to fail so they are implicitly
 * non-failing by default (with some exceptions like OOM victims might fail so
 * the caller still has to check for failures) while costly requests try to be
 * not disruptive and back off even without invoking the OOM killer.
 * The following three modifiers might be used to override some of these
 * implicit rules.
 *
 * __GFP_NORETRY: The VM implementation will try only very lightweight
 * memory direct reclaim to get some memory under memory pressure (thus
 * it can sleep). It will avoid disruptive actions like OOM killer. The
 * caller must handle the failure which is quite likely to happen under
 * heavy memory pressure. The flag is suitable when failure can easily be
 * handled at small cost, such as reduced throughput.
 *
 * __GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
 * procedures that have previously failed if there is some indication
 * that progress has been made elsewhere. It can wait for other
 * tasks to attempt high level approaches to freeing memory such as
 * compaction (which removes fragmentation) and page-out.
 * There is still a definite limit to the number of retries, but it is
 * a larger limit than with __GFP_NORETRY.
 * Allocations with this flag may fail, but only when there is
 * genuinely little unused memory. While these allocations do not
 * directly trigger the OOM killer, their failure indicates that
 * the system is likely to need to use the OOM killer soon. The
 * caller must handle failure, but can reasonably do so by failing
 * a higher-level request, or completing it only in a much less
 * efficient manner.
 * If the allocation does fail, and the caller is in a position to
 * free some non-essential memory, doing so could benefit the system
 * as a whole.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures. The allocation could block
 * indefinitely but will never return with failure. Testing for
 * failure is pointless.
 * New users should be evaluated carefully (and the flag should be
 * used only when there is no reasonable failure policy) but it is
 * definitely preferable to use the flag rather than open-code an
 * endless loop around the allocator.
 * Using this flag for costly allocations is _highly_ discouraged.
 */
#define __GFP_IO	((__force gfp_t)___GFP_IO)
#define __GFP_FS	((__force gfp_t)___GFP_FS)
#define __GFP_DIRECT_RECLAIM	((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
#define __GFP_KSWAPD_RECLAIM	((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
#define __GFP_RETRY_MAYFAIL	((__force gfp_t)___GFP_RETRY_MAYFAIL)
#define __GFP_NOFAIL	((__force gfp_t)___GFP_NOFAIL)
#define __GFP_NORETRY	((__force gfp_t)___GFP_NORETRY)

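/*
 * Illustrative sketch (not part of this header): a caller with a cheap
 * fallback makes an optimistic high-order attempt with __GFP_NORETRY and
 * degrades to order-0 on failure:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL | __GFP_NORETRY |
 *					__GFP_NOWARN, 2);
 *	if (!page)
 *		page = alloc_pages(GFP_KERNEL, 0);	// order-0 fallback
 */
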
/*
 * Action modifiers
 *
 * __GFP_COLD indicates that the caller does not expect the page to be used
 * in the near future. Where possible, a cache-cold page will be returned.
 *
 * __GFP_NOWARN suppresses allocation failure reports.
 *
 * __GFP_COMP requests compound page metadata.
 *
 * __GFP_ZERO returns a zeroed page on success.
 *
 * __GFP_NOTRACK avoids tracking with kmemcheck.
 *
 * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
 * distinguishing in the source between false positives and allocations that
 * cannot be supported (e.g. page tables).
 */
#define __GFP_COLD	((__force gfp_t)___GFP_COLD)
#define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)
#define __GFP_COMP	((__force gfp_t)___GFP_COMP)
#define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)
#define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)

/* Disable lockdep for GFP context tracking */
#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)

/* Room for N __GFP_FOO bits */
#define __GFP_BITS_SHIFT (25 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

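/*
 * Worked arithmetic (illustrative): ___GFP_KSWAPD_RECLAIM (0x1000000u) is
 * bit 24, so 25 bits are in use without lockdep; CONFIG_LOCKDEP adds
 * ___GFP_NOLOCKDEP as bit 25, hence the IS_ENABLED() term above.
 */
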
/*
 * Useful GFP flag combinations that are commonly used. It is recommended
 * that subsystems start with one of these combinations and then set/clear
 * __GFP_FOO flags as necessary.
 *
 * GFP_ATOMIC users cannot sleep and need the allocation to succeed. A lower
 * watermark is applied to allow access to "atomic reserves".
 *
 * GFP_KERNEL is typical for kernel-internal allocations. The caller requires
 * ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
 *
 * GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
 * accounted to kmemcg.
 *
 * GFP_NOWAIT is for kernel allocations that should not stall for direct
 * reclaim, start physical IO or use any filesystem callback.
 *
 * GFP_NOIO will use direct reclaim to discard clean pages or slab pages
 * that do not require the starting of any physical IO.
 * Please try to avoid using this flag directly and instead use
 * memalloc_noio_{save,restore} to mark the whole scope which cannot
 * perform any IO with a short explanation why. All allocation requests
 * will inherit GFP_NOIO implicitly.
 *
 * GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
 * Please try to avoid using this flag directly and instead use
 * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
 * recurse into the FS layer with a short explanation why. All allocation
 * requests will inherit GFP_NOFS implicitly (see the illustrative scope
 * sketch below the definitions).
 *
 * GFP_USER is for userspace allocations that also need to be directly
 * accessible by the kernel or hardware. It is typically used by hardware
 * for buffers that are mapped to userspace (e.g. graphics) that hardware
 * still must DMA to. cpuset limits are enforced for these allocations.
 *
 * GFP_DMA exists for historical reasons and should be avoided where possible.
 * The flag indicates that the caller requires that the lowest zone be
 * used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
 * it would require careful auditing as some users really require it and
 * others use the flag to avoid lowmem reserves in ZONE_DMA and treat the
 * lowest zone as a type of emergency reserve.
 *
 * GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit
 * address.
 *
 * GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
 * do not need to be directly accessible by the kernel but that cannot
 * move once in use. An example may be a hardware allocation that maps
 * data directly into userspace but has no addressing limitations.
 *
 * GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
 * need direct access to but can use kmap() when access is required. They
 * are expected to be movable via page reclaim or page migration. Typically,
 * pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE.
 *
 * GFP_TRANSHUGE and GFP_TRANSHUGE_LIGHT are used for THP allocations. They are
 * compound allocations that will generally fail quickly if memory is not
 * available and will not wake kswapd/kcompactd on failure. The _LIGHT
 * version does not attempt reclaim/compaction at all and is used by default
 * in the page fault path, while the non-light version is used by khugepaged.
 */
#define GFP_ATOMIC	(__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL	(__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
#define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM)
#define GFP_NOIO	(__GFP_RECLAIM)
#define GFP_NOFS	(__GFP_RECLAIM | __GFP_IO)
#define GFP_USER	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_DMA		__GFP_DMA
#define GFP_DMA32	__GFP_DMA32
#define GFP_HIGHUSER	(GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_TRANSHUGE_LIGHT	((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
			 __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
#define GFP_TRANSHUGE	(GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)

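/*
 * Illustrative sketch (not part of this header) of the scoped-NOFS usage
 * recommended above; the helpers live in <linux/sched/mm.h> and the buffer
 * name is hypothetical:
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();	// fs locks are held
 *	buf = kmalloc(size, GFP_KERNEL);	// implicitly demoted to GFP_NOFS
 *	memalloc_nofs_restore(nofs_flags);
 */
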
/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3

static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
{
	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT

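/*
 * Worked example (illustrative): __GFP_MOVABLE is 0x08, so
 * gfpflags_to_migratetype() returns 0x08 >> 3 == 1 == MIGRATE_MOVABLE;
 * __GFP_RECLAIMABLE (0x10) yields 0x10 >> 3 == 2 == MIGRATE_RECLAIMABLE;
 * with neither bit set the result is 0 == MIGRATE_UNMOVABLE.
 */
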
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}

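/*
 * Illustrative sketch (not part of this header): code handed a gfp mask by
 * its caller can pick a sleeping or non-sleeping path (the pool lock here
 * is hypothetical):
 *
 *	if (gfpflags_allow_blocking(gfp))
 *		mutex_lock(&pool->lock);		// may sleep
 *	else if (!mutex_trylock(&pool->lock))
 *		return NULL;				// cannot block, bail out
 */
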
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are GFP_ZONES_SHIFT
 * bits long and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
 *
 *       bit       result
 *       =================
 *       0x0    => NORMAL
 *       0x1    => DMA or NORMAL
 *       0x2    => HIGHMEM or NORMAL
 *       0x3    => BAD (DMA+HIGHMEM)
 *       0x4    => DMA32 or DMA or NORMAL
 *       0x5    => BAD (DMA+DMA32)
 *       0x6    => BAD (HIGHMEM+DMA32)
 *       0x7    => BAD (HIGHMEM+DMA32+DMA)
 *       0x8    => NORMAL (MOVABLE+0)
 *       0x9    => DMA or NORMAL (MOVABLE+DMA)
 *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
 *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
 *       0xc    => DMA32 (MOVABLE+DMA32)
 *       0xd    => BAD (MOVABLE+DMA32+DMA)
 *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
 *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */

#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4
/* ZONE_DEVICE is not a valid GFP zone specifier */
#define GFP_ZONES_SHIFT 2
#else
#define GFP_ZONES_SHIFT ZONES_SHIFT
#endif

#if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG
#error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * GFP_ZONES_SHIFT)				       \
	| (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT)		       \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT)	       \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT)		       \
	| (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT)		       \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT)    \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\
)

/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. Bit is set if the combination is not
 * allowed.
 */
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32)				      \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM)	      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM)  \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
					 ((1 << GFP_ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}

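/*
 * Worked example (illustrative): GFP_HIGHUSER_MOVABLE has the low bits
 * __GFP_HIGHMEM | __GFP_MOVABLE == 0xa, so gfp_zone() selects entry 0xa of
 * GFP_ZONE_TABLE, i.e. ZONE_MOVABLE per the table above. GFP_KERNEL sets no
 * zone bits (bit == 0x0) and maps to ZONE_NORMAL.
 */
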
/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages, the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */

static inline int gfp_zonelist(gfp_t flags)
{
#ifdef CONFIG_NUMA
	if (unlikely(flags & __GFP_THISNODE))
		return ZONELIST_NOFALLBACK;
#endif
	return ZONELIST_FALLBACK;
}

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
							nodemask_t *nodemask);

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
{
	return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
}

/*
 * Allocate pages, preferring the node given as nid. The node must be valid and
 * online. For a more general interface, see alloc_pages_node().
 */
static inline struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	VM_WARN_ON(!node_online(nid));

	return __alloc_pages(gfp_mask, order, nid);
}

/*
 * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
 * prefer the current CPU's closest node. Otherwise node must be valid and
 * online.
 */
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return __alloc_pages_node(nid, gfp_mask, order);
}

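/*
 * Illustrative sketch (not part of this header): per-CPU data is commonly
 * placed on the node backing that CPU:
 *
 *	struct page *page = alloc_pages_node(cpu_to_node(cpu),
 *					     GFP_KERNEL | __GFP_ZERO, 0);
 *	if (!page)
 *		return -ENOMEM;
 */
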
#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
			struct vm_area_struct *vma, unsigned long addr,
			int node, bool hugepage);
#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
	alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_pages_vma(gfp_mask, order, vma, addr, node, hugepage) \
	alloc_pages(gfp_mask, order)
#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
	alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr)			\
	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
#define alloc_page_vma_node(gfp_mask, vma, addr, node)		\
	alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)

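/*
 * Illustrative sketch (not part of this header): an order-0 GFP_KERNEL page
 * comes from lowmem, so its kernel virtual address is directly available:
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	memset(page_address(page), 0, PAGE_SIZE);	// or pass __GFP_ZERO
 *	__free_page(page);
 */
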
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);

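/*
 * Illustrative sketch (not part of this header): alloc_pages_exact() avoids
 * the power-of-two rounding of the order-based APIs; a 20KB request costs
 * five pages rather than the eight of an order-3 allocation:
 *
 *	void *buf = alloc_pages_exact(20 * 1024, GFP_KERNEL | __GFP_ZERO);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 20 * 1024);
 */
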
#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, bool cold);
extern void free_hot_cold_page_list(struct list_head *list, bool cold);

struct page_frag_cache;
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
extern void *page_frag_alloc(struct page_frag_cache *nc,
			     unsigned int fragsz, gfp_t gfp_mask);
extern void page_frag_free(void *addr);

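/*
 * Illustrative sketch (not part of this header): the page_frag allocator
 * carves small fragments out of a cached page, as the networking stack does
 * for skb data; the cache variable here is hypothetical:
 *
 *	static struct page_frag_cache nc;
 *	void *data = page_frag_alloc(&nc, 256, GFP_ATOMIC);
 *	if (data)
 *		page_frag_free(data);
 */
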
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);

void page_alloc_init_late(void);

/*
 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
 * GFP flags are used before interrupts are enabled. Once interrupts are
 * enabled, it is set to __GFP_BITS_MASK while the system is running. During
 * hibernation, it is used by PM to avoid I/O during memory allocation while
 * devices are suspended.
 */
extern gfp_t gfp_allowed_mask;

/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);

#ifdef CONFIG_PM_SLEEP
extern bool pm_suspended_storage(void);
#else
static inline bool pm_suspended_storage(void)
{
	return false;
}
#endif /* CONFIG_PM_SLEEP */

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
			      unsigned migratetype, gfp_t gfp_mask);
extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
#endif

#ifdef CONFIG_CMA
/* CMA stuff */
extern void init_cma_reserved_pageblock(struct page *page);
#endif

#endif /* __LINUX_GFP_H */