#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/topology.h>

struct vm_area_struct;

/* Plain integer GFP bitmasks. Do not use this directly. */
#define ___GFP_DMA		0x01u
#define ___GFP_HIGHMEM		0x02u
#define ___GFP_DMA32		0x04u
#define ___GFP_MOVABLE		0x08u
#define ___GFP_RECLAIMABLE	0x10u
#define ___GFP_HIGH		0x20u
#define ___GFP_IO		0x40u
#define ___GFP_FS		0x80u
#define ___GFP_COLD		0x100u
#define ___GFP_NOWARN		0x200u
#define ___GFP_REPEAT		0x400u
#define ___GFP_NOFAIL		0x800u
#define ___GFP_NORETRY		0x1000u
#define ___GFP_MEMALLOC		0x2000u
#define ___GFP_COMP		0x4000u
#define ___GFP_ZERO		0x8000u
#define ___GFP_NOMEMALLOC	0x10000u
#define ___GFP_HARDWALL		0x20000u
#define ___GFP_THISNODE		0x40000u
#define ___GFP_ATOMIC		0x80000u
#define ___GFP_NOACCOUNT	0x100000u
#define ___GFP_NOTRACK		0x200000u
#define ___GFP_DIRECT_RECLAIM	0x400000u
#define ___GFP_OTHER_NODE	0x800000u
#define ___GFP_WRITE		0x1000000u
#define ___GFP_KSWAPD_RECLAIM	0x2000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */

/*
 * GFP bitmasks.
 *
 * Zone modifiers (see linux/mmzone.h - low three bits)
 *
 * Do not put any conditional on these. If necessary modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */
#define __GFP_DMA	((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* Page is movable */
#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
/*
 * Action modifiers - these do not change the zoning.
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail. This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures. New users should be evaluated carefully
 * (and the flag should be used only when there is no reasonable failure
 * policy), but it is definitely preferable to use the flag rather than to
 * open-code an endless loop around the allocator.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely and will
 * return NULL when direct reclaim and memory compaction have failed to allow
 * the allocation to succeed. The OOM killer is not called with the current
 * implementation.
 *
 * __GFP_MOVABLE: Flag that this page will be movable by the page migration
 * mechanism or reclaimed.
 */
#define __GFP_ATOMIC	((__force gfp_t)___GFP_ATOMIC)	/* Caller cannot wait or reschedule */
#define __GFP_HIGH	((__force gfp_t)___GFP_HIGH)	/* Should access emergency pools? */
#define __GFP_IO	((__force gfp_t)___GFP_IO)	/* Can start physical IO? */
#define __GFP_FS	((__force gfp_t)___GFP_FS)	/* Can call down to low-level FS? */
#define __GFP_COLD	((__force gfp_t)___GFP_COLD)	/* Cache-cold page required */
#define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)	/* Suppress page allocation failure warning */
#define __GFP_REPEAT	((__force gfp_t)___GFP_REPEAT)	/* See above */
#define __GFP_NOFAIL	((__force gfp_t)___GFP_NOFAIL)	/* See above */
#define __GFP_NORETRY	((__force gfp_t)___GFP_NORETRY)	/* See above */
#define __GFP_MEMALLOC	((__force gfp_t)___GFP_MEMALLOC) /* Allow access to emergency reserves */
#define __GFP_COMP	((__force gfp_t)___GFP_COMP)	/* Add compound page metadata */
#define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)	/* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves.
							      * This takes precedence over the
							      * __GFP_MEMALLOC flag if both are
							      * set
							      */
#define __GFP_HARDWALL	((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE) /* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
#define __GFP_NOACCOUNT	((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */
#define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)	/* Don't track with kmemcheck */

#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
#define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)	/* Allocator intends to dirty page */

/*
 * A caller that is willing to wait may enter direct reclaim and will
 * wake kswapd to reclaim pages in the background until the high
 * watermark is met. A caller may wish to clear __GFP_DIRECT_RECLAIM to
 * avoid unnecessary delays when a fallback option is available but
 * still allow kswapd to reclaim in the background. The kswapd flag
 * can be cleared when the reclaiming of pages would cause unnecessary
 * disruption.
 */
#define __GFP_WAIT ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
#define __GFP_DIRECT_RECLAIM	((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
#define __GFP_KSWAPD_RECLAIM	((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */

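/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a caller with a cheap fallback path can drop only the direct-reclaim bit,
 * avoiding stalls while still letting kswapd make progress:
 *
 *	gfp_t gfp = (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) | __GFP_NOWARN;
 *
 * which keeps __GFP_KSWAPD_RECLAIM set (GFP_KERNEL is defined below).
 */
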
/*
 * This may seem redundant, but it's a way of annotating false positives vs.
 * allocations that simply cannot be supported (e.g. page tables).
 */
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)

#define __GFP_BITS_SHIFT 26	/* Room for N __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/*
 * GFP_ATOMIC callers cannot sleep and need the allocation to succeed.
 * A lower watermark is applied to allow access to "atomic reserves".
 */
#define GFP_ATOMIC	(__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM)
#define GFP_NOIO	(__GFP_WAIT)
#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_TEMPORARY	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
			 __GFP_RECLAIMABLE)
#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER	(GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_IOFS	(__GFP_IO | __GFP_FS | __GFP_KSWAPD_RECLAIM)
#define GFP_TRANSHUGE	((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \
			 ~__GFP_KSWAPD_RECLAIM)

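/*
 * Illustrative usage (editor's addition, not part of the original header):
 * process context that may sleep normally passes GFP_KERNEL; interrupt or
 * other atomic context must use GFP_ATOMIC and be prepared for failure:
 *
 *	buf = kmalloc(len, GFP_KERNEL);		may sleep, may enter reclaim
 *	buf = kmalloc(len, GFP_ATOMIC);		never sleeps, fails sooner
 */
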
/* This mask makes up all the page movable related flags */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3

/* Control page allocator reclaim behavior */
#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)

/* Control slab gfp mask during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))

/* Control allocation constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA		__GFP_DMA

/* 4GB DMA on some platforms */
#define GFP_DMA32	__GFP_DMA32

/* Convert GFP flags to their corresponding migrate type */
static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
{
	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
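
/*
 * Worked example (editor's addition): with GFP_MOVABLE_SHIFT == 3, the two
 * mask bits land directly on the migrate type:
 *
 *	gfpflags_to_migratetype(GFP_KERNEL)        == MIGRATE_UNMOVABLE
 *	gfpflags_to_migratetype(__GFP_MOVABLE)     == MIGRATE_MOVABLE
 *	gfpflags_to_migratetype(__GFP_RECLAIMABLE) == MIGRATE_RECLAIMABLE
 */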

static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return gfp_flags & __GFP_DIRECT_RECLAIM;
}

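/*
 * Usage sketch (editor's addition): code handed a caller-supplied gfp mask
 * can branch on whether sleeping is permitted, e.g.:
 *
 *	if (gfpflags_allow_blocking(gfp))
 *		mutex_lock(&lock);
 *	else if (!mutex_trylock(&lock))
 *		return -EAGAIN;
 */
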
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are ZONES_SHIFT long
 * and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
 *
 *       bit       result
 *       =================
 *       0x0    => NORMAL
 *       0x1    => DMA or NORMAL
 *       0x2    => HIGHMEM or NORMAL
 *       0x3    => BAD (DMA+HIGHMEM)
 *       0x4    => DMA32 or DMA or NORMAL
 *       0x5    => BAD (DMA+DMA32)
 *       0x6    => BAD (HIGHMEM+DMA32)
 *       0x7    => BAD (HIGHMEM+DMA32+DMA)
 *       0x8    => NORMAL (MOVABLE+0)
 *       0x9    => DMA or NORMAL (MOVABLE+DMA)
 *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
 *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
 *       0xc    => DMA32 (MOVABLE+DMA32)
 *       0xd    => BAD (MOVABLE+DMA32+DMA)
 *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
 *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */

#if 16 * ZONES_SHIFT > BITS_PER_LONG
#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * ZONES_SHIFT)				      \
	| (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT)			      \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT)		      \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT)		      \
	| (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT)			      \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT)       \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT)   \
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT)   \
)

/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32,
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. Bit is set if the combination is not
 * allowed.
 */
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32)				      \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM)	      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
					 ((1 << ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}

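/*
 * Worked example (editor's addition): for __GFP_HIGHMEM, bit == 0x2 and
 * gfp_zone() extracts entry 2 of GFP_ZONE_TABLE, i.e. OPT_ZONE_HIGHMEM
 * (ZONE_HIGHMEM with CONFIG_HIGHMEM, otherwise ZONE_NORMAL). For
 * __GFP_MOVABLE | __GFP_HIGHMEM, bit == 0xa and the result is ZONE_MOVABLE.
 */
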
/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages, the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */

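/*
 * Contrast sketch (editor's addition): the two namespaces with their
 * matching free calls:
 *
 *	struct page *page = alloc_page(GFP_KERNEL);	   ... __free_page(page);
 *	unsigned long addr = __get_free_page(GFP_KERNEL); ... free_page(addr);
 *
 * Only the page-returning variants may pass __GFP_HIGHMEM, since a highmem
 * page need not have a kernel virtual address.
 */
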
static inline int gfp_zonelist(gfp_t flags)
{
	if (IS_ENABLED(CONFIG_NUMA) && unlikely(flags & __GFP_THISNODE))
		return 1;

	return 0;
}

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
		       struct zonelist *zonelist, nodemask_t *nodemask);

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}

/*
 * Allocate pages, preferring the node given as nid. The node must be valid
 * and online. For a more general interface, see alloc_pages_node().
 */
static inline struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	VM_WARN_ON(!node_online(nid));

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

/*
 * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
 * prefer the current CPU's closest node. Otherwise node must be valid and
 * online.
 */
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return __alloc_pages_node(nid, gfp_mask, order);
}

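/*
 * Usage sketch (editor's addition): per-device buffers are often placed on
 * the device's local node; dev_to_node() may return NUMA_NO_NODE, which
 * this helper accepts:
 *
 *	page = alloc_pages_node(dev_to_node(dev), GFP_KERNEL | __GFP_ZERO, 2);
 */
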
#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
			struct vm_area_struct *vma, unsigned long addr,
			int node, bool hugepage);
#define alloc_hugepage_vma(gfp_mask, vma, addr, order)	\
	alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_pages_vma(gfp_mask, order, vma, addr, node, hugepage)	\
	alloc_pages(gfp_mask, order)
#define alloc_hugepage_vma(gfp_mask, vma, addr, order)	\
	alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr)			\
	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
#define alloc_page_vma_node(gfp_mask, vma, addr, node)		\
	alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)

extern struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order);
extern struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask,
					  unsigned int order);

extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);

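/*
 * Usage sketch (editor's addition): __get_free_pages() rounds the request
 * up to a whole power-of-two order, whereas alloc_pages_exact() frees the
 * unused tail pages, so odd sizes waste less memory:
 *
 *	void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);
 *	...
 *	free_pages_exact(buf, 5 * PAGE_SIZE);
 */
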
#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, bool cold);
extern void free_hot_cold_page_list(struct list_head *list, bool cold);

struct page_frag_cache;
extern void *__alloc_page_frag(struct page_frag_cache *nc,
			       unsigned int fragsz, gfp_t gfp_mask);
extern void __free_page_frag(void *addr);

extern void __free_kmem_pages(struct page *page, unsigned int order);
extern void free_kmem_pages(unsigned long addr, unsigned int order);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void page_alloc_init_late(void);
#else
static inline void page_alloc_init_late(void)
{
}
#endif

/*
 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
 * GFP flags are used before interrupts are enabled. Once interrupts are
 * enabled, it is set to __GFP_BITS_MASK while the system is running. During
 * hibernation, it is used by PM to avoid I/O during memory allocation while
 * devices are suspended.
 */
extern gfp_t gfp_allowed_mask;

/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);

#ifdef CONFIG_PM_SLEEP
extern bool pm_suspended_storage(void);
#else
static inline bool pm_suspended_storage(void)
{
	return false;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_CMA

/* The functions below must be run on a range within a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
			      unsigned migratetype);
extern void free_contig_range(unsigned long pfn, unsigned nr_pages);

/* CMA stuff */
extern void init_cma_reserved_pageblock(struct page *page);

#endif

#endif /* __LINUX_GFP_H */