#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>

struct vm_area_struct;

/*
 * GFP bitmasks..
 *
 * Zone modifiers (see linux/mmzone.h - low three bits)
 *
 * Do not put any conditional on these. If necessary modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */
#define __GFP_DMA	((__force gfp_t)0x01u)
#define __GFP_HIGHMEM	((__force gfp_t)0x02u)
#define __GFP_DMA32	((__force gfp_t)0x04u)
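
/*
 * Illustrative note (not part of the original header): the zone modifiers
 * are distinct low-order bits, so a zone request is read back with plain
 * bit tests, exactly as gfp_zone() does further down:
 *
 *	if (flags & __GFP_DMA)
 *		return ZONE_DMA;
 */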

/*
 * Action modifiers - these do not change the zoning
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail. This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
 *
 * __GFP_MOVABLE: Flag that this page will be movable by the page migration
 * mechanism or reclaimable
 */
#define __GFP_WAIT	((__force gfp_t)0x10u)	/* Can wait and reschedule? */
#define __GFP_HIGH	((__force gfp_t)0x20u)	/* Should access emergency pools? */
#define __GFP_IO	((__force gfp_t)0x40u)	/* Can start physical IO? */
#define __GFP_FS	((__force gfp_t)0x80u)	/* Can call down to low-level FS? */
#define __GFP_COLD	((__force gfp_t)0x100u)	/* Cache-cold page required */
#define __GFP_NOWARN	((__force gfp_t)0x200u)	/* Suppress page allocation failure warning */
#define __GFP_REPEAT	((__force gfp_t)0x400u)	/* Retry the allocation.  Might fail */
#define __GFP_NOFAIL	((__force gfp_t)0x800u)	/* Retry for ever.  Cannot fail */
#define __GFP_NORETRY	((__force gfp_t)0x1000u) /* Do not retry.  Might fail */
#define __GFP_COMP	((__force gfp_t)0x4000u) /* Add compound page metadata */
#define __GFP_ZERO	((__force gfp_t)0x8000u) /* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
#define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE	((__force gfp_t)0x40000u) /* No fallback, no policies */
#define __GFP_MOVABLE	((__force gfp_t)0x80000u) /* Page is movable */

#define __GFP_BITS_SHIFT 20	/* Room for 20 __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
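
/*
 * Illustrative sketch (not from the original header): action modifiers are
 * ORed into a gfp_t and tested individually by the allocator.  A caller
 * wanting a zeroed page with no console warning on failure might build:
 *
 *	gfp_t gfp_mask = __GFP_WAIT | __GFP_IO | __GFP_FS |
 *			 __GFP_ZERO | __GFP_NOWARN;
 *
 * Bits outside __GFP_BITS_MASK do not correspond to any __GFP_FOO flag.
 */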
/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_NOIO	(__GFP_WAIT)
#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
			 __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
				 __GFP_HARDWALL | __GFP_HIGHMEM | \
				 __GFP_MOVABLE)
#define GFP_NOFS_PAGECACHE	(__GFP_WAIT | __GFP_IO | __GFP_MOVABLE)
#define GFP_USER_PAGECACHE	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
				 __GFP_HARDWALL | __GFP_MOVABLE)
#define GFP_HIGHUSER_PAGECACHE	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
				 __GFP_HARDWALL | __GFP_HIGHMEM | \
				 __GFP_MOVABLE)
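
/*
 * Illustrative example (not from the original header): GFP_KERNEL includes
 * __GFP_WAIT and may therefore sleep, making it the normal choice in
 * process context.  GFP_ATOMIC omits __GFP_WAIT and sets __GFP_HIGH, so it
 * never sleeps and may tap the emergency pools, which is what interrupt
 * handlers need.  Using alloc_page() (declared below):
 *
 *	struct page *p1 = alloc_page(GFP_KERNEL);	may block
 *	struct page *p2 = alloc_page(GFP_ATOMIC);	never blocks
 */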

#ifdef CONFIG_NUMA
#define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
#else
#define GFP_THISNODE	((__force gfp_t)0)
#endif

/* This mask makes up all the page movable related flags */
#define GFP_MOVABLE_MASK (__GFP_MOVABLE)

/* Control page allocator reclaim behavior */
#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_NOMEMALLOC)

/* Control allocation constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
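
/*
 * Illustrative sketch (not from the original header): a slab-style
 * allocator can trap flags it must never see by masking against
 * GFP_SLAB_BUG_MASK:
 *
 *	if (unlikely(flags & GFP_SLAB_BUG_MASK))
 *		BUG();
 */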
/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA		__GFP_DMA

/* 4GB DMA on some platforms */
#define GFP_DMA32	__GFP_DMA32


static inline enum zone_type gfp_zone(gfp_t flags)
{
	int base = 0;

#ifdef CONFIG_NUMA
	if (flags & __GFP_THISNODE)
		base = MAX_NR_ZONES;
#endif

#ifdef CONFIG_ZONE_DMA
	if (flags & __GFP_DMA)
		return base + ZONE_DMA;
#endif
#ifdef CONFIG_ZONE_DMA32
	if (flags & __GFP_DMA32)
		return base + ZONE_DMA32;
#endif
	if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
			(__GFP_HIGHMEM | __GFP_MOVABLE))
		return base + ZONE_MOVABLE;
#ifdef CONFIG_HIGHMEM
	if (flags & __GFP_HIGHMEM)
		return base + ZONE_HIGHMEM;
#endif
	return base + ZONE_NORMAL;
}

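/*
 * Illustrative mapping (derived from gfp_zone() above, assuming a !NUMA
 * kernel with CONFIG_ZONE_DMA and CONFIG_HIGHMEM enabled):
 *
 *	gfp_zone(GFP_KERNEL)			== ZONE_NORMAL
 *	gfp_zone(GFP_KERNEL | __GFP_DMA)	== ZONE_DMA
 *	gfp_zone(GFP_HIGHUSER)			== ZONE_HIGHMEM
 *	gfp_zone(GFP_HIGHUSER_MOVABLE)		== ZONE_MOVABLE
 */
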
/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages, the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */
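
/*
 * Illustrative contrast (not from the original header): alloc_page() hands
 * back a struct page that may sit in highmem and needs kmap() before the
 * kernel can touch it; __get_free_page() returns a usable kernel virtual
 * address and so must not be asked for __GFP_HIGHMEM:
 *
 *	struct page *page = alloc_page(GFP_HIGHUSER);
 *	unsigned long addr = __get_free_page(GFP_KERNEL);
 */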

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

extern struct page *
FASTCALL(__alloc_pages(gfp_t, unsigned int, struct zonelist *));

static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	/* Unknown node is current node */
	if (nid < 0)
		nid = numa_node_id();

	return __alloc_pages(gfp_mask, order,
		NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_mask));
}

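/*
 * Illustrative usage (not from the original header; nid is assumed to be
 * a caller-supplied node id): allocate an order-2 block, i.e. four
 * contiguous pages, on a specific node; a negative nid means "use the
 * current node":
 *
 *	struct page *page = alloc_pages_node(nid, GFP_KERNEL, 2);
 *	if (!page)
 *		return -ENOMEM;
 */
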
#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_page_vma(gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr);
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)

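/*
 * Illustrative usage (not from the original header; vma and addr are
 * assumed caller context, e.g. a fault handler): alloc_page_vma() picks
 * the page according to the vma's NUMA policy, and degenerates to a plain
 * alloc_page() on !NUMA kernels:
 *
 *	struct page *page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
 */
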
extern unsigned long FASTCALL(__get_free_pages(gfp_t gfp_mask, unsigned int order));
extern unsigned long FASTCALL(get_zeroed_page(gfp_t gfp_mask));

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask),0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA,(order))

extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));
extern void FASTCALL(free_hot_page(struct page *page));
extern void FASTCALL(free_cold_page(struct page *page));

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr),0)

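/*
 * Illustrative pairing (not from the original header): releases must go
 * through the namespace and order that performed the allocation:
 *
 *	unsigned long addr = __get_free_pages(GFP_KERNEL, 3);
 *	if (addr)
 *		free_pages(addr, 3);
 *
 *	struct page *page = alloc_page(GFP_HIGHUSER);
 *	if (page)
 *		__free_page(page);
 */
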
void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);

#endif /* __LINUX_GFP_H */