include/linux/mempolicy.h
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/*
 * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
 * passed by the user to either set_mempolicy() or mbind() in a single 'int'
 * argument.  The MPOL_MODE_FLAGS macro determines the legal set of optional
 * mode flags.
 */

/* Policies */
enum {
	MPOL_DEFAULT,
	MPOL_PREFERRED,
	MPOL_BIND,
	MPOL_INTERLEAVE,
	MPOL_MAX,	/* always last member of enum */
};

/* Flags for set_mempolicy */
#define MPOL_F_STATIC_NODES	(1 << 15)
#define MPOL_F_RELATIVE_NODES	(1 << 14)

/*
 * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
 * either set_mempolicy() or mbind().
 */
#define MPOL_MODE_FLAGS	(MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
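/*
 * Illustrative example (not part of this header): from userspace, a mode and
 * an optional mode flag are OR'ed into the single 'int' argument of
 * set_mempolicy(2).  The nodemask value below is an assumption chosen for
 * the sketch; error handling is minimal.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *	if (set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES,
 *			  &nodemask, sizeof(nodemask) * 8) < 0)
 *		perror("set_mempolicy");
 */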

/* Flags for get_mempolicy */
#define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */
#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */

/* Flags for mbind */
#define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */
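/*
 * Illustrative example (not part of this header): mbind(2) takes the same
 * mode/mode-flag word plus one or more MPOL_MF_* flags.  The mapping and
 * nodemask below are assumptions chosen for the sketch.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	size_t len = 1 << 20;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long nodemask = 1UL << 0;
 *	mbind(buf, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8,
 *	      MPOL_MF_MOVE | MPOL_MF_STRICT);
 */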

/*
 * Internal flags that share the struct mempolicy flags word with
 * "mode flags".  These flags are allocated from bit 0 up, as they
 * are never OR'ed into the mode in mempolicy API arguments.
 */
#define MPOL_F_SHARED	(1 << 0)	/* identify shared policies */
#define MPOL_F_LOCAL	(1 << 1)	/* preferred local allocation */

#ifdef __KERNEL__

#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short		preferred_node;	/* preferred */
		nodemask_t	nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};
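/*
 * Illustrative sketch (hypothetical helper, not part of this header): which
 * member of the 'v' union is meaningful depends on the mode; for MPOL_DEFAULT
 * the union is undefined, and the MPOL_F_LOCAL case is ignored here.
 *
 *	static nodemask_t example_policy_nodes(struct mempolicy *pol)
 *	{
 *		nodemask_t nodes = NODE_MASK_NONE;
 *
 *		switch (pol->mode) {
 *		case MPOL_PREFERRED:
 *			node_set(pol->v.preferred_node, nodes);
 *			break;
 *		case MPOL_BIND:
 *		case MPOL_INTERLEAVE:
 *			nodes = pol->v.nodes;
 *			break;
 *		}
 *		return nodes;
 *	}
 */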

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}

extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
					  struct mempolicy *frompol);
static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
					       struct mempolicy *frompol)
{
	if (!frompol)
		return frompol;
	return __mpol_cond_copy(tompol, frompol);
}

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))
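/*
 * Illustrative sketch (not part of this header): the usual lifetime pattern
 * when a private copy of a policy is wanted, e.g. while duplicating a VMA.
 * The surrounding context ('old_vma', 'new_vma') is an assumption.
 *
 *	struct mempolicy *pol = mpol_dup(vma_policy(old_vma));
 *	if (IS_ERR(pol))
 *		return PTR_ERR(pol);
 *	vma_set_policy(new_vma, pol);
 *	...
 *	mpol_put(vma_policy(new_vma));
 */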

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return 1;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

void mpol_shared_policy_init(struct shared_policy *info, unsigned short mode,
				unsigned short flags, nodemask_t *nodes);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
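/*
 * Illustrative sketch (not part of this header): how a shared memory user
 * such as shmem might consult a shared policy tree.  'sp' and 'pgoff' are
 * assumptions for the example; note the tree is indexed in pages, not bytes.
 *
 *	struct mempolicy *pol = mpol_shared_policy_lookup(sp, pgoff);
 *	if (pol) {
 *		... allocate the page according to 'pol' ...
 *		mpol_cond_put(pol);
 *	}
 *
 * The lookup takes a reference on the policy it returns; since shared
 * policies carry MPOL_F_SHARED, mpol_cond_put() drops that reference again.
 */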

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern unsigned slab_node(struct mempolicy *policy);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);


#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, unsigned short *mode,
				unsigned short *mode_flags, nodemask_t *policy_nodes);

extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
#endif
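/*
 * Illustrative sketch (not part of this header): these two helpers back the
 * tmpfs 'mpol=' mount option, e.g.
 *
 *	mount -t tmpfs -o mpol=interleave:0-3 none /mnt/tmp
 *
 * A caller might parse such a string and later format the resulting policy
 * roughly as follows ('str', 'pol' and the buffer size are assumptions, and
 * a nonzero return from mpol_parse_str() is treated as a parse failure here):
 *
 *	unsigned short mode, mode_flags;
 *	nodemask_t nodes;
 *	char buf[64];
 *
 *	if (mpol_parse_str(str, &mode, &mode_flags, &nodes))
 *		return -EINVAL;
 *	...
 *	mpol_to_str(buf, sizeof(buf), pol);
 */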
#else

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return 1;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
						struct mempolicy *from)
{
	return from;
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_dup(struct mempolicy *old)
{
	return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *info,
			unsigned short mode, unsigned short flags, nodemask_t *nodes)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return node_zonelist(0, gfp_flags);
}

static inline int do_migrate_pages(struct mm_struct *mm,
			const nodemask_t *from_nodes,
			const nodemask_t *to_nodes, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, unsigned short *mode,
				unsigned short *mode_flags, nodemask_t *policy_nodes)
{
	return 1;	/* error: cannot parse without NUMA support */
}

static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
	return 0;
}
#endif

#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif