/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1


#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted. A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage. The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* in uapi/linux/mempolicy.h */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* in uapi/linux/mempolicy.h */
	union {
		short		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};
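
/*
 * A minimal sketch, not part of this header, of how the v union is
 * interpreted: which member is valid depends on mode, as the field
 * comments above describe.  Roughly:
 *
 *	switch (pol->mode) {
 *	case MPOL_PREFERRED:
 *		nid = pol->v.preferred_node;	(single preferred node)
 *		break;
 *	case MPOL_BIND:
 *	case MPOL_INTERLEAVE:
 *		nodes = pol->v.nodes;		(set of allowed nodes)
 *		break;
 *	default:	(MPOL_DEFAULT: v is undefined)
 *		break;
 *	}
 */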

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}
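
/*
 * Sketch of the usual reference pattern (illustrative only; both
 * helpers tolerate a NULL policy):
 *
 *	mpol_get(pol);		take a reference before sharing pol
 *	... use pol ...
 *	mpol_put(pol);		frees pol once refcnt drops to zero
 */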

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}
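
/*
 * Sketch of the conditional-put pattern, assuming a lookup that hands
 * back a referenced shared policy (e.g. mpol_shared_policy_lookup()
 * declared below):
 *
 *	pol = mpol_shared_policy_lookup(sp, idx);
 *	... allocate according to pol ...
 *	mpol_cond_put(pol);	drops the ref only if MPOL_F_SHARED is set
 */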

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}
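
/*
 * Illustrative sketch along the lines of vma_dup_policy() declared
 * below; __mpol_dup() returns an ERR_PTR() on allocation failure:
 *
 *	struct mempolicy *pol = mpol_dup(vma_policy(src));
 *
 *	if (IS_ERR(pol))
 *		return PTR_ERR(pol);
 *	dst->vm_policy = pol;	the copy starts with refcnt == 1
 */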

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
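
/*
 * Illustrative lookup sketch: as the comment above explains, the tree
 * is indexed in pages, so callers first convert an address within a
 * mapping into a page index:
 *
 *	pgoff_t idx = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
 *	struct mempolicy *pol = mpol_shared_policy_lookup(sp, idx);
 *
 *	... use pol ...
 *	mpol_cond_put(pol);	the lookup returns a conditional reference
 */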

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
		unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
				enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
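
/*
 * Illustrative use of huge_zonelist(), modeled on the hugetlb fault
 * path: pick the zonelist and nodemask for an allocation at addr,
 * allocate, then drop the conditional policy reference:
 *
 *	zl = huge_zonelist(vma, addr, gfp_flags, &mpol, &nodemask);
 *	... allocate from zl, restricted by nodemask ...
 *	mpol_cond_put(mpol);
 */
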
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);
extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;

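/*
 * Track the highest zone type seen while zonelists are built;
 * ZONE_MOVABLE is deliberately ignored.  policy_zone is consulted by
 * vma_migratable() below.
 */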
static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

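/*
 * Move pages of mm that live on nodes in 'from' to nodes in 'to';
 * flags selects MPOL_MF_MOVE or MPOL_MF_MOVE_ALL, as with the
 * migrate_pages() syscall.  Illustrative call:
 *
 *	err = do_migrate_pages(mm, &old_nodes, &new_nodes, MPOL_MF_MOVE);
 */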
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);


#ifdef CONFIG_TMPFS
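/*
 * Illustrative only: parses a tmpfs "mpol=" style policy string such
 * as "interleave:0-3" (a mode, optionally followed by a nodelist) and
 * returns 0 on success with the new policy in *mpol, nonzero on error:
 *
 *	struct mempolicy *pol;
 *
 *	if (!mpol_parse_str(str, &pol))
 *		... pol is valid ...
 */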
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);

/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		return 0;

#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	if (vma->vm_flags & VM_HUGETLB)
		return 0;
#endif

	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
	    gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) < policy_zone)
		return 0;
	return 1;
}

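/*
 * Check whether the page at the given address is on a node permitted
 * by the applicable policy; returns the node id the page should be
 * migrated to, or -1 if it is already well placed (compare the
 * !CONFIG_NUMA stub below).
 */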
extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new,
				enum mpol_rebind_step step)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return node_zonelist(0, gfp_flags);
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	return 1;	/* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1; /* no node preference */
}

#endif /* CONFIG_NUMA */
#endif