/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1


#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};
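
/*
 * Illustrative sketch (not part of the original header; "pol" and
 * "nid" are hypothetical): which member of the "v" union is live
 * depends on mode.  Only v.preferred_node is defined for
 * MPOL_PREFERRED; only v.nodes for MPOL_INTERLEAVE and MPOL_BIND;
 * neither is defined for MPOL_DEFAULT.
 *
 *	switch (pol->mode) {
 *	case MPOL_PREFERRED:
 *		nid = pol->v.preferred_node;
 *		break;
 *	case MPOL_BIND:
 *	case MPOL_INTERLEAVE:
 *		nid = first_node(pol->v.nodes);
 *		break;
 *	default:
 *		nid = numa_node_id();	-- v is undefined for MPOL_DEFAULT
 *	}
 */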

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}
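
/*
 * Illustrative sketch of the usual lookup/use/conditional-put pattern
 * (not from the original header; "vma", "addr" and the allocation
 * step are assumed caller context):
 *
 *	pol = get_vma_policy(current, vma, addr);
 *	... allocate pages according to pol ...
 *	mpol_cond_put(pol);
 *
 * A shared policy comes back with an extra reference that must be
 * dropped; mpol_cond_put() drops it only when MPOL_F_SHARED is set,
 * keeping the common non-shared case free of atomic operations.
 */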

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}
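
/*
 * Illustrative sketch (not from the original header; "p" is a
 * hypothetical child task_struct): duplicating the parent's task
 * policy at fork time.  __mpol_dup() returns an ERR_PTR on
 * allocation failure, so callers check IS_ERR():
 *
 *	p->mempolicy = mpol_dup(p->mempolicy);
 *	if (IS_ERR(p->mempolicy))
 *		return PTR_ERR(p->mempolicy);
 */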

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
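
/*
 * Illustrative sketch of the shared policy lifecycle, e.g. for a
 * tmpfs inode (not from the original header; "info" is a hypothetical
 * per-inode structure).  Note that lookups are by page index,
 * matching the pseudo-mm comment above:
 *
 *	mpol_shared_policy_init(&info->policy, mpol);
 *	...
 *	pol = mpol_shared_policy_lookup(&info->policy, pgoff);
 *	...		-- pol, if set, needs mpol_cond_put()
 *	mpol_free_shared_policy(&info->policy);
 */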

struct mempolicy *get_vma_policy(struct task_struct *tsk,
		struct vm_area_struct *vma, unsigned long addr);
bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
				enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);
extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);

#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);

/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		return 0;

#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	if (vma->vm_flags & VM_HUGETLB)
		return 0;
#endif

	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
		return 0;
	return 1;
}
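
/*
 * Illustrative sketch (not from the original header; "mm" is assumed
 * caller context): migration paths typically gate per-vma work on
 * vma_migratable() while walking an address space:
 *
 *	for (vma = mm->mmap; vma; vma = vma->vm_next)
 *		if (vma_migratable(vma))
 *			... queue vma's pages for migration ...
 */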

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new,
				enum mpol_rebind_step step)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return node_zonelist(0, gfp_flags);
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	return 1;	/* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1; /* no node preference */
}

#endif /* CONFIG_NUMA */
#endif