Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Simple NUMA memory policy for the Linux kernel. | |
3 | * | |
4 | * Copyright 2003,2004 Andi Kleen, SuSE Labs. | |
8bccd85f | 5 | * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc. |
1da177e4 LT |
6 | * Subject to the GNU Public License, version 2. |
7 | * | |
8 | * NUMA policy allows the user to give hints about which node(s) memory | |
9 | * should be allocated on. | |
10 | * | |
11 | * Support four policies per VMA and per process: | |
12 | * | |
13 | * The VMA policy has priority over the process policy for a page fault. | |
14 | * | |
15 | * interleave Allocate memory interleaved over a set of nodes, | |
16 | * with normal fallback if it fails. | |
17 | * For VMA based allocations this interleaves based on the | |
18 | * offset into the backing object or offset into the mapping | |
19 | * for anonymous memory. For process policy a process counter | |
20 | * is used. | |
8bccd85f | 21 | * |
1da177e4 LT |
22 | * bind Only allocate memory on a specific set of nodes, |
23 | * no fallback. | |
8bccd85f CL |
24 | * FIXME: memory is allocated starting with the first node |
25 | * to the last. It would be better if bind would truly restrict | |
26 | * the allocation to memory nodes instead | |
27 | * | |
1da177e4 LT |
28 | * preferred Try a specific node first before normal fallback. |
29 | * As a special case node -1 here means do the allocation | |
30 | * on the local CPU. This is normally identical to default, | |
31 | * but useful to set in a VMA when you have a non default | |
32 | * process policy. | |
8bccd85f | 33 | * |
1da177e4 LT |
34 | * default Allocate on the local node first, or when on a VMA |
35 | * use the process policy. This is what Linux always did | |
36 | * in a NUMA aware kernel and still does by, ahem, default. | |
37 | * | |
38 | * The process policy is applied for most non interrupt memory allocations | |
39 | * in that process' context. Interrupts ignore the policies and always | |
40 | * try to allocate on the local CPU. The VMA policy is only applied for memory | |
41 | * allocations for a VMA in the VM. | |
42 | * | |
43 | * Currently there are a few corner cases in swapping where the policy | |
44 | * is not applied, but the majority should be handled. When process policy | |
45 | * is used it is not remembered over swap outs/swap ins. | |
46 | * | |
47 | * Only the highest zone in the zone hierarchy gets policied. Allocations | |
48 | * requesting a lower zone just use default policy. This implies that | |
49 | * on systems with highmem, kernel lowmem allocations don't get policied. | |
50 | * Same with GFP_DMA allocations. | |
51 | * | |
52 | * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between | |
53 | * all users and remembered even when nobody has memory mapped. | |
54 | */ | |
55 | ||
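The four policies above are set from user space through the mbind(2) and set_mempolicy(2) system calls implemented later in this file. As a quick illustration, here is a minimal userspace sketch (not part of this file) that asks for interleaving across nodes 0 and 1; the hard-coded MPOL_INTERLEAVE value and the raw syscall(2) invocation are assumptions chosen to match this file's definitions, since these calls have no glibc wrapper.

```c
/*
 * Userspace sketch (assumption, not kernel code): interleave this
 * process' future allocations across nodes 0 and 1.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define MPOL_INTERLEAVE	3	/* assumed to match the kernel's mode values */

int main(void)
{
	unsigned long nodemask = (1UL << 0) | (1UL << 1);	/* nodes 0 and 1 */

	/* maxnode tells the kernel how many bits of nodemask to read */
	if (syscall(__NR_set_mempolicy, MPOL_INTERLEAVE, &nodemask,
		    8 * sizeof(nodemask)) < 0)
		perror("set_mempolicy");

	return 0;
}
```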
56 | /* Notebook: | |
57 | fix mmap readahead to honour policy and enable policy for any page cache | |
58 | object | |
59 | statistics for bigpages | |
60 | global policy for page cache? currently it uses process policy. Requires | |
61 | first item above. | |
62 | handle mremap for shared memory (currently ignored for the policy) | |
63 | grows down? | |
64 | make bind policy root only? It can trigger oom much faster and the | |
65 | kernel is not always graceful about that. | |
66 | could replace all the switch()es with a mempolicy_ops structure. | |
67 | */ | |
68 | ||
69 | #include <linux/mempolicy.h> | |
70 | #include <linux/mm.h> | |
71 | #include <linux/highmem.h> | |
72 | #include <linux/hugetlb.h> | |
73 | #include <linux/kernel.h> | |
74 | #include <linux/sched.h> | |
76 | #include <linux/nodemask.h> | |
77 | #include <linux/cpuset.h> | |
78 | #include <linux/gfp.h> | |
79 | #include <linux/slab.h> | |
80 | #include <linux/string.h> | |
81 | #include <linux/module.h> | |
82 | #include <linux/interrupt.h> | |
83 | #include <linux/init.h> | |
84 | #include <linux/compat.h> | |
dc9aa5b9 | 86 | #include <linux/swap.h> |
1a75a6c8 CL |
87 | #include <linux/seq_file.h> |
88 | #include <linux/proc_fs.h> | |
dc9aa5b9 | 89 | |
1da177e4 LT |
90 | #include <asm/tlbflush.h> |
91 | #include <asm/uaccess.h> | |
92 | ||
38e35860 | 93 | /* Internal flags */ |
dc9aa5b9 | 94 | #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */ |
38e35860 | 95 | #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */ |
1a75a6c8 | 96 | #define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2) /* Gather statistics */ |
dc9aa5b9 | 97 | |
1da177e4 LT |
98 | static kmem_cache_t *policy_cache; |
99 | static kmem_cache_t *sn_cache; | |
100 | ||
101 | #define PDprintk(fmt...)	/* debug printouts, expands to nothing */ | |
102 | ||
103 | /* Highest zone. A specific allocation for a zone below that is not | |
104 | policied. */ | |
4be38e35 | 105 | int policy_zone = ZONE_DMA; |
1da177e4 | 106 | |
d42c6997 | 107 | struct mempolicy default_policy = { |
1da177e4 LT |
108 | .refcnt = ATOMIC_INIT(1), /* never free it */ |
109 | .policy = MPOL_DEFAULT, | |
110 | }; | |
111 | ||
1da177e4 | 112 | /* Do sanity checking on a policy */ |
dfcd3c0d | 113 | static int mpol_check_policy(int mode, nodemask_t *nodes) |
1da177e4 | 114 | { |
dfcd3c0d | 115 | int empty = nodes_empty(*nodes); |
1da177e4 LT |
116 | |
117 | switch (mode) { | |
118 | case MPOL_DEFAULT: | |
119 | if (!empty) | |
120 | return -EINVAL; | |
121 | break; | |
122 | case MPOL_BIND: | |
123 | case MPOL_INTERLEAVE: | |
124 | /* Preferred will only use the first bit, but allow | |
125 | more for now. */ | |
126 | if (empty) | |
127 | return -EINVAL; | |
128 | break; | |
129 | } | |
dfcd3c0d | 130 | return nodes_subset(*nodes, node_online_map) ? 0 : -EINVAL; |
1da177e4 | 131 | } |
1da177e4 | 132 | /* Generate a custom zonelist for the BIND policy. */ |
dfcd3c0d | 133 | static struct zonelist *bind_zonelist(nodemask_t *nodes) |
1da177e4 LT |
134 | { |
135 | struct zonelist *zl; | |
136 | int num, max, nd; | |
137 | ||
dfcd3c0d | 138 | max = 1 + MAX_NR_ZONES * nodes_weight(*nodes); |
1da177e4 LT |
139 | zl = kmalloc(sizeof(void *) * max, GFP_KERNEL); |
140 | if (!zl) | |
141 | return NULL; | |
142 | num = 0; | |
4be38e35 CL |
143 | for_each_node_mask(nd, *nodes) |
144 | zl->zones[num++] = &NODE_DATA(nd)->node_zones[policy_zone]; | |
1da177e4 LT |
145 | zl->zones[num] = NULL; |
146 | return zl; | |
147 | } | |
148 | ||
149 | /* Create a new policy */ | |
dfcd3c0d | 150 | static struct mempolicy *mpol_new(int mode, nodemask_t *nodes) |
1da177e4 LT |
151 | { |
152 | struct mempolicy *policy; | |
153 | ||
dfcd3c0d | 154 | PDprintk("setting mode %d nodes[0] %lx\n", mode, nodes_addr(*nodes)[0]); |
1da177e4 LT |
155 | if (mode == MPOL_DEFAULT) |
156 | return NULL; | |
157 | policy = kmem_cache_alloc(policy_cache, GFP_KERNEL); | |
158 | if (!policy) | |
159 | return ERR_PTR(-ENOMEM); | |
160 | atomic_set(&policy->refcnt, 1); | |
161 | switch (mode) { | |
162 | case MPOL_INTERLEAVE: | |
dfcd3c0d | 163 | policy->v.nodes = *nodes; |
8f493d79 AK |
164 | if (nodes_weight(*nodes) == 0) { |
165 | kmem_cache_free(policy_cache, policy); | |
166 | return ERR_PTR(-EINVAL); | |
167 | } | |
1da177e4 LT |
168 | break; |
169 | case MPOL_PREFERRED: | |
dfcd3c0d | 170 | policy->v.preferred_node = first_node(*nodes); |
1da177e4 LT |
171 | if (policy->v.preferred_node >= MAX_NUMNODES) |
172 | policy->v.preferred_node = -1; | |
173 | break; | |
174 | case MPOL_BIND: | |
175 | policy->v.zonelist = bind_zonelist(nodes); | |
176 | if (policy->v.zonelist == NULL) { | |
177 | kmem_cache_free(policy_cache, policy); | |
178 | return ERR_PTR(-ENOMEM); | |
179 | } | |
180 | break; | |
181 | } | |
182 | policy->policy = mode; | |
183 | return policy; | |
184 | } | |
185 | ||
1a75a6c8 | 186 | static void gather_stats(struct page *, void *); |
6ce3c4c0 CL |
187 | static void migrate_page_add(struct vm_area_struct *vma, |
188 | struct page *page, struct list_head *pagelist, unsigned long flags); | |
1a75a6c8 | 189 | |
38e35860 | 190 | /* Scan through pages, checking whether they satisfy the given conditions. */ |
b5810039 | 191 | static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd, |
dc9aa5b9 CL |
192 | unsigned long addr, unsigned long end, |
193 | const nodemask_t *nodes, unsigned long flags, | |
38e35860 | 194 | void *private) |
1da177e4 | 195 | { |
91612e0d HD |
196 | pte_t *orig_pte; |
197 | pte_t *pte; | |
705e87c0 | 198 | spinlock_t *ptl; |
941150a3 | 199 | |
705e87c0 | 200 | orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); |
91612e0d | 201 | do { |
6aab341e | 202 | struct page *page; |
91612e0d HD |
203 | unsigned int nid; |
204 | ||
205 | if (!pte_present(*pte)) | |
1da177e4 | 206 | continue; |
6aab341e LT |
207 | page = vm_normal_page(vma, addr, *pte); |
208 | if (!page) | |
1da177e4 | 209 | continue; |
6aab341e | 210 | nid = page_to_nid(page); |
38e35860 CL |
211 | if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT)) |
212 | continue; | |
213 | ||
1a75a6c8 CL |
214 | if (flags & MPOL_MF_STATS) |
215 | gather_stats(page, private); | |
132beacf CL |
216 | else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { |
217 | spin_unlock(ptl); | |
38e35860 | 218 | migrate_page_add(vma, page, private, flags); |
132beacf CL |
219 | spin_lock(ptl); |
220 | } | |
38e35860 CL |
221 | else |
222 | break; | |
91612e0d | 223 | } while (pte++, addr += PAGE_SIZE, addr != end); |
705e87c0 | 224 | pte_unmap_unlock(orig_pte, ptl); |
91612e0d HD |
225 | return addr != end; |
226 | } | |
227 | ||
b5810039 | 228 | static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud, |
dc9aa5b9 CL |
229 | unsigned long addr, unsigned long end, |
230 | const nodemask_t *nodes, unsigned long flags, | |
38e35860 | 231 | void *private) |
91612e0d HD |
232 | { |
233 | pmd_t *pmd; | |
234 | unsigned long next; | |
235 | ||
236 | pmd = pmd_offset(pud, addr); | |
237 | do { | |
238 | next = pmd_addr_end(addr, end); | |
239 | if (pmd_none_or_clear_bad(pmd)) | |
240 | continue; | |
dc9aa5b9 | 241 | if (check_pte_range(vma, pmd, addr, next, nodes, |
38e35860 | 242 | flags, private)) |
91612e0d HD |
243 | return -EIO; |
244 | } while (pmd++, addr = next, addr != end); | |
245 | return 0; | |
246 | } | |
247 | ||
b5810039 | 248 | static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd, |
dc9aa5b9 CL |
249 | unsigned long addr, unsigned long end, |
250 | const nodemask_t *nodes, unsigned long flags, | |
38e35860 | 251 | void *private) |
91612e0d HD |
252 | { |
253 | pud_t *pud; | |
254 | unsigned long next; | |
255 | ||
256 | pud = pud_offset(pgd, addr); | |
257 | do { | |
258 | next = pud_addr_end(addr, end); | |
259 | if (pud_none_or_clear_bad(pud)) | |
260 | continue; | |
dc9aa5b9 | 261 | if (check_pmd_range(vma, pud, addr, next, nodes, |
38e35860 | 262 | flags, private)) |
91612e0d HD |
263 | return -EIO; |
264 | } while (pud++, addr = next, addr != end); | |
265 | return 0; | |
266 | } | |
267 | ||
b5810039 | 268 | static inline int check_pgd_range(struct vm_area_struct *vma, |
dc9aa5b9 CL |
269 | unsigned long addr, unsigned long end, |
270 | const nodemask_t *nodes, unsigned long flags, | |
38e35860 | 271 | void *private) |
91612e0d HD |
272 | { |
273 | pgd_t *pgd; | |
274 | unsigned long next; | |
275 | ||
b5810039 | 276 | pgd = pgd_offset(vma->vm_mm, addr); |
91612e0d HD |
277 | do { |
278 | next = pgd_addr_end(addr, end); | |
279 | if (pgd_none_or_clear_bad(pgd)) | |
280 | continue; | |
dc9aa5b9 | 281 | if (check_pud_range(vma, pgd, addr, next, nodes, |
38e35860 | 282 | flags, private)) |
91612e0d HD |
283 | return -EIO; |
284 | } while (pgd++, addr = next, addr != end); | |
285 | return 0; | |
1da177e4 LT |
286 | } |
287 | ||
dc9aa5b9 CL |
288 | /* Check if a vma is migratable */ |
289 | static inline int vma_migratable(struct vm_area_struct *vma) | |
290 | { | |
291 | if (vma->vm_flags & ( | |
292 | VM_LOCKED|VM_IO|VM_HUGETLB|VM_PFNMAP)) | |
293 | return 0; | |
294 | return 1; | |
295 | } | |
296 | ||
297 | /* | |
298 | * Check if all pages in a range are on a set of nodes. | |
299 | * If pagelist != NULL then isolate pages from the LRU and | |
300 | * put them on the pagelist. | |
301 | */ | |
1da177e4 LT |
302 | static struct vm_area_struct * |
303 | check_range(struct mm_struct *mm, unsigned long start, unsigned long end, | |
38e35860 | 304 | const nodemask_t *nodes, unsigned long flags, void *private) |
1da177e4 LT |
305 | { |
306 | int err; | |
307 | struct vm_area_struct *first, *vma, *prev; | |
308 | ||
309 | first = find_vma(mm, start); | |
310 | if (!first) | |
311 | return ERR_PTR(-EFAULT); | |
312 | prev = NULL; | |
313 | for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) { | |
dc9aa5b9 CL |
314 | if (!(flags & MPOL_MF_DISCONTIG_OK)) { |
315 | if (!vma->vm_next && vma->vm_end < end) | |
316 | return ERR_PTR(-EFAULT); | |
317 | if (prev && prev->vm_end < vma->vm_start) | |
318 | return ERR_PTR(-EFAULT); | |
319 | } | |
320 | if (!is_vm_hugetlb_page(vma) && | |
321 | ((flags & MPOL_MF_STRICT) || | |
322 | ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) && | |
323 | vma_migratable(vma)))) { | |
5b952b3c | 324 | unsigned long endvma = vma->vm_end; |
dc9aa5b9 | 325 | |
5b952b3c AK |
326 | if (endvma > end) |
327 | endvma = end; | |
328 | if (vma->vm_start > start) | |
329 | start = vma->vm_start; | |
dc9aa5b9 | 330 | err = check_pgd_range(vma, start, endvma, nodes, |
38e35860 | 331 | flags, private); |
1da177e4 LT |
332 | if (err) { |
333 | first = ERR_PTR(err); | |
334 | break; | |
335 | } | |
336 | } | |
337 | prev = vma; | |
338 | } | |
339 | return first; | |
340 | } | |
341 | ||
342 | /* Apply policy to a single VMA */ | |
343 | static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new) | |
344 | { | |
345 | int err = 0; | |
346 | struct mempolicy *old = vma->vm_policy; | |
347 | ||
348 | PDprintk("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", | |
349 | vma->vm_start, vma->vm_end, vma->vm_pgoff, | |
350 | vma->vm_ops, vma->vm_file, | |
351 | vma->vm_ops ? vma->vm_ops->set_policy : NULL); | |
352 | ||
353 | if (vma->vm_ops && vma->vm_ops->set_policy) | |
354 | err = vma->vm_ops->set_policy(vma, new); | |
355 | if (!err) { | |
356 | mpol_get(new); | |
357 | vma->vm_policy = new; | |
358 | mpol_free(old); | |
359 | } | |
360 | return err; | |
361 | } | |
362 | ||
363 | /* Step 2: apply policy to a range and do splits. */ | |
364 | static int mbind_range(struct vm_area_struct *vma, unsigned long start, | |
365 | unsigned long end, struct mempolicy *new) | |
366 | { | |
367 | struct vm_area_struct *next; | |
368 | int err; | |
369 | ||
370 | err = 0; | |
371 | for (; vma && vma->vm_start < end; vma = next) { | |
372 | next = vma->vm_next; | |
373 | if (vma->vm_start < start) | |
374 | err = split_vma(vma->vm_mm, vma, start, 1); | |
375 | if (!err && vma->vm_end > end) | |
376 | err = split_vma(vma->vm_mm, vma, end, 0); | |
377 | if (!err) | |
378 | err = policy_vma(vma, new); | |
379 | if (err) | |
380 | break; | |
381 | } | |
382 | return err; | |
383 | } | |
384 | ||
8bccd85f CL |
385 | static int contextualize_policy(int mode, nodemask_t *nodes) |
386 | { | |
387 | if (!nodes) | |
388 | return 0; | |
389 | ||
8bccd85f | 390 | cpuset_update_current_mems_allowed(); |
5966514d PJ |
391 | if (!cpuset_nodes_subset_current_mems_allowed(*nodes)) |
392 | return -EINVAL; | |
8bccd85f CL |
393 | return mpol_check_policy(mode, nodes); |
394 | } | |
395 | ||
1da177e4 | 396 | /* Set the process memory policy */ |
8bccd85f | 397 | long do_set_mempolicy(int mode, nodemask_t *nodes) |
1da177e4 | 398 | { |
1da177e4 | 399 | struct mempolicy *new; |
1da177e4 | 400 | |
8bccd85f | 401 | if (contextualize_policy(mode, nodes)) |
1da177e4 | 402 | return -EINVAL; |
8bccd85f | 403 | new = mpol_new(mode, nodes); |
1da177e4 LT |
404 | if (IS_ERR(new)) |
405 | return PTR_ERR(new); | |
406 | mpol_free(current->mempolicy); | |
407 | current->mempolicy = new; | |
408 | if (new && new->policy == MPOL_INTERLEAVE) | |
dfcd3c0d | 409 | current->il_next = first_node(new->v.nodes); |
1da177e4 LT |
410 | return 0; |
411 | } | |
412 | ||
413 | /* Fill a node mask for a policy */ | |
dfcd3c0d | 414 | static void get_zonemask(struct mempolicy *p, nodemask_t *nodes) |
1da177e4 LT |
415 | { |
416 | int i; | |
417 | ||
dfcd3c0d | 418 | nodes_clear(*nodes); |
1da177e4 LT |
419 | switch (p->policy) { |
420 | case MPOL_BIND: | |
421 | for (i = 0; p->v.zonelist->zones[i]; i++) | |
8bccd85f CL |
422 | node_set(p->v.zonelist->zones[i]->zone_pgdat->node_id, |
423 | *nodes); | |
1da177e4 LT |
424 | break; |
425 | case MPOL_DEFAULT: | |
426 | break; | |
427 | case MPOL_INTERLEAVE: | |
dfcd3c0d | 428 | *nodes = p->v.nodes; |
1da177e4 LT |
429 | break; |
430 | case MPOL_PREFERRED: | |
431 | /* or use current node instead of online map? */ | |
432 | if (p->v.preferred_node < 0) | |
dfcd3c0d | 433 | *nodes = node_online_map; |
1da177e4 | 434 | else |
dfcd3c0d | 435 | node_set(p->v.preferred_node, *nodes); |
1da177e4 LT |
436 | break; |
437 | default: | |
438 | BUG(); | |
439 | } | |
440 | } | |
441 | ||
442 | static int lookup_node(struct mm_struct *mm, unsigned long addr) | |
443 | { | |
444 | struct page *p; | |
445 | int err; | |
446 | ||
447 | err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL); | |
448 | if (err >= 0) { | |
449 | err = page_to_nid(p); | |
450 | put_page(p); | |
451 | } | |
452 | return err; | |
453 | } | |
454 | ||
1da177e4 | 455 | /* Retrieve NUMA policy */ |
8bccd85f CL |
456 | long do_get_mempolicy(int *policy, nodemask_t *nmask, |
457 | unsigned long addr, unsigned long flags) | |
1da177e4 | 458 | { |
8bccd85f | 459 | int err; |
1da177e4 LT |
460 | struct mm_struct *mm = current->mm; |
461 | struct vm_area_struct *vma = NULL; | |
462 | struct mempolicy *pol = current->mempolicy; | |
463 | ||
68860ec1 | 464 | cpuset_update_current_mems_allowed(); |
1da177e4 LT |
465 | if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR)) |
466 | return -EINVAL; | |
1da177e4 LT |
467 | if (flags & MPOL_F_ADDR) { |
468 | down_read(&mm->mmap_sem); | |
469 | vma = find_vma_intersection(mm, addr, addr+1); | |
470 | if (!vma) { | |
471 | up_read(&mm->mmap_sem); | |
472 | return -EFAULT; | |
473 | } | |
474 | if (vma->vm_ops && vma->vm_ops->get_policy) | |
475 | pol = vma->vm_ops->get_policy(vma, addr); | |
476 | else | |
477 | pol = vma->vm_policy; | |
478 | } else if (addr) | |
479 | return -EINVAL; | |
480 | ||
481 | if (!pol) | |
482 | pol = &default_policy; | |
483 | ||
484 | if (flags & MPOL_F_NODE) { | |
485 | if (flags & MPOL_F_ADDR) { | |
486 | err = lookup_node(mm, addr); | |
487 | if (err < 0) | |
488 | goto out; | |
8bccd85f | 489 | *policy = err; |
1da177e4 LT |
490 | } else if (pol == current->mempolicy && |
491 | pol->policy == MPOL_INTERLEAVE) { | |
8bccd85f | 492 | *policy = current->il_next; |
1da177e4 LT |
493 | } else { |
494 | err = -EINVAL; | |
495 | goto out; | |
496 | } | |
497 | } else | |
8bccd85f | 498 | *policy = pol->policy; |
1da177e4 LT |
499 | |
500 | if (vma) { | |
501 | up_read(¤t->mm->mmap_sem); | |
502 | vma = NULL; | |
503 | } | |
504 | ||
1da177e4 | 505 | err = 0; |
8bccd85f CL |
506 | if (nmask) |
507 | get_zonemask(pol, nmask); | |
1da177e4 LT |
508 | |
509 | out: | |
510 | if (vma) | |
511 | up_read(¤t->mm->mmap_sem); | |
512 | return err; | |
513 | } | |
514 | ||
6ce3c4c0 CL |
515 | /* |
516 | * page migration | |
517 | */ | |
518 | ||
519 | /* Check if we are the only process mapping the page in question */ | |
520 | static inline int single_mm_mapping(struct mm_struct *mm, | |
521 | struct address_space *mapping) | |
522 | { | |
523 | struct vm_area_struct *vma; | |
524 | struct prio_tree_iter iter; | |
525 | int rc = 1; | |
526 | ||
527 | spin_lock(&mapping->i_mmap_lock); | |
528 | vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX) | |
529 | if (mm != vma->vm_mm) { | |
530 | rc = 0; | |
531 | goto out; | |
532 | } | |
533 | list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) | |
534 | if (mm != vma->vm_mm) { | |
535 | rc = 0; | |
536 | goto out; | |
537 | } | |
538 | out: | |
539 | spin_unlock(&mapping->i_mmap_lock); | |
540 | return rc; | |
541 | } | |
542 | ||
543 | /* | |
544 | * Add a page to be migrated to the pagelist | |
545 | */ | |
546 | static void migrate_page_add(struct vm_area_struct *vma, | |
547 | struct page *page, struct list_head *pagelist, unsigned long flags) | |
548 | { | |
549 | /* | |
550 | * Avoid migrating a page that is shared by others and not writable. | |
551 | */ | |
552 | if ((flags & MPOL_MF_MOVE_ALL) || !page->mapping || PageAnon(page) || | |
553 | mapping_writably_mapped(page->mapping) || | |
554 | single_mm_mapping(vma->vm_mm, page->mapping)) { | |
555 | int rc = isolate_lru_page(page); | |
556 | ||
557 | if (rc == 1) | |
558 | list_add(&page->lru, pagelist); | |
559 | /* | |
560 | * If the isolate attempt was not successful then we just | |
561 | * encountered an unswappable page. Something must be wrong. | |
562 | */ | |
563 | WARN_ON(rc == 0); | |
564 | } | |
565 | } | |
566 | ||
567 | static int swap_pages(struct list_head *pagelist) | |
568 | { | |
569 | LIST_HEAD(moved); | |
570 | LIST_HEAD(failed); | |
571 | int n; | |
572 | ||
573 | n = migrate_pages(pagelist, NULL, &moved, &failed); | |
574 | putback_lru_pages(&failed); | |
575 | putback_lru_pages(&moved); | |
576 | ||
577 | return n; | |
578 | } | |
579 | ||
39743889 CL |
580 | /* |
581 | * For now migrate_pages simply swaps out the pages from nodes that are in | |
582 | * the source set but not in the target set. In the future, we would | |
583 | * want a function that moves pages between the two nodesets in such | |
584 | * a way as to preserve the physical layout as much as possible. | |
585 | * | |
586 | * Returns the number of pages that could not be moved. | |
587 | */ | |
588 | int do_migrate_pages(struct mm_struct *mm, | |
589 | const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags) | |
590 | { | |
591 | LIST_HEAD(pagelist); | |
592 | int count = 0; | |
593 | nodemask_t nodes; | |
594 | ||
595 | nodes_andnot(nodes, *from_nodes, *to_nodes); | |
39743889 CL |
596 | |
597 | down_read(&mm->mmap_sem); | |
598 | check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nodes, | |
599 | flags | MPOL_MF_DISCONTIG_OK, &pagelist); | |
d4984711 | 600 | |
39743889 | 601 | if (!list_empty(&pagelist)) { |
d4984711 CL |
602 | count = swap_pages(&pagelist); |
603 | putback_lru_pages(&pagelist); | |
39743889 | 604 | } |
d4984711 | 605 | |
39743889 CL |
606 | up_read(&mm->mmap_sem); |
607 | return count; | |
608 | } | |
609 | ||
6ce3c4c0 CL |
610 | long do_mbind(unsigned long start, unsigned long len, |
611 | unsigned long mode, nodemask_t *nmask, unsigned long flags) | |
612 | { | |
613 | struct vm_area_struct *vma; | |
614 | struct mm_struct *mm = current->mm; | |
615 | struct mempolicy *new; | |
616 | unsigned long end; | |
617 | int err; | |
618 | LIST_HEAD(pagelist); | |
619 | ||
620 | if ((flags & ~(unsigned long)(MPOL_MF_STRICT | | |
621 | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) | |
622 | || mode > MPOL_MAX) | |
623 | return -EINVAL; | |
624 | if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_RESOURCE)) | |
625 | return -EPERM; | |
626 | ||
627 | if (start & ~PAGE_MASK) | |
628 | return -EINVAL; | |
629 | ||
630 | if (mode == MPOL_DEFAULT) | |
631 | flags &= ~MPOL_MF_STRICT; | |
632 | ||
633 | len = (len + PAGE_SIZE - 1) & PAGE_MASK; | |
634 | end = start + len; | |
635 | ||
636 | if (end < start) | |
637 | return -EINVAL; | |
638 | if (end == start) | |
639 | return 0; | |
640 | ||
641 | if (mpol_check_policy(mode, nmask)) | |
642 | return -EINVAL; | |
643 | ||
644 | new = mpol_new(mode, nmask); | |
645 | if (IS_ERR(new)) | |
646 | return PTR_ERR(new); | |
647 | ||
648 | /* | |
649 | * If we are using the default policy then operation | |
650 | * on discontinuous address spaces is okay after all | |
651 | */ | |
652 | if (!new) | |
653 | flags |= MPOL_MF_DISCONTIG_OK; | |
654 | ||
655 | PDprintk("mbind %lx-%lx mode:%ld nodes:%lx\n",start,start+len, | |
656 | mode, nodes_addr(*nmask)[0]); | |
657 | ||
658 | down_write(&mm->mmap_sem); | |
659 | vma = check_range(mm, start, end, nmask, | |
660 | flags | MPOL_MF_INVERT, &pagelist); | |
661 | ||
662 | err = PTR_ERR(vma); | |
663 | if (!IS_ERR(vma)) { | |
664 | int nr_failed = 0; | |
665 | ||
666 | err = mbind_range(vma, start, end, new); | |
667 | if (!list_empty(&pagelist)) | |
668 | nr_failed = swap_pages(&pagelist); | |
669 | ||
670 | if (!err && nr_failed && (flags & MPOL_MF_STRICT)) | |
671 | err = -EIO; | |
672 | } | |
673 | if (!list_empty(&pagelist)) | |
674 | putback_lru_pages(&pagelist); | |
675 | ||
676 | up_write(&mm->mmap_sem); | |
677 | mpol_free(new); | |
678 | return err; | |
679 | } | |
680 | ||
8bccd85f CL |
681 | /* |
682 | * User space interface with variable sized bitmaps for nodelists. | |
683 | */ | |
684 | ||
685 | /* Copy a node mask from user space. */ | |
39743889 | 686 | static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, |
8bccd85f CL |
687 | unsigned long maxnode) |
688 | { | |
689 | unsigned long k; | |
690 | unsigned long nlongs; | |
691 | unsigned long endmask; | |
692 | ||
693 | --maxnode; | |
694 | nodes_clear(*nodes); | |
695 | if (maxnode == 0 || !nmask) | |
696 | return 0; | |
697 | ||
698 | nlongs = BITS_TO_LONGS(maxnode); | |
699 | if ((maxnode % BITS_PER_LONG) == 0) | |
700 | endmask = ~0UL; | |
701 | else | |
702 | endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1; | |
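	/*
	 * Worked example (added note): a user-supplied maxnode of 9 becomes
	 * 8 after the decrement above, so nlongs == BITS_TO_LONGS(8) == 1
	 * and endmask == 0xff, i.e. only nodes 0-7 may be set in the last
	 * long copied from user space.
	 */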
703 | ||
704 | /* When the user specified more nodes than supported, just check | |
705 | that the unsupported part is all zero. | |
706 | if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) { | |
707 | if (nlongs > PAGE_SIZE/sizeof(long)) | |
708 | return -EINVAL; | |
709 | for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) { | |
710 | unsigned long t; | |
711 | if (get_user(t, nmask + k)) | |
712 | return -EFAULT; | |
713 | if (k == nlongs - 1) { | |
714 | if (t & endmask) | |
715 | return -EINVAL; | |
716 | } else if (t) | |
717 | return -EINVAL; | |
718 | } | |
719 | nlongs = BITS_TO_LONGS(MAX_NUMNODES); | |
720 | endmask = ~0UL; | |
721 | } | |
722 | ||
723 | if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long))) | |
724 | return -EFAULT; | |
725 | nodes_addr(*nodes)[nlongs-1] &= endmask; | |
726 | return 0; | |
727 | } | |
728 | ||
729 | /* Copy a kernel node mask to user space */ | |
730 | static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, | |
731 | nodemask_t *nodes) | |
732 | { | |
733 | unsigned long copy = ALIGN(maxnode-1, 64) / 8; | |
734 | const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long); | |
735 | ||
736 | if (copy > nbytes) { | |
737 | if (copy > PAGE_SIZE) | |
738 | return -EINVAL; | |
739 | if (clear_user((char __user *)mask + nbytes, copy - nbytes)) | |
740 | return -EFAULT; | |
741 | copy = nbytes; | |
742 | } | |
743 | return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0; | |
744 | } | |
745 | ||
746 | asmlinkage long sys_mbind(unsigned long start, unsigned long len, | |
747 | unsigned long mode, | |
748 | unsigned long __user *nmask, unsigned long maxnode, | |
749 | unsigned flags) | |
750 | { | |
751 | nodemask_t nodes; | |
752 | int err; | |
753 | ||
754 | err = get_nodes(&nodes, nmask, maxnode); | |
755 | if (err) | |
756 | return err; | |
757 | return do_mbind(start, len, mode, &nodes, flags); | |
758 | } | |
759 | ||
760 | /* Set the process memory policy */ | |
761 | asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask, | |
762 | unsigned long maxnode) | |
763 | { | |
764 | int err; | |
765 | nodemask_t nodes; | |
766 | ||
767 | if (mode < 0 || mode > MPOL_MAX) | |
768 | return -EINVAL; | |
769 | err = get_nodes(&nodes, nmask, maxnode); | |
770 | if (err) | |
771 | return err; | |
772 | return do_set_mempolicy(mode, &nodes); | |
773 | } | |
774 | ||
39743889 CL |
775 | /* Macro needed until Paul implements this function in kernel/cpusets.c */ |
776 | #define cpuset_mems_allowed(task) node_online_map | |
777 | ||
778 | asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode, | |
779 | const unsigned long __user *old_nodes, | |
780 | const unsigned long __user *new_nodes) | |
781 | { | |
782 | struct mm_struct *mm; | |
783 | struct task_struct *task; | |
784 | nodemask_t old; | |
785 | nodemask_t new; | |
786 | nodemask_t task_nodes; | |
787 | int err; | |
788 | ||
789 | err = get_nodes(&old, old_nodes, maxnode); | |
790 | if (err) | |
791 | return err; | |
792 | ||
793 | err = get_nodes(&new, new_nodes, maxnode); | |
794 | if (err) | |
795 | return err; | |
796 | ||
797 | /* Find the mm_struct */ | |
798 | read_lock(&tasklist_lock); | |
799 | task = pid ? find_task_by_pid(pid) : current; | |
800 | if (!task) { | |
801 | read_unlock(&tasklist_lock); | |
802 | return -ESRCH; | |
803 | } | |
804 | mm = get_task_mm(task); | |
805 | read_unlock(&tasklist_lock); | |
806 | ||
807 | if (!mm) | |
808 | return -EINVAL; | |
809 | ||
810 | /* | |
811 | * Check if this process has the right to modify the specified | |
812 | * process. The right exists if the process has administrative | |
813 | * capabilities, superuser privileges or the same | |
814 | * userid as the target process. | |
815 | */ | |
816 | if ((current->euid != task->suid) && (current->euid != task->uid) && | |
817 | (current->uid != task->suid) && (current->uid != task->uid) && | |
818 | !capable(CAP_SYS_ADMIN)) { | |
819 | err = -EPERM; | |
820 | goto out; | |
821 | } | |
822 | ||
823 | task_nodes = cpuset_mems_allowed(task); | |
824 | /* Is the user allowed to access the target nodes? */ | |
825 | if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_ADMIN)) { | |
826 | err = -EPERM; | |
827 | goto out; | |
828 | } | |
829 | ||
830 | err = do_migrate_pages(mm, &old, &new, MPOL_MF_MOVE); | |
831 | out: | |
832 | mmput(mm); | |
833 | return err; | |
834 | } | |
835 | ||
836 | ||
8bccd85f CL |
837 | /* Retrieve NUMA policy */ |
838 | asmlinkage long sys_get_mempolicy(int __user *policy, | |
839 | unsigned long __user *nmask, | |
840 | unsigned long maxnode, | |
841 | unsigned long addr, unsigned long flags) | |
842 | { | |
843 | int err, pval; | |
844 | nodemask_t nodes; | |
845 | ||
846 | if (nmask != NULL && maxnode < MAX_NUMNODES) | |
847 | return -EINVAL; | |
848 | ||
849 | err = do_get_mempolicy(&pval, &nodes, addr, flags); | |
850 | ||
851 | if (err) | |
852 | return err; | |
853 | ||
854 | if (policy && put_user(pval, policy)) | |
855 | return -EFAULT; | |
856 | ||
857 | if (nmask) | |
858 | err = copy_nodes_to_user(nmask, maxnode, &nodes); | |
859 | ||
860 | return err; | |
861 | } | |
862 | ||
1da177e4 LT |
863 | #ifdef CONFIG_COMPAT |
864 | ||
865 | asmlinkage long compat_sys_get_mempolicy(int __user *policy, | |
866 | compat_ulong_t __user *nmask, | |
867 | compat_ulong_t maxnode, | |
868 | compat_ulong_t addr, compat_ulong_t flags) | |
869 | { | |
870 | long err; | |
871 | unsigned long __user *nm = NULL; | |
872 | unsigned long nr_bits, alloc_size; | |
873 | DECLARE_BITMAP(bm, MAX_NUMNODES); | |
874 | ||
875 | nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); | |
876 | alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; | |
877 | ||
878 | if (nmask) | |
879 | nm = compat_alloc_user_space(alloc_size); | |
880 | ||
881 | err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); | |
882 | ||
883 | if (!err && nmask) { | |
884 | err = copy_from_user(bm, nm, alloc_size); | |
885 | /* ensure entire bitmap is zeroed */ | |
886 | err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8); | |
887 | err |= compat_put_bitmap(nmask, bm, nr_bits); | |
888 | } | |
889 | ||
890 | return err; | |
891 | } | |
892 | ||
893 | asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, | |
894 | compat_ulong_t maxnode) | |
895 | { | |
896 | long err = 0; | |
897 | unsigned long __user *nm = NULL; | |
898 | unsigned long nr_bits, alloc_size; | |
899 | DECLARE_BITMAP(bm, MAX_NUMNODES); | |
900 | ||
901 | nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); | |
902 | alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; | |
903 | ||
904 | if (nmask) { | |
905 | err = compat_get_bitmap(bm, nmask, nr_bits); | |
906 | nm = compat_alloc_user_space(alloc_size); | |
907 | err |= copy_to_user(nm, bm, alloc_size); | |
908 | } | |
909 | ||
910 | if (err) | |
911 | return -EFAULT; | |
912 | ||
913 | return sys_set_mempolicy(mode, nm, nr_bits+1); | |
914 | } | |
915 | ||
916 | asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, | |
917 | compat_ulong_t mode, compat_ulong_t __user *nmask, | |
918 | compat_ulong_t maxnode, compat_ulong_t flags) | |
919 | { | |
920 | long err = 0; | |
921 | unsigned long __user *nm = NULL; | |
922 | unsigned long nr_bits, alloc_size; | |
dfcd3c0d | 923 | nodemask_t bm; |
1da177e4 LT |
924 | |
925 | nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); | |
926 | alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; | |
927 | ||
928 | if (nmask) { | |
dfcd3c0d | 929 | err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); |
1da177e4 | 930 | nm = compat_alloc_user_space(alloc_size); |
dfcd3c0d | 931 | err |= copy_to_user(nm, nodes_addr(bm), alloc_size); |
1da177e4 LT |
932 | } |
933 | ||
934 | if (err) | |
935 | return -EFAULT; | |
936 | ||
937 | return sys_mbind(start, len, mode, nm, nr_bits+1, flags); | |
938 | } | |
939 | ||
940 | #endif | |
941 | ||
942 | /* Return effective policy for a VMA */ | |
48fce342 CL |
943 | static struct mempolicy * get_vma_policy(struct task_struct *task, |
944 | struct vm_area_struct *vma, unsigned long addr) | |
1da177e4 | 945 | { |
6e21c8f1 | 946 | struct mempolicy *pol = task->mempolicy; |
1da177e4 LT |
947 | |
948 | if (vma) { | |
949 | if (vma->vm_ops && vma->vm_ops->get_policy) | |
8bccd85f | 950 | pol = vma->vm_ops->get_policy(vma, addr); |
1da177e4 LT |
951 | else if (vma->vm_policy && |
952 | vma->vm_policy->policy != MPOL_DEFAULT) | |
953 | pol = vma->vm_policy; | |
954 | } | |
955 | if (!pol) | |
956 | pol = &default_policy; | |
957 | return pol; | |
958 | } | |
959 | ||
960 | /* Return a zonelist representing a mempolicy */ | |
dd0fc66f | 961 | static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy) |
1da177e4 LT |
962 | { |
963 | int nd; | |
964 | ||
965 | switch (policy->policy) { | |
966 | case MPOL_PREFERRED: | |
967 | nd = policy->v.preferred_node; | |
968 | if (nd < 0) | |
969 | nd = numa_node_id(); | |
970 | break; | |
971 | case MPOL_BIND: | |
972 | /* Lower zones don't get a policy applied */ | |
973 | /* Careful: current->mems_allowed might have moved */ | |
af4ca457 | 974 | if (gfp_zone(gfp) >= policy_zone) |
1da177e4 LT |
975 | if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist)) |
976 | return policy->v.zonelist; | |
977 | /*FALL THROUGH*/ | |
978 | case MPOL_INTERLEAVE: /* should not happen */ | |
979 | case MPOL_DEFAULT: | |
980 | nd = numa_node_id(); | |
981 | break; | |
982 | default: | |
983 | nd = 0; | |
984 | BUG(); | |
985 | } | |
af4ca457 | 986 | return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp); |
1da177e4 LT |
987 | } |
988 | ||
989 | /* Do dynamic interleaving for a process */ | |
990 | static unsigned interleave_nodes(struct mempolicy *policy) | |
991 | { | |
992 | unsigned nid, next; | |
993 | struct task_struct *me = current; | |
994 | ||
995 | nid = me->il_next; | |
dfcd3c0d | 996 | next = next_node(nid, policy->v.nodes); |
1da177e4 | 997 | if (next >= MAX_NUMNODES) |
dfcd3c0d | 998 | next = first_node(policy->v.nodes); |
1da177e4 LT |
999 | me->il_next = next; |
1000 | return nid; | |
1001 | } | |
1002 | ||
1003 | /* Do static interleaving for a VMA with known offset. */ | |
1004 | static unsigned offset_il_node(struct mempolicy *pol, | |
1005 | struct vm_area_struct *vma, unsigned long off) | |
1006 | { | |
dfcd3c0d | 1007 | unsigned nnodes = nodes_weight(pol->v.nodes); |
1da177e4 LT |
1008 | unsigned target = (unsigned)off % nnodes; |
1009 | int c; | |
1010 | int nid = -1; | |
1011 | ||
1012 | c = 0; | |
1013 | do { | |
dfcd3c0d | 1014 | nid = next_node(nid, pol->v.nodes); |
1da177e4 LT |
1015 | c++; |
1016 | } while (c <= target); | |
1da177e4 LT |
1017 | return nid; |
1018 | } | |
1019 | ||
5da7ca86 CL |
1020 | /* Determine a node number for interleave */ |
1021 | static inline unsigned interleave_nid(struct mempolicy *pol, | |
1022 | struct vm_area_struct *vma, unsigned long addr, int shift) | |
1023 | { | |
1024 | if (vma) { | |
1025 | unsigned long off; | |
1026 | ||
1027 | off = vma->vm_pgoff; | |
1028 | off += (addr - vma->vm_start) >> shift; | |
1029 | return offset_il_node(pol, vma, off); | |
1030 | } else | |
1031 | return interleave_nodes(pol); | |
1032 | } | |
1033 | ||
1034 | /* Return a zonelist suitable for a huge page allocation. */ | |
1035 | struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr) | |
1036 | { | |
1037 | struct mempolicy *pol = get_vma_policy(current, vma, addr); | |
1038 | ||
1039 | if (pol->policy == MPOL_INTERLEAVE) { | |
1040 | unsigned nid; | |
1041 | ||
1042 | nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT); | |
1043 | return NODE_DATA(nid)->node_zonelists + gfp_zone(GFP_HIGHUSER); | |
1044 | } | |
1045 | return zonelist_policy(GFP_HIGHUSER, pol); | |
1046 | } | |
1047 | ||
1da177e4 LT |
1048 | /* Allocate a page in interleaved policy. |
1049 | Own path because it needs to do special accounting. */ | |
662f3a0b AK |
1050 | static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, |
1051 | unsigned nid) | |
1da177e4 LT |
1052 | { |
1053 | struct zonelist *zl; | |
1054 | struct page *page; | |
1055 | ||
af4ca457 | 1056 | zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp); |
1da177e4 LT |
1057 | page = __alloc_pages(gfp, order, zl); |
1058 | if (page && page_zone(page) == zl->zones[0]) { | |
e7c8d5c9 | 1059 | zone_pcp(zl->zones[0],get_cpu())->interleave_hit++; |
1da177e4 LT |
1060 | put_cpu(); |
1061 | } | |
1062 | return page; | |
1063 | } | |
1064 | ||
1065 | /** | |
1066 | * alloc_page_vma - Allocate a page for a VMA. | |
1067 | * | |
1068 | * @gfp: | |
1069 | * %GFP_USER user allocation. | |
1070 | * %GFP_KERNEL kernel allocations, | |
1071 | * %GFP_HIGHMEM highmem/user allocations, | |
1072 | * %GFP_FS allocation should not call back into a file system. | |
1073 | * %GFP_ATOMIC don't sleep. | |
1074 | * | |
1075 | * @vma: Pointer to VMA or NULL if not available. | |
1076 | * @addr: Virtual Address of the allocation. Must be inside the VMA. | |
1077 | * | |
1078 | * This function allocates a page from the kernel page pool and applies | |
1079 | * a NUMA policy associated with the VMA or the current process. | |
1080 | * When VMA is not NULL caller must hold down_read on the mmap_sem of the | |
1081 | * mm_struct of the VMA to prevent it from going away. Should be used for | |
1082 | * all allocations for pages that will be mapped into | |
1083 | * user space. Returns NULL when no page can be allocated. | |
1084 | * | |
1085 | * Should be called with the mmap_sem of the vma held. | |
1086 | */ | |
1087 | struct page * | |
dd0fc66f | 1088 | alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr) |
1da177e4 | 1089 | { |
6e21c8f1 | 1090 | struct mempolicy *pol = get_vma_policy(current, vma, addr); |
1da177e4 LT |
1091 | |
1092 | cpuset_update_current_mems_allowed(); | |
1093 | ||
1094 | if (unlikely(pol->policy == MPOL_INTERLEAVE)) { | |
1095 | unsigned nid; | |
5da7ca86 CL |
1096 | |
1097 | nid = interleave_nid(pol, vma, addr, PAGE_SHIFT); | |
1da177e4 LT |
1098 | return alloc_page_interleave(gfp, 0, nid); |
1099 | } | |
1100 | return __alloc_pages(gfp, 0, zonelist_policy(gfp, pol)); | |
1101 | } | |
1102 | ||
1103 | /** | |
1104 | * alloc_pages_current - Allocate pages. | |
1105 | * | |
1106 | * @gfp: | |
1107 | * %GFP_USER user allocation, | |
1108 | * %GFP_KERNEL kernel allocation, | |
1109 | * %GFP_HIGHMEM highmem allocation, | |
1110 | * %GFP_FS don't call back into a file system. | |
1111 | * %GFP_ATOMIC don't sleep. | |
1112 | * @order: Power of two of allocation size in pages. 0 is a single page. | |
1113 | * | |
1114 | * Allocate a page from the kernel page pool. When not in | |
1115 | * interrupt context, apply the current process' NUMA policy. | |
1116 | * Returns NULL when no page can be allocated. | |
1117 | * | |
1118 | * Don't call cpuset_update_current_mems_allowed() unless | |
1119 | * 1) it's ok to take cpuset_sem (can WAIT), and | |
1120 | * 2) allocating for current task (not interrupt). | |
1121 | */ | |
dd0fc66f | 1122 | struct page *alloc_pages_current(gfp_t gfp, unsigned order) |
1da177e4 LT |
1123 | { |
1124 | struct mempolicy *pol = current->mempolicy; | |
1125 | ||
1126 | if ((gfp & __GFP_WAIT) && !in_interrupt()) | |
1127 | cpuset_update_current_mems_allowed(); | |
1128 | if (!pol || in_interrupt()) | |
1129 | pol = &default_policy; | |
1130 | if (pol->policy == MPOL_INTERLEAVE) | |
1131 | return alloc_page_interleave(gfp, order, interleave_nodes(pol)); | |
1132 | return __alloc_pages(gfp, order, zonelist_policy(gfp, pol)); | |
1133 | } | |
1134 | EXPORT_SYMBOL(alloc_pages_current); | |
1135 | ||
1136 | /* Slow path of a mempolicy copy */ | |
1137 | struct mempolicy *__mpol_copy(struct mempolicy *old) | |
1138 | { | |
1139 | struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); | |
1140 | ||
1141 | if (!new) | |
1142 | return ERR_PTR(-ENOMEM); | |
1143 | *new = *old; | |
1144 | atomic_set(&new->refcnt, 1); | |
1145 | if (new->policy == MPOL_BIND) { | |
1146 | int sz = ksize(old->v.zonelist); | |
1147 | new->v.zonelist = kmalloc(sz, SLAB_KERNEL); | |
1148 | if (!new->v.zonelist) { | |
1149 | kmem_cache_free(policy_cache, new); | |
1150 | return ERR_PTR(-ENOMEM); | |
1151 | } | |
1152 | memcpy(new->v.zonelist, old->v.zonelist, sz); | |
1153 | } | |
1154 | return new; | |
1155 | } | |
1156 | ||
1157 | /* Slow path of a mempolicy comparison */ | |
1158 | int __mpol_equal(struct mempolicy *a, struct mempolicy *b) | |
1159 | { | |
1160 | if (!a || !b) | |
1161 | return 0; | |
1162 | if (a->policy != b->policy) | |
1163 | return 0; | |
1164 | switch (a->policy) { | |
1165 | case MPOL_DEFAULT: | |
1166 | return 1; | |
1167 | case MPOL_INTERLEAVE: | |
dfcd3c0d | 1168 | return nodes_equal(a->v.nodes, b->v.nodes); |
1da177e4 LT |
1169 | case MPOL_PREFERRED: |
1170 | return a->v.preferred_node == b->v.preferred_node; | |
1171 | case MPOL_BIND: { | |
1172 | int i; | |
1173 | for (i = 0; a->v.zonelist->zones[i]; i++) | |
1174 | if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i]) | |
1175 | return 0; | |
1176 | return b->v.zonelist->zones[i] == NULL; | |
1177 | } | |
1178 | default: | |
1179 | BUG(); | |
1180 | return 0; | |
1181 | } | |
1182 | } | |
1183 | ||
1184 | /* Slow path of a mpol destructor. */ | |
1185 | void __mpol_free(struct mempolicy *p) | |
1186 | { | |
1187 | if (!atomic_dec_and_test(&p->refcnt)) | |
1188 | return; | |
1189 | if (p->policy == MPOL_BIND) | |
1190 | kfree(p->v.zonelist); | |
1191 | p->policy = MPOL_DEFAULT; | |
1192 | kmem_cache_free(policy_cache, p); | |
1193 | } | |
1194 | ||
1da177e4 LT |
1195 | /* |
1196 | * Shared memory backing store policy support. | |
1197 | * | |
1198 | * Remember policies even when nobody has shared memory mapped. | |
1199 | * The policies are kept in Red-Black tree linked from the inode. | |
1200 | * They are protected by the sp->lock spinlock, which should be held | |
1201 | * for any accesses to the tree. | |
1202 | */ | |
1203 | ||
1204 | /* lookup first element intersecting start-end */ | |
1205 | /* Caller holds sp->lock */ | |
1206 | static struct sp_node * | |
1207 | sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) | |
1208 | { | |
1209 | struct rb_node *n = sp->root.rb_node; | |
1210 | ||
1211 | while (n) { | |
1212 | struct sp_node *p = rb_entry(n, struct sp_node, nd); | |
1213 | ||
1214 | if (start >= p->end) | |
1215 | n = n->rb_right; | |
1216 | else if (end <= p->start) | |
1217 | n = n->rb_left; | |
1218 | else | |
1219 | break; | |
1220 | } | |
1221 | if (!n) | |
1222 | return NULL; | |
1223 | for (;;) { | |
1224 | struct sp_node *w = NULL; | |
1225 | struct rb_node *prev = rb_prev(n); | |
1226 | if (!prev) | |
1227 | break; | |
1228 | w = rb_entry(prev, struct sp_node, nd); | |
1229 | if (w->end <= start) | |
1230 | break; | |
1231 | n = prev; | |
1232 | } | |
1233 | return rb_entry(n, struct sp_node, nd); | |
1234 | } | |
1235 | ||
1236 | /* Insert a new shared policy into the list. */ | |
1237 | /* Caller holds sp->lock */ | |
1238 | static void sp_insert(struct shared_policy *sp, struct sp_node *new) | |
1239 | { | |
1240 | struct rb_node **p = &sp->root.rb_node; | |
1241 | struct rb_node *parent = NULL; | |
1242 | struct sp_node *nd; | |
1243 | ||
1244 | while (*p) { | |
1245 | parent = *p; | |
1246 | nd = rb_entry(parent, struct sp_node, nd); | |
1247 | if (new->start < nd->start) | |
1248 | p = &(*p)->rb_left; | |
1249 | else if (new->end > nd->end) | |
1250 | p = &(*p)->rb_right; | |
1251 | else | |
1252 | BUG(); | |
1253 | } | |
1254 | rb_link_node(&new->nd, parent, p); | |
1255 | rb_insert_color(&new->nd, &sp->root); | |
1256 | PDprintk("inserting %lx-%lx: %d\n", new->start, new->end, | |
1257 | new->policy ? new->policy->policy : 0); | |
1258 | } | |
1259 | ||
1260 | /* Find shared policy intersecting idx */ | |
1261 | struct mempolicy * | |
1262 | mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) | |
1263 | { | |
1264 | struct mempolicy *pol = NULL; | |
1265 | struct sp_node *sn; | |
1266 | ||
1267 | if (!sp->root.rb_node) | |
1268 | return NULL; | |
1269 | spin_lock(&sp->lock); | |
1270 | sn = sp_lookup(sp, idx, idx+1); | |
1271 | if (sn) { | |
1272 | mpol_get(sn->policy); | |
1273 | pol = sn->policy; | |
1274 | } | |
1275 | spin_unlock(&sp->lock); | |
1276 | return pol; | |
1277 | } | |
1278 | ||
1279 | static void sp_delete(struct shared_policy *sp, struct sp_node *n) | |
1280 | { | |
1281 | PDprintk("deleting %lx-l%x\n", n->start, n->end); | |
1282 | rb_erase(&n->nd, &sp->root); | |
1283 | mpol_free(n->policy); | |
1284 | kmem_cache_free(sn_cache, n); | |
1285 | } | |
1286 | ||
1287 | struct sp_node * | |
1288 | sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol) | |
1289 | { | |
1290 | struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL); | |
1291 | ||
1292 | if (!n) | |
1293 | return NULL; | |
1294 | n->start = start; | |
1295 | n->end = end; | |
1296 | mpol_get(pol); | |
1297 | n->policy = pol; | |
1298 | return n; | |
1299 | } | |
1300 | ||
1301 | /* Replace a policy range. */ | |
1302 | static int shared_policy_replace(struct shared_policy *sp, unsigned long start, | |
1303 | unsigned long end, struct sp_node *new) | |
1304 | { | |
1305 | struct sp_node *n, *new2 = NULL; | |
1306 | ||
1307 | restart: | |
1308 | spin_lock(&sp->lock); | |
1309 | n = sp_lookup(sp, start, end); | |
1310 | /* Take care of old policies in the same range. */ | |
1311 | while (n && n->start < end) { | |
1312 | struct rb_node *next = rb_next(&n->nd); | |
1313 | if (n->start >= start) { | |
1314 | if (n->end <= end) | |
1315 | sp_delete(sp, n); | |
1316 | else | |
1317 | n->start = end; | |
1318 | } else { | |
1319 | /* Old policy spanning whole new range. */ | |
1320 | if (n->end > end) { | |
1321 | if (!new2) { | |
1322 | spin_unlock(&sp->lock); | |
1323 | new2 = sp_alloc(end, n->end, n->policy); | |
1324 | if (!new2) | |
1325 | return -ENOMEM; | |
1326 | goto restart; | |
1327 | } | |
1328 | n->end = start; | |
1329 | sp_insert(sp, new2); | |
1330 | new2 = NULL; | |
1331 | break; | |
1332 | } else | |
1333 | n->end = start; | |
1334 | } | |
1335 | if (!next) | |
1336 | break; | |
1337 | n = rb_entry(next, struct sp_node, nd); | |
1338 | } | |
1339 | if (new) | |
1340 | sp_insert(sp, new); | |
1341 | spin_unlock(&sp->lock); | |
1342 | if (new2) { | |
1343 | mpol_free(new2->policy); | |
1344 | kmem_cache_free(sn_cache, new2); | |
1345 | } | |
1346 | return 0; | |
1347 | } | |
1348 | ||
1349 | int mpol_set_shared_policy(struct shared_policy *info, | |
1350 | struct vm_area_struct *vma, struct mempolicy *npol) | |
1351 | { | |
1352 | int err; | |
1353 | struct sp_node *new = NULL; | |
1354 | unsigned long sz = vma_pages(vma); | |
1355 | ||
1356 | PDprintk("set_shared_policy %lx sz %lu %d %lx\n", | |
1357 | vma->vm_pgoff, | |
1358 | sz, npol? npol->policy : -1, | |
dfcd3c0d | 1359 | npol ? nodes_addr(npol->v.nodes)[0] : -1); |
1da177e4 LT |
1360 | |
1361 | if (npol) { | |
1362 | new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); | |
1363 | if (!new) | |
1364 | return -ENOMEM; | |
1365 | } | |
1366 | err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); | |
1367 | if (err && new) | |
1368 | kmem_cache_free(sn_cache, new); | |
1369 | return err; | |
1370 | } | |
1371 | ||
1372 | /* Free a backing policy store on inode delete. */ | |
1373 | void mpol_free_shared_policy(struct shared_policy *p) | |
1374 | { | |
1375 | struct sp_node *n; | |
1376 | struct rb_node *next; | |
1377 | ||
1378 | if (!p->root.rb_node) | |
1379 | return; | |
1380 | spin_lock(&p->lock); | |
1381 | next = rb_first(&p->root); | |
1382 | while (next) { | |
1383 | n = rb_entry(next, struct sp_node, nd); | |
1384 | next = rb_next(&n->nd); | |
90c5029e | 1385 | rb_erase(&n->nd, &p->root); |
1da177e4 LT |
1386 | mpol_free(n->policy); |
1387 | kmem_cache_free(sn_cache, n); | |
1388 | } | |
1389 | spin_unlock(&p->lock); | |
1da177e4 LT |
1390 | } |
1391 | ||
1392 | /* assumes fs == KERNEL_DS */ | |
1393 | void __init numa_policy_init(void) | |
1394 | { | |
1395 | policy_cache = kmem_cache_create("numa_policy", | |
1396 | sizeof(struct mempolicy), | |
1397 | 0, SLAB_PANIC, NULL, NULL); | |
1398 | ||
1399 | sn_cache = kmem_cache_create("shared_policy_node", | |
1400 | sizeof(struct sp_node), | |
1401 | 0, SLAB_PANIC, NULL, NULL); | |
1402 | ||
1403 | /* Set interleaving policy for system init. This way not all | |
1404 | the data structures allocated at system boot end up in node zero. */ | |
1405 | ||
8bccd85f | 1406 | if (do_set_mempolicy(MPOL_INTERLEAVE, &node_online_map)) |
1da177e4 LT |
1407 | printk("numa_policy_init: interleaving failed\n"); |
1408 | } | |
1409 | ||
8bccd85f | 1410 | /* Reset policy of current process to default */ |
1da177e4 LT |
1411 | void numa_default_policy(void) |
1412 | { | |
8bccd85f | 1413 | do_set_mempolicy(MPOL_DEFAULT, NULL); |
1da177e4 | 1414 | } |
68860ec1 PJ |
1415 | |
1416 | /* Migrate a policy to a different set of nodes */ | |
1417 | static void rebind_policy(struct mempolicy *pol, const nodemask_t *old, | |
1418 | const nodemask_t *new) | |
1419 | { | |
1420 | nodemask_t tmp; | |
1421 | ||
1422 | if (!pol) | |
1423 | return; | |
1424 | ||
1425 | switch (pol->policy) { | |
1426 | case MPOL_DEFAULT: | |
1427 | break; | |
1428 | case MPOL_INTERLEAVE: | |
1429 | nodes_remap(tmp, pol->v.nodes, *old, *new); | |
1430 | pol->v.nodes = tmp; | |
1431 | current->il_next = node_remap(current->il_next, *old, *new); | |
1432 | break; | |
1433 | case MPOL_PREFERRED: | |
1434 | pol->v.preferred_node = node_remap(pol->v.preferred_node, | |
1435 | *old, *new); | |
1436 | break; | |
1437 | case MPOL_BIND: { | |
1438 | nodemask_t nodes; | |
1439 | struct zone **z; | |
1440 | struct zonelist *zonelist; | |
1441 | ||
1442 | nodes_clear(nodes); | |
1443 | for (z = pol->v.zonelist->zones; *z; z++) | |
1444 | node_set((*z)->zone_pgdat->node_id, nodes); | |
1445 | nodes_remap(tmp, nodes, *old, *new); | |
1446 | nodes = tmp; | |
1447 | ||
1448 | zonelist = bind_zonelist(&nodes); | |
1449 | ||
1450 | /* If no mem, then zonelist is NULL and we keep old zonelist. | |
1451 | * If that old zonelist has no remaining mems_allowed nodes, | |
1452 | * then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT. | |
1453 | */ | |
1454 | ||
1455 | if (zonelist) { | |
1456 | /* Good - got mem - substitute new zonelist */ | |
1457 | kfree(pol->v.zonelist); | |
1458 | pol->v.zonelist = zonelist; | |
1459 | } | |
1460 | break; | |
1461 | } | |
1462 | default: | |
1463 | BUG(); | |
1464 | break; | |
1465 | } | |
1466 | } | |
1467 | ||
1468 | /* | |
1469 | * Someone moved this task to different nodes. Fixup mempolicies. | |
1470 | * | |
1471 | * TODO - fixup current->mm->vma and shmfs/tmpfs/hugetlbfs policies as well, | |
1472 | * once we have a cpuset mechanism to mark which cpuset subtree is migrating. | |
1473 | */ | |
1474 | void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new) | |
1475 | { | |
1476 | rebind_policy(current->mempolicy, old, new); | |
1477 | } | |
1a75a6c8 CL |
1478 | |
1479 | /* | |
1480 | * Display pages allocated per node and memory policy via /proc. | |
1481 | */ | |
1482 | ||
1483 | static const char *policy_types[] = { "default", "prefer", "bind", | |
1484 | "interleave" }; | |
1485 | ||
1486 | /* | |
1487 | * Convert a mempolicy into a string. | |
1488 | * Returns the number of characters in buffer (if positive) | |
1489 | * or an error (negative) | |
1490 | */ | |
1491 | static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) | |
1492 | { | |
1493 | char *p = buffer; | |
1494 | int l; | |
1495 | nodemask_t nodes; | |
1496 | int mode = pol ? pol->policy : MPOL_DEFAULT; | |
1497 | ||
1498 | switch (mode) { | |
1499 | case MPOL_DEFAULT: | |
1500 | nodes_clear(nodes); | |
1501 | break; | |
1502 | ||
1503 | case MPOL_PREFERRED: | |
1504 | nodes_clear(nodes); | |
1505 | node_set(pol->v.preferred_node, nodes); | |
1506 | break; | |
1507 | ||
1508 | case MPOL_BIND: | |
1509 | get_zonemask(pol, &nodes); | |
1510 | break; | |
1511 | ||
1512 | case MPOL_INTERLEAVE: | |
1513 | nodes = pol->v.nodes; | |
1514 | break; | |
1515 | ||
1516 | default: | |
1517 | BUG(); | |
1518 | return -EFAULT; | |
1519 | } | |
1520 | ||
1521 | l = strlen(policy_types[mode]); | |
1522 | if (buffer + maxlen < p + l + 1) | |
1523 | return -ENOSPC; | |
1524 | ||
1525 | strcpy(p, policy_types[mode]); | |
1526 | p += l; | |
1527 | ||
1528 | if (!nodes_empty(nodes)) { | |
1529 | if (buffer + maxlen < p + 2) | |
1530 | return -ENOSPC; | |
1531 | *p++ = '='; | |
1532 | p += nodelist_scnprintf(p, buffer + maxlen - p, nodes); | |
1533 | } | |
1534 | return p - buffer; | |
1535 | } | |
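/* Example (added note): an interleave policy over nodes 0-3 is rendered by the function above as "interleave=0-3"; MPOL_DEFAULT yields just "default", since its node mask is empty and the "=" part is omitted. */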
1536 | ||
1537 | struct numa_maps { | |
1538 | unsigned long pages; | |
1539 | unsigned long anon; | |
1540 | unsigned long mapped; | |
1541 | unsigned long mapcount_max; | |
1542 | unsigned long node[MAX_NUMNODES]; | |
1543 | }; | |
1544 | ||
1545 | static void gather_stats(struct page *page, void *private) | |
1546 | { | |
1547 | struct numa_maps *md = private; | |
1548 | int count = page_mapcount(page); | |
1549 | ||
1550 | if (count) | |
1551 | md->mapped++; | |
1552 | ||
1553 | if (count > md->mapcount_max) | |
1554 | md->mapcount_max = count; | |
1555 | ||
1556 | md->pages++; | |
1557 | ||
1558 | if (PageAnon(page)) | |
1559 | md->anon++; | |
1560 | ||
1561 | md->node[page_to_nid(page)]++; | |
1562 | cond_resched(); | |
1563 | } | |
1564 | ||
1565 | int show_numa_map(struct seq_file *m, void *v) | |
1566 | { | |
1567 | struct task_struct *task = m->private; | |
1568 | struct vm_area_struct *vma = v; | |
1569 | struct numa_maps *md; | |
1570 | int n; | |
1571 | char buffer[50]; | |
1572 | ||
1573 | if (!vma->vm_mm) | |
1574 | return 0; | |
1575 | ||
1576 | md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL); | |
1577 | if (!md) | |
1578 | return 0; | |
1579 | ||
1580 | check_pgd_range(vma, vma->vm_start, vma->vm_end, | |
1581 | &node_online_map, MPOL_MF_STATS, md); | |
1582 | ||
1583 | if (md->pages) { | |
1584 | mpol_to_str(buffer, sizeof(buffer), | |
1585 | get_vma_policy(task, vma, vma->vm_start)); | |
1586 | ||
1587 | seq_printf(m, "%08lx %s pages=%lu mapped=%lu maxref=%lu", | |
1588 | vma->vm_start, buffer, md->pages, | |
1589 | md->mapped, md->mapcount_max); | |
1590 | ||
1591 | if (md->anon) | |
1592 | seq_printf(m," anon=%lu",md->anon); | |
1593 | ||
1594 | for_each_online_node(n) | |
1595 | if (md->node[n]) | |
1596 | seq_printf(m, " N%d=%lu", n, md->node[n]); | |
1597 | ||
1598 | seq_putc(m, '\n'); | |
1599 | } | |
1600 | kfree(md); | |
1601 | ||
1602 | if (m->count < m->size) | |
1603 | m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0; | |
1604 | return 0; | |
1605 | } | |
1606 |