mempolicy: support optional mode flags
mm/mempolicy.c
1da177e4
LT
1/*
2 * Simple NUMA memory policy for the Linux kernel.
3 *
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
8bccd85f 5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
1da177e4
LT
6 * Subject to the GNU Public License, version 2.
7 *
8 * NUMA policy allows the user to give hints about the node(s) from which
9 * memory should be allocated.
10 *
11 * Support four policies per VMA and per process:
12 *
13 * The VMA policy has priority over the process policy for a page fault.
14 *
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a process counter
20 * is used.
8bccd85f 21 *
1da177e4
LT
22 * bind Only allocate memory on a specific set of nodes,
23 * no fallback.
8bccd85f
CL
24 * FIXME: memory is allocated starting with the first node
25 * and proceeding to the last. It would be better if bind truly
26 * restricted the allocation to the specified memory nodes instead
27 *
1da177e4
LT
28 * preferred Try a specific node first before normal fallback.
29 * As a special case node -1 here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
32 * process policy.
8bccd85f 33 *
1da177e4
LT
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
37 *
38 * The process policy is applied for most non-interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
42 *
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
46 *
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49 * on systems with highmem, kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
51 *
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
54 */
55
56/* Notebook:
57 fix mmap readahead to honour policy and enable policy for any page cache
58 object
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
61 first item above.
62 handle mremap for shared memory (currently ignored for the policy)
63 grows down?
64 make bind policy root only? It can trigger oom much faster and the
65 kernel does not always handle that gracefully.
66 could replace all the switch()es with a mempolicy_ops structure.
67*/
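/*
 * Illustration (a userspace sketch, not part of this file): the policies
 * described above are selected with the set_mempolicy()/mbind() system
 * calls, e.g. via the numaif.h wrappers shipped with libnuma. Assuming a
 * machine with at least two memory nodes:
 *
 *	#include <numaif.h>		// set_mempolicy(), MPOL_* constants
 *	#include <stdio.h>		// perror()
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *
 *	// Interleave all future allocations of this task across nodes 0-1.
 *	if (set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask)) != 0)
 *		perror("set_mempolicy");
 *
 *	// Later, revert to the default local-node policy.
 *	set_mempolicy(MPOL_DEFAULT, NULL, 0);
 */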
68
69#include <linux/mempolicy.h>
70#include <linux/mm.h>
71#include <linux/highmem.h>
72#include <linux/hugetlb.h>
73#include <linux/kernel.h>
74#include <linux/sched.h>
1da177e4
LT
75#include <linux/nodemask.h>
76#include <linux/cpuset.h>
77#include <linux/gfp.h>
78#include <linux/slab.h>
79#include <linux/string.h>
80#include <linux/module.h>
b488893a 81#include <linux/nsproxy.h>
1da177e4
LT
82#include <linux/interrupt.h>
83#include <linux/init.h>
84#include <linux/compat.h>
dc9aa5b9 85#include <linux/swap.h>
1a75a6c8
CL
86#include <linux/seq_file.h>
87#include <linux/proc_fs.h>
b20a3503 88#include <linux/migrate.h>
95a402c3 89#include <linux/rmap.h>
86c3a764 90#include <linux/security.h>
dbcb0f19 91#include <linux/syscalls.h>
dc9aa5b9 92
1da177e4
LT
93#include <asm/tlbflush.h>
94#include <asm/uaccess.h>
95
38e35860 96/* Internal flags */
dc9aa5b9 97#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for contiguous vmas */
38e35860 98#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
1a75a6c8 99#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2) /* Gather statistics */
dc9aa5b9 100
fcc234f8
PE
101static struct kmem_cache *policy_cache;
102static struct kmem_cache *sn_cache;
1da177e4 103
1da177e4
LT
104/* Highest zone. A specific allocation for a zone below that is not
105 policied. */
6267276f 106enum zone_type policy_zone = 0;
1da177e4 107
d42c6997 108struct mempolicy default_policy = {
1da177e4
LT
109 .refcnt = ATOMIC_INIT(1), /* never free it */
110 .policy = MPOL_DEFAULT,
111};
112
dbcb0f19
AB
113static void mpol_rebind_policy(struct mempolicy *pol,
114 const nodemask_t *newmask);
115
1da177e4 116/* Do sanity checking on a policy */
a3b51e01 117static int mpol_check_policy(unsigned short mode, nodemask_t *nodes)
1da177e4 118{
31f1de46
KM
119 int was_empty, is_empty;
120
121 if (!nodes)
122 return 0;
123
124 /*
125 * "Contextualize" the incoming nodemask for cpusets:
126 * Remember whether the incoming nodemask was empty. If not,
127 * restrict the nodes to the allowed nodes in the cpuset.
128 * This is guaranteed to be a subset of nodes with memory.
129 */
130 cpuset_update_task_memory_state();
131 is_empty = was_empty = nodes_empty(*nodes);
132 if (!was_empty) {
133 nodes_and(*nodes, *nodes, cpuset_current_mems_allowed);
134 is_empty = nodes_empty(*nodes); /* after "contextualization" */
135 }
1da177e4
LT
136
137 switch (mode) {
138 case MPOL_DEFAULT:
31f1de46
KM
139 /*
140 * require caller to specify an empty nodemask
141 * before "contextualization"
142 */
143 if (!was_empty)
1da177e4
LT
144 return -EINVAL;
145 break;
146 case MPOL_BIND:
147 case MPOL_INTERLEAVE:
31f1de46
KM
148 /*
149 * require at least 1 valid node after "contextualization"
150 */
151 if (is_empty)
152 return -EINVAL;
153 break;
154 case MPOL_PREFERRED:
155 /*
156 * Did caller specify invalid nodes?
157 * Don't silently accept this as "local allocation".
158 */
159 if (!was_empty && is_empty)
1da177e4
LT
160 return -EINVAL;
161 break;
a3b51e01
DR
162 default:
163 BUG();
1da177e4 164 }
31f1de46 165 return 0;
1da177e4 166}
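/*
 * Worked example of the "contextualization" above: a caller requesting
 * MPOL_BIND with nodes {2,5,9} while its cpuset only allows {0-3} ends up
 * with the mask ANDed down to {2}, and the bind proceeds on node 2 alone.
 * Had the caller asked for {8,9}, the intersection would be empty and
 * mpol_check_policy() returns -EINVAL. MPOL_DEFAULT is the opposite case:
 * a non-empty incoming nodemask is rejected.
 */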
dd942ae3 167
19770b32
MG
168/* Check that the nodemask contains at least one populated zone */
169static int is_valid_nodemask(nodemask_t *nodemask)
1da177e4 170{
19770b32 171 int nd, k;
1da177e4 172
19770b32
MG
173 /* Check that there is something useful in this mask */
174 k = policy_zone;
175
176 for_each_node_mask(nd, *nodemask) {
177 struct zone *z;
178
179 for (k = 0; k <= policy_zone; k++) {
180 z = &NODE_DATA(nd)->node_zones[k];
181 if (z->present_pages > 0)
182 return 1;
dd942ae3 183 }
8af5e2eb 184 }
19770b32
MG
185
186 return 0;
1da177e4
LT
187}
188
189/* Create a new policy */
028fec41
DR
190static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
191 nodemask_t *nodes)
1da177e4
LT
192{
193 struct mempolicy *policy;
194
028fec41
DR
195 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
196 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
140d5a49 197
1da177e4
LT
198 if (mode == MPOL_DEFAULT)
199 return NULL;
200 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
201 if (!policy)
202 return ERR_PTR(-ENOMEM);
203 atomic_set(&policy->refcnt, 1);
204 switch (mode) {
205 case MPOL_INTERLEAVE:
dfcd3c0d 206 policy->v.nodes = *nodes;
6eaf806a 207 if (nodes_weight(policy->v.nodes) == 0) {
8f493d79
AK
208 kmem_cache_free(policy_cache, policy);
209 return ERR_PTR(-EINVAL);
210 }
1da177e4
LT
211 break;
212 case MPOL_PREFERRED:
dfcd3c0d 213 policy->v.preferred_node = first_node(*nodes);
1da177e4
LT
214 if (policy->v.preferred_node >= MAX_NUMNODES)
215 policy->v.preferred_node = -1;
216 break;
217 case MPOL_BIND:
19770b32 218 if (!is_valid_nodemask(nodes)) {
1da177e4 219 kmem_cache_free(policy_cache, policy);
19770b32 220 return ERR_PTR(-EINVAL);
1da177e4 221 }
19770b32 222 policy->v.nodes = *nodes;
1da177e4 223 break;
a3b51e01
DR
224 default:
225 BUG();
1da177e4
LT
226 }
227 policy->policy = mode;
028fec41 228 policy->flags = flags;
74cb2155 229 policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
1da177e4
LT
230 return policy;
231}
232
397874df 233static void gather_stats(struct page *, void *, int pte_dirty);
fc301289
CL
234static void migrate_page_add(struct page *page, struct list_head *pagelist,
235 unsigned long flags);
1a75a6c8 236
38e35860 237/* Scan through pages checking if pages follow certain conditions. */
b5810039 238static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
dc9aa5b9
CL
239 unsigned long addr, unsigned long end,
240 const nodemask_t *nodes, unsigned long flags,
38e35860 241 void *private)
1da177e4 242{
91612e0d
HD
243 pte_t *orig_pte;
244 pte_t *pte;
705e87c0 245 spinlock_t *ptl;
941150a3 246
705e87c0 247 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
91612e0d 248 do {
6aab341e 249 struct page *page;
25ba77c1 250 int nid;
91612e0d
HD
251
252 if (!pte_present(*pte))
1da177e4 253 continue;
6aab341e
LT
254 page = vm_normal_page(vma, addr, *pte);
255 if (!page)
1da177e4 256 continue;
053837fc
NP
257 /*
258 * The check for PageReserved here is important to avoid
259 * handling zero pages and other pages that may have been
260 * marked special by the system.
261 *
262 * If PageReserved were not checked here then e.g.
263 * the location of the zero page could have an influence
264 * on MPOL_MF_STRICT, zero pages would be counted for
265 * the per node stats, and there would be useless attempts
266 * to put zero pages on the migration list.
267 */
f4598c8b
CL
268 if (PageReserved(page))
269 continue;
6aab341e 270 nid = page_to_nid(page);
38e35860
CL
271 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
272 continue;
273
1a75a6c8 274 if (flags & MPOL_MF_STATS)
397874df 275 gather_stats(page, private, pte_dirty(*pte));
053837fc 276 else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
fc301289 277 migrate_page_add(page, private, flags);
38e35860
CL
278 else
279 break;
91612e0d 280 } while (pte++, addr += PAGE_SIZE, addr != end);
705e87c0 281 pte_unmap_unlock(orig_pte, ptl);
91612e0d
HD
282 return addr != end;
283}
284
b5810039 285static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
dc9aa5b9
CL
286 unsigned long addr, unsigned long end,
287 const nodemask_t *nodes, unsigned long flags,
38e35860 288 void *private)
91612e0d
HD
289{
290 pmd_t *pmd;
291 unsigned long next;
292
293 pmd = pmd_offset(pud, addr);
294 do {
295 next = pmd_addr_end(addr, end);
296 if (pmd_none_or_clear_bad(pmd))
297 continue;
dc9aa5b9 298 if (check_pte_range(vma, pmd, addr, next, nodes,
38e35860 299 flags, private))
91612e0d
HD
300 return -EIO;
301 } while (pmd++, addr = next, addr != end);
302 return 0;
303}
304
b5810039 305static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
dc9aa5b9
CL
306 unsigned long addr, unsigned long end,
307 const nodemask_t *nodes, unsigned long flags,
38e35860 308 void *private)
91612e0d
HD
309{
310 pud_t *pud;
311 unsigned long next;
312
313 pud = pud_offset(pgd, addr);
314 do {
315 next = pud_addr_end(addr, end);
316 if (pud_none_or_clear_bad(pud))
317 continue;
dc9aa5b9 318 if (check_pmd_range(vma, pud, addr, next, nodes,
38e35860 319 flags, private))
91612e0d
HD
320 return -EIO;
321 } while (pud++, addr = next, addr != end);
322 return 0;
323}
324
b5810039 325static inline int check_pgd_range(struct vm_area_struct *vma,
dc9aa5b9
CL
326 unsigned long addr, unsigned long end,
327 const nodemask_t *nodes, unsigned long flags,
38e35860 328 void *private)
91612e0d
HD
329{
330 pgd_t *pgd;
331 unsigned long next;
332
b5810039 333 pgd = pgd_offset(vma->vm_mm, addr);
91612e0d
HD
334 do {
335 next = pgd_addr_end(addr, end);
336 if (pgd_none_or_clear_bad(pgd))
337 continue;
dc9aa5b9 338 if (check_pud_range(vma, pgd, addr, next, nodes,
38e35860 339 flags, private))
91612e0d
HD
340 return -EIO;
341 } while (pgd++, addr = next, addr != end);
342 return 0;
1da177e4
LT
343}
344
dc9aa5b9
CL
345/*
346 * Check if all pages in a range are on a set of nodes.
347 * If pagelist != NULL then isolate pages from the LRU and
348 * put them on the pagelist.
349 */
1da177e4
LT
350static struct vm_area_struct *
351check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
38e35860 352 const nodemask_t *nodes, unsigned long flags, void *private)
1da177e4
LT
353{
354 int err;
355 struct vm_area_struct *first, *vma, *prev;
356
90036ee5 357 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
90036ee5 358
b20a3503
CL
359 err = migrate_prep();
360 if (err)
361 return ERR_PTR(err);
90036ee5 362 }
053837fc 363
1da177e4
LT
364 first = find_vma(mm, start);
365 if (!first)
366 return ERR_PTR(-EFAULT);
367 prev = NULL;
368 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
dc9aa5b9
CL
369 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
370 if (!vma->vm_next && vma->vm_end < end)
371 return ERR_PTR(-EFAULT);
372 if (prev && prev->vm_end < vma->vm_start)
373 return ERR_PTR(-EFAULT);
374 }
375 if (!is_vm_hugetlb_page(vma) &&
376 ((flags & MPOL_MF_STRICT) ||
377 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
378 vma_migratable(vma)))) {
5b952b3c 379 unsigned long endvma = vma->vm_end;
dc9aa5b9 380
5b952b3c
AK
381 if (endvma > end)
382 endvma = end;
383 if (vma->vm_start > start)
384 start = vma->vm_start;
dc9aa5b9 385 err = check_pgd_range(vma, start, endvma, nodes,
38e35860 386 flags, private);
1da177e4
LT
387 if (err) {
388 first = ERR_PTR(err);
389 break;
390 }
391 }
392 prev = vma;
393 }
394 return first;
395}
396
397/* Apply policy to a single VMA */
398static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
399{
400 int err = 0;
401 struct mempolicy *old = vma->vm_policy;
402
140d5a49 403 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
1da177e4
LT
404 vma->vm_start, vma->vm_end, vma->vm_pgoff,
405 vma->vm_ops, vma->vm_file,
406 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
407
408 if (vma->vm_ops && vma->vm_ops->set_policy)
409 err = vma->vm_ops->set_policy(vma, new);
410 if (!err) {
411 mpol_get(new);
412 vma->vm_policy = new;
413 mpol_free(old);
414 }
415 return err;
416}
417
418/* Step 2: apply policy to a range and do splits. */
419static int mbind_range(struct vm_area_struct *vma, unsigned long start,
420 unsigned long end, struct mempolicy *new)
421{
422 struct vm_area_struct *next;
423 int err;
424
425 err = 0;
426 for (; vma && vma->vm_start < end; vma = next) {
427 next = vma->vm_next;
428 if (vma->vm_start < start)
429 err = split_vma(vma->vm_mm, vma, start, 1);
430 if (!err && vma->vm_end > end)
431 err = split_vma(vma->vm_mm, vma, end, 0);
432 if (!err)
433 err = policy_vma(vma, new);
434 if (err)
435 break;
436 }
437 return err;
438}
439
c61afb18
PJ
440/*
441 * Update task->flags PF_MEMPOLICY bit: set iff non-default
442 * mempolicy. Allows more rapid checking of this (combined perhaps
443 * with other PF_* flag bits) on memory allocation hot code paths.
444 *
445 * If called from outside this file, the task 'p' should -only- be
446 * a newly forked child not yet visible on the task list, because
447 * manipulating the task flags of a visible task is not safe.
448 *
449 * The above limitation is why this routine has the funny name
450 * mpol_fix_fork_child_flag().
451 *
452 * It is also safe to call this with a task pointer of current,
453 * which the static wrapper mpol_set_task_struct_flag() does,
454 * for use within this file.
455 */
456
457void mpol_fix_fork_child_flag(struct task_struct *p)
458{
459 if (p->mempolicy)
460 p->flags |= PF_MEMPOLICY;
461 else
462 p->flags &= ~PF_MEMPOLICY;
463}
464
465static void mpol_set_task_struct_flag(void)
466{
467 mpol_fix_fork_child_flag(current);
468}
469
1da177e4 470/* Set the process memory policy */
028fec41
DR
471static long do_set_mempolicy(unsigned short mode, unsigned short flags,
472 nodemask_t *nodes)
1da177e4 473{
1da177e4 474 struct mempolicy *new;
1da177e4 475
31f1de46 476 if (mpol_check_policy(mode, nodes))
1da177e4 477 return -EINVAL;
028fec41 478 new = mpol_new(mode, flags, nodes);
1da177e4
LT
479 if (IS_ERR(new))
480 return PTR_ERR(new);
481 mpol_free(current->mempolicy);
482 current->mempolicy = new;
c61afb18 483 mpol_set_task_struct_flag();
1da177e4 484 if (new && new->policy == MPOL_INTERLEAVE)
dfcd3c0d 485 current->il_next = first_node(new->v.nodes);
1da177e4
LT
486 return 0;
487}
488
489/* Fill a zone bitmap for a policy */
dfcd3c0d 490static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
1da177e4 491{
dfcd3c0d 492 nodes_clear(*nodes);
1da177e4 493 switch (p->policy) {
1da177e4
LT
494 case MPOL_DEFAULT:
495 break;
19770b32
MG
496 case MPOL_BIND:
497 /* Fall through */
1da177e4 498 case MPOL_INTERLEAVE:
dfcd3c0d 499 *nodes = p->v.nodes;
1da177e4
LT
500 break;
501 case MPOL_PREFERRED:
56bbd65d 502 /* or use current node instead of memory_map? */
1da177e4 503 if (p->v.preferred_node < 0)
56bbd65d 504 *nodes = node_states[N_HIGH_MEMORY];
1da177e4 505 else
dfcd3c0d 506 node_set(p->v.preferred_node, *nodes);
1da177e4
LT
507 break;
508 default:
509 BUG();
510 }
511}
512
513static int lookup_node(struct mm_struct *mm, unsigned long addr)
514{
515 struct page *p;
516 int err;
517
518 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
519 if (err >= 0) {
520 err = page_to_nid(p);
521 put_page(p);
522 }
523 return err;
524}
525
1da177e4 526/* Retrieve NUMA policy */
dbcb0f19
AB
527static long do_get_mempolicy(int *policy, nodemask_t *nmask,
528 unsigned long addr, unsigned long flags)
1da177e4 529{
8bccd85f 530 int err;
1da177e4
LT
531 struct mm_struct *mm = current->mm;
532 struct vm_area_struct *vma = NULL;
533 struct mempolicy *pol = current->mempolicy;
534
cf2a473c 535 cpuset_update_task_memory_state();
754af6f5
LS
536 if (flags &
537 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
1da177e4 538 return -EINVAL;
754af6f5
LS
539
540 if (flags & MPOL_F_MEMS_ALLOWED) {
541 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
542 return -EINVAL;
543 *policy = 0; /* just so it's initialized */
544 *nmask = cpuset_current_mems_allowed;
545 return 0;
546 }
547
1da177e4
LT
548 if (flags & MPOL_F_ADDR) {
549 down_read(&mm->mmap_sem);
550 vma = find_vma_intersection(mm, addr, addr+1);
551 if (!vma) {
552 up_read(&mm->mmap_sem);
553 return -EFAULT;
554 }
555 if (vma->vm_ops && vma->vm_ops->get_policy)
556 pol = vma->vm_ops->get_policy(vma, addr);
557 else
558 pol = vma->vm_policy;
559 } else if (addr)
560 return -EINVAL;
561
562 if (!pol)
563 pol = &default_policy;
564
565 if (flags & MPOL_F_NODE) {
566 if (flags & MPOL_F_ADDR) {
567 err = lookup_node(mm, addr);
568 if (err < 0)
569 goto out;
8bccd85f 570 *policy = err;
1da177e4
LT
571 } else if (pol == current->mempolicy &&
572 pol->policy == MPOL_INTERLEAVE) {
8bccd85f 573 *policy = current->il_next;
1da177e4
LT
574 } else {
575 err = -EINVAL;
576 goto out;
577 }
578 } else
028fec41 579 *policy = pol->policy | pol->flags;
1da177e4
LT
580
581 if (vma) {
582 up_read(&current->mm->mmap_sem);
583 vma = NULL;
584 }
585
1da177e4 586 err = 0;
8bccd85f
CL
587 if (nmask)
588 get_zonemask(pol, nmask);
1da177e4
LT
589
590 out:
591 if (vma)
592 up_read(&current->mm->mmap_sem);
593 return err;
594}
595
b20a3503 596#ifdef CONFIG_MIGRATION
6ce3c4c0
CL
597/*
598 * page migration
599 */
fc301289
CL
600static void migrate_page_add(struct page *page, struct list_head *pagelist,
601 unsigned long flags)
6ce3c4c0
CL
602{
603 /*
fc301289 604 * Avoid migrating a page that is shared with others.
6ce3c4c0 605 */
b20a3503
CL
606 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
607 isolate_lru_page(page, pagelist);
7e2ab150 608}
6ce3c4c0 609
742755a1 610static struct page *new_node_page(struct page *page, unsigned long node, int **x)
95a402c3 611{
769848c0 612 return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
95a402c3
CL
613}
614
7e2ab150
CL
615/*
616 * Migrate pages from one node to a target node.
617 * Returns error or the number of pages not migrated.
618 */
dbcb0f19
AB
619static int migrate_to_node(struct mm_struct *mm, int source, int dest,
620 int flags)
7e2ab150
CL
621{
622 nodemask_t nmask;
623 LIST_HEAD(pagelist);
624 int err = 0;
625
626 nodes_clear(nmask);
627 node_set(source, nmask);
6ce3c4c0 628
7e2ab150
CL
629 check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
630 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
631
aaa994b3 632 if (!list_empty(&pagelist))
95a402c3
CL
633 err = migrate_pages(&pagelist, new_node_page, dest);
634
7e2ab150 635 return err;
6ce3c4c0
CL
636}
637
39743889 638/*
7e2ab150
CL
639 * Move pages between the two nodesets so as to preserve the physical
640 * layout as much as possible.
39743889
CL
641 *
642 * Returns the number of pages that could not be moved.
643 */
644int do_migrate_pages(struct mm_struct *mm,
645 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
646{
647 LIST_HEAD(pagelist);
7e2ab150
CL
648 int busy = 0;
649 int err = 0;
650 nodemask_t tmp;
39743889 651
7e2ab150 652 down_read(&mm->mmap_sem);
39743889 653
7b2259b3
CL
654 err = migrate_vmas(mm, from_nodes, to_nodes, flags);
655 if (err)
656 goto out;
657
7e2ab150
CL
658/*
659 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
660 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
661 * bit in 'tmp', and return that <source, dest> pair for migration.
662 * The pair of nodemasks 'to' and 'from' define the map.
663 *
664 * If no pair of bits is found that way, fallback to picking some
665 * pair of 'source' and 'dest' bits that are not the same. If the
666 * 'source' and 'dest' bits are the same, this represents a node
667 * that will be migrating to itself, so no pages need move.
668 *
669 * If no bits are left in 'tmp', or if all remaining bits left
670 * in 'tmp' correspond to the same bit in 'to', return false
671 * (nothing left to migrate).
672 *
673 * This lets us pick a pair of nodes to migrate between, such that
674 * if possible the dest node is not already occupied by some other
675 * source node, minimizing the risk of overloading the memory on a
676 * node that would happen if we migrated incoming memory to a node
677 * before migrating outgoing memory sourced from that same node.
678 *
679 * A single scan of tmp is sufficient. As we go, we remember the
680 * most recent <s, d> pair that moved (s != d). If we find a pair
681 * that not only moved, but what's better, moved to an empty slot
682 * (d is not set in tmp), then we break out then, with that pair.
683 * Otherwise when we finish scanning tmp, we at least have the
684 * most recent <s, d> pair that moved. If we get all the way through
685 * the scan of tmp without finding any node that moved, much less
686 * moved to an empty node, then there is nothing left worth migrating.
687 */
d4984711 688
7e2ab150
CL
689 tmp = *from_nodes;
690 while (!nodes_empty(tmp)) {
691 int s,d;
692 int source = -1;
693 int dest = 0;
694
695 for_each_node_mask(s, tmp) {
696 d = node_remap(s, *from_nodes, *to_nodes);
697 if (s == d)
698 continue;
699
700 source = s; /* Node moved. Memorize */
701 dest = d;
702
703 /* dest not in remaining from nodes? */
704 if (!node_isset(dest, tmp))
705 break;
706 }
707 if (source == -1)
708 break;
709
710 node_clear(source, tmp);
711 err = migrate_to_node(mm, source, dest, flags);
712 if (err > 0)
713 busy += err;
714 if (err < 0)
715 break;
39743889 716 }
7b2259b3 717out:
39743889 718 up_read(&mm->mmap_sem);
7e2ab150
CL
719 if (err < 0)
720 return err;
721 return busy;
b20a3503
CL
722
723}
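/*
 * Concrete trace of the pairing logic above: with from_nodes = {0,1} and
 * to_nodes = {2,3}, node_remap() maps 0->2 and 1->3. The first scan of
 * tmp = {0,1} picks <source=0, dest=2>; node 2 is not in tmp, so the pair
 * is used at once and migrate_to_node() moves this mm's pages from node 0
 * to node 2. Node 0 is then cleared from tmp, the next pass picks <1,3>,
 * and the loop ends with tmp empty.
 */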
724
3ad33b24
LS
725/*
726 * Allocate a new page for page migration based on vma policy.
727 * Start assuming that page is mapped by vma pointed to by @private.
728 * Search forward from there, if not. N.B., this assumes that the
729 * list of pages handed to migrate_pages()--which is how we get here--
730 * is in virtual address order.
731 */
742755a1 732static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
95a402c3
CL
733{
734 struct vm_area_struct *vma = (struct vm_area_struct *)private;
3ad33b24 735 unsigned long uninitialized_var(address);
95a402c3 736
3ad33b24
LS
737 while (vma) {
738 address = page_address_in_vma(page, vma);
739 if (address != -EFAULT)
740 break;
741 vma = vma->vm_next;
742 }
743
744 /*
745 * if !vma, alloc_page_vma() will use task or system default policy
746 */
747 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
95a402c3 748}
b20a3503
CL
749#else
750
751static void migrate_page_add(struct page *page, struct list_head *pagelist,
752 unsigned long flags)
753{
39743889
CL
754}
755
b20a3503
CL
756int do_migrate_pages(struct mm_struct *mm,
757 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
758{
759 return -ENOSYS;
760}
95a402c3 761
69939749 762static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
95a402c3
CL
763{
764 return NULL;
765}
b20a3503
CL
766#endif
767
dbcb0f19 768static long do_mbind(unsigned long start, unsigned long len,
028fec41
DR
769 unsigned short mode, unsigned short mode_flags,
770 nodemask_t *nmask, unsigned long flags)
6ce3c4c0
CL
771{
772 struct vm_area_struct *vma;
773 struct mm_struct *mm = current->mm;
774 struct mempolicy *new;
775 unsigned long end;
776 int err;
777 LIST_HEAD(pagelist);
778
a3b51e01
DR
779 if (flags & ~(unsigned long)(MPOL_MF_STRICT |
780 MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
6ce3c4c0 781 return -EINVAL;
74c00241 782 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
6ce3c4c0
CL
783 return -EPERM;
784
785 if (start & ~PAGE_MASK)
786 return -EINVAL;
787
788 if (mode == MPOL_DEFAULT)
789 flags &= ~MPOL_MF_STRICT;
790
791 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
792 end = start + len;
793
794 if (end < start)
795 return -EINVAL;
796 if (end == start)
797 return 0;
798
799 if (mpol_check_policy(mode, nmask))
800 return -EINVAL;
801
028fec41 802 new = mpol_new(mode, mode_flags, nmask);
6ce3c4c0
CL
803 if (IS_ERR(new))
804 return PTR_ERR(new);
805
806 /*
807 * If we are using the default policy then operation
808 * on discontinuous address spaces is okay after all
809 */
810 if (!new)
811 flags |= MPOL_MF_DISCONTIG_OK;
812
028fec41
DR
813 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
814 start, start + len, mode, mode_flags,
815 nmask ? nodes_addr(*nmask)[0] : -1);
6ce3c4c0
CL
816
817 down_write(&mm->mmap_sem);
818 vma = check_range(mm, start, end, nmask,
819 flags | MPOL_MF_INVERT, &pagelist);
820
821 err = PTR_ERR(vma);
822 if (!IS_ERR(vma)) {
823 int nr_failed = 0;
824
825 err = mbind_range(vma, start, end, new);
7e2ab150 826
6ce3c4c0 827 if (!list_empty(&pagelist))
95a402c3
CL
828 nr_failed = migrate_pages(&pagelist, new_vma_page,
829 (unsigned long)vma);
6ce3c4c0
CL
830
831 if (!err && nr_failed && (flags & MPOL_MF_STRICT))
832 err = -EIO;
833 }
b20a3503 834
6ce3c4c0
CL
835 up_write(&mm->mmap_sem);
836 mpol_free(new);
837 return err;
838}
839
8bccd85f
CL
840/*
841 * User space interface with variable sized bitmaps for nodelists.
842 */
843
844/* Copy a node mask from user space. */
39743889 845static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
8bccd85f
CL
846 unsigned long maxnode)
847{
848 unsigned long k;
849 unsigned long nlongs;
850 unsigned long endmask;
851
852 --maxnode;
853 nodes_clear(*nodes);
854 if (maxnode == 0 || !nmask)
855 return 0;
a9c930ba 856 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
636f13c1 857 return -EINVAL;
8bccd85f
CL
858
859 nlongs = BITS_TO_LONGS(maxnode);
860 if ((maxnode % BITS_PER_LONG) == 0)
861 endmask = ~0UL;
862 else
863 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
864
865 /* When the user specified more nodes than supported, just check
866 if the unsupported part is all zero. */
867 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
868 if (nlongs > PAGE_SIZE/sizeof(long))
869 return -EINVAL;
870 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
871 unsigned long t;
872 if (get_user(t, nmask + k))
873 return -EFAULT;
874 if (k == nlongs - 1) {
875 if (t & endmask)
876 return -EINVAL;
877 } else if (t)
878 return -EINVAL;
879 }
880 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
881 endmask = ~0UL;
882 }
883
884 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
885 return -EFAULT;
886 nodes_addr(*nodes)[nlongs-1] &= endmask;
887 return 0;
888}
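/*
 * Example of the bitmap layout consumed above: to name nodes 1 and 3, a
 * caller passes a word with bits 1 and 3 set, i.e. mask[0] = 0xa. Because
 * get_nodes() first decrements maxnode, only bits 0..maxnode-2 survive the
 * copy, so maxnode must be at least 5 here; passing 8 * sizeof(unsigned
 * long) is the simple, always-safe choice for a single-word mask.
 */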
889
890/* Copy a kernel node mask to user space */
891static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
892 nodemask_t *nodes)
893{
894 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
895 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
896
897 if (copy > nbytes) {
898 if (copy > PAGE_SIZE)
899 return -EINVAL;
900 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
901 return -EFAULT;
902 copy = nbytes;
903 }
904 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
905}
906
907asmlinkage long sys_mbind(unsigned long start, unsigned long len,
908 unsigned long mode,
909 unsigned long __user *nmask, unsigned long maxnode,
910 unsigned flags)
911{
912 nodemask_t nodes;
913 int err;
028fec41 914 unsigned short mode_flags;
8bccd85f 915
028fec41
DR
916 mode_flags = mode & MPOL_MODE_FLAGS;
917 mode &= ~MPOL_MODE_FLAGS;
a3b51e01
DR
918 if (mode >= MPOL_MAX)
919 return -EINVAL;
8bccd85f
CL
920 err = get_nodes(&nodes, nmask, maxnode);
921 if (err)
922 return err;
028fec41 923 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
8bccd85f
CL
924}
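/*
 * Userspace counterpart of sys_mbind(), for illustration (sketch assuming
 * the numaif.h mbind() wrapper from libnuma and <sys/mman.h> for mmap();
 * len is a multiple of the page size):
 *
 *	unsigned long mask = 1UL << 0;		// node 0 only
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	// Bind the range to node 0; MPOL_MF_MOVE also migrates pages that
 *	// were already faulted in elsewhere (see do_mbind() above).
 *	if (mbind(buf, len, MPOL_BIND, &mask, 8 * sizeof(mask), MPOL_MF_MOVE))
 *		perror("mbind");
 */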
925
926/* Set the process memory policy */
927asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
928 unsigned long maxnode)
929{
930 int err;
931 nodemask_t nodes;
028fec41 932 unsigned short flags;
8bccd85f 933
028fec41
DR
934 flags = mode & MPOL_MODE_FLAGS;
935 mode &= ~MPOL_MODE_FLAGS;
936 if ((unsigned int)mode >= MPOL_MAX)
8bccd85f
CL
937 return -EINVAL;
938 err = get_nodes(&nodes, nmask, maxnode);
939 if (err)
940 return err;
028fec41 941 return do_set_mempolicy(mode, flags, &nodes);
8bccd85f
CL
942}
943
39743889
CL
944asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
945 const unsigned long __user *old_nodes,
946 const unsigned long __user *new_nodes)
947{
948 struct mm_struct *mm;
949 struct task_struct *task;
950 nodemask_t old;
951 nodemask_t new;
952 nodemask_t task_nodes;
953 int err;
954
955 err = get_nodes(&old, old_nodes, maxnode);
956 if (err)
957 return err;
958
959 err = get_nodes(&new, new_nodes, maxnode);
960 if (err)
961 return err;
962
963 /* Find the mm_struct */
964 read_lock(&tasklist_lock);
228ebcbe 965 task = pid ? find_task_by_vpid(pid) : current;
39743889
CL
966 if (!task) {
967 read_unlock(&tasklist_lock);
968 return -ESRCH;
969 }
970 mm = get_task_mm(task);
971 read_unlock(&tasklist_lock);
972
973 if (!mm)
974 return -EINVAL;
975
976 /*
977 * Check if this process has the right to modify the specified
978 * process. The right exists if the process has administrative
7f927fcc 979 * capabilities, superuser privileges or the same
39743889
CL
980 * userid as the target process.
981 */
982 if ((current->euid != task->suid) && (current->euid != task->uid) &&
983 (current->uid != task->suid) && (current->uid != task->uid) &&
74c00241 984 !capable(CAP_SYS_NICE)) {
39743889
CL
985 err = -EPERM;
986 goto out;
987 }
988
989 task_nodes = cpuset_mems_allowed(task);
990 /* Is the user allowed to access the target nodes? */
74c00241 991 if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
39743889
CL
992 err = -EPERM;
993 goto out;
994 }
995
37b07e41 996 if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
3b42d28b
CL
997 err = -EINVAL;
998 goto out;
999 }
1000
86c3a764
DQ
1001 err = security_task_movememory(task);
1002 if (err)
1003 goto out;
1004
511030bc 1005 err = do_migrate_pages(mm, &old, &new,
74c00241 1006 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
39743889
CL
1007out:
1008 mmput(mm);
1009 return err;
1010}
1011
1012
8bccd85f
CL
1013/* Retrieve NUMA policy */
1014asmlinkage long sys_get_mempolicy(int __user *policy,
1015 unsigned long __user *nmask,
1016 unsigned long maxnode,
1017 unsigned long addr, unsigned long flags)
1018{
dbcb0f19
AB
1019 int err;
1020 int uninitialized_var(pval);
8bccd85f
CL
1021 nodemask_t nodes;
1022
1023 if (nmask != NULL && maxnode < MAX_NUMNODES)
1024 return -EINVAL;
1025
1026 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1027
1028 if (err)
1029 return err;
1030
1031 if (policy && put_user(pval, policy))
1032 return -EFAULT;
1033
1034 if (nmask)
1035 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1036
1037 return err;
1038}
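/*
 * Example use of the MPOL_F_NODE|MPOL_F_ADDR path above (sketch using the
 * numaif.h get_mempolicy() wrapper): ask which node currently backs the
 * page at a given address.
 *
 *	int node = -1;
 *	if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("page at %p is on node %d\n", addr, node);
 */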
1039
1da177e4
LT
1040#ifdef CONFIG_COMPAT
1041
1042asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1043 compat_ulong_t __user *nmask,
1044 compat_ulong_t maxnode,
1045 compat_ulong_t addr, compat_ulong_t flags)
1046{
1047 long err;
1048 unsigned long __user *nm = NULL;
1049 unsigned long nr_bits, alloc_size;
1050 DECLARE_BITMAP(bm, MAX_NUMNODES);
1051
1052 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1053 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1054
1055 if (nmask)
1056 nm = compat_alloc_user_space(alloc_size);
1057
1058 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1059
1060 if (!err && nmask) {
1061 err = copy_from_user(bm, nm, alloc_size);
1062 /* ensure entire bitmap is zeroed */
1063 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1064 err |= compat_put_bitmap(nmask, bm, nr_bits);
1065 }
1066
1067 return err;
1068}
1069
1070asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1071 compat_ulong_t maxnode)
1072{
1073 long err = 0;
1074 unsigned long __user *nm = NULL;
1075 unsigned long nr_bits, alloc_size;
1076 DECLARE_BITMAP(bm, MAX_NUMNODES);
1077
1078 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1079 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1080
1081 if (nmask) {
1082 err = compat_get_bitmap(bm, nmask, nr_bits);
1083 nm = compat_alloc_user_space(alloc_size);
1084 err |= copy_to_user(nm, bm, alloc_size);
1085 }
1086
1087 if (err)
1088 return -EFAULT;
1089
1090 return sys_set_mempolicy(mode, nm, nr_bits+1);
1091}
1092
1093asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1094 compat_ulong_t mode, compat_ulong_t __user *nmask,
1095 compat_ulong_t maxnode, compat_ulong_t flags)
1096{
1097 long err = 0;
1098 unsigned long __user *nm = NULL;
1099 unsigned long nr_bits, alloc_size;
dfcd3c0d 1100 nodemask_t bm;
1da177e4
LT
1101
1102 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1103 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1104
1105 if (nmask) {
dfcd3c0d 1106 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1da177e4 1107 nm = compat_alloc_user_space(alloc_size);
dfcd3c0d 1108 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1da177e4
LT
1109 }
1110
1111 if (err)
1112 return -EFAULT;
1113
1114 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1115}
1116
1117#endif
1118
480eccf9
LS
1119/*
1120 * get_vma_policy(@task, @vma, @addr)
1121 * @task - task for fallback if vma policy == default
1122 * @vma - virtual memory area whose policy is sought
1123 * @addr - address in @vma for shared policy lookup
1124 *
1125 * Returns effective policy for a VMA at specified address.
1126 * Falls back to @task or system default policy, as necessary.
1127 * The returned policy has an extra reference count if it is shared,
1128 * a vma policy, or some other task's policy [show_numa_maps() can pass
1129 * @task != current]. It is the caller's responsibility to
1130 * free the reference in these cases.
1131 */
48fce342
CL
1132static struct mempolicy * get_vma_policy(struct task_struct *task,
1133 struct vm_area_struct *vma, unsigned long addr)
1da177e4 1134{
6e21c8f1 1135 struct mempolicy *pol = task->mempolicy;
480eccf9 1136 int shared_pol = 0;
1da177e4
LT
1137
1138 if (vma) {
480eccf9 1139 if (vma->vm_ops && vma->vm_ops->get_policy) {
8bccd85f 1140 pol = vma->vm_ops->get_policy(vma, addr);
480eccf9
LS
1141 shared_pol = 1; /* if pol non-NULL, add ref below */
1142 } else if (vma->vm_policy &&
1da177e4
LT
1143 vma->vm_policy->policy != MPOL_DEFAULT)
1144 pol = vma->vm_policy;
1145 }
1146 if (!pol)
1147 pol = &default_policy;
480eccf9
LS
1148 else if (!shared_pol && pol != current->mempolicy)
1149 mpol_get(pol); /* vma or other task's policy */
1da177e4
LT
1150 return pol;
1151}
1152
19770b32
MG
1153/* Return a nodemask representing a mempolicy */
1154static nodemask_t *nodemask_policy(gfp_t gfp, struct mempolicy *policy)
1155{
1156 /* Lower zones don't get a nodemask applied for MPOL_BIND */
1157 if (unlikely(policy->policy == MPOL_BIND) &&
1158 gfp_zone(gfp) >= policy_zone &&
1159 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1160 return &policy->v.nodes;
1161
1162 return NULL;
1163}
1164
1da177e4 1165/* Return a zonelist representing a mempolicy */
dd0fc66f 1166static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
1da177e4
LT
1167{
1168 int nd;
1169
1170 switch (policy->policy) {
1171 case MPOL_PREFERRED:
1172 nd = policy->v.preferred_node;
1173 if (nd < 0)
1174 nd = numa_node_id();
1175 break;
1176 case MPOL_BIND:
19770b32
MG
1177 /*
1178 * Normally, MPOL_BIND allocations are node-local within the
1179 * allowed nodemask. However, if __GFP_THISNODE is set and the
1180 * current node isn't part of the mask, we use the zonelist for
1181 * the first node in the mask instead.
1182 */
1183 nd = numa_node_id();
1184 if (unlikely(gfp & __GFP_THISNODE) &&
1185 unlikely(!node_isset(nd, policy->v.nodes)))
1186 nd = first_node(policy->v.nodes);
1187 break;
1da177e4
LT
1188 case MPOL_INTERLEAVE: /* should not happen */
1189 case MPOL_DEFAULT:
1190 nd = numa_node_id();
1191 break;
1192 default:
1193 nd = 0;
1194 BUG();
1195 }
0e88460d 1196 return node_zonelist(nd, gfp);
1da177e4
LT
1197}
1198
1199/* Do dynamic interleaving for a process */
1200static unsigned interleave_nodes(struct mempolicy *policy)
1201{
1202 unsigned nid, next;
1203 struct task_struct *me = current;
1204
1205 nid = me->il_next;
dfcd3c0d 1206 next = next_node(nid, policy->v.nodes);
1da177e4 1207 if (next >= MAX_NUMNODES)
dfcd3c0d 1208 next = first_node(policy->v.nodes);
1da177e4
LT
1209 me->il_next = next;
1210 return nid;
1211}
1212
dc85da15
CL
1213/*
1214 * Depending on the memory policy provide a node from which to allocate the
1215 * next slab entry.
1216 */
1217unsigned slab_node(struct mempolicy *policy)
1218{
a3b51e01 1219 unsigned short pol = policy ? policy->policy : MPOL_DEFAULT;
765c4507
CL
1220
1221 switch (pol) {
dc85da15
CL
1222 case MPOL_INTERLEAVE:
1223 return interleave_nodes(policy);
1224
dd1a239f 1225 case MPOL_BIND: {
dc85da15
CL
1226 /*
1227 * Follow bind policy behavior and start allocation at the
1228 * first node.
1229 */
19770b32
MG
1230 struct zonelist *zonelist;
1231 struct zone *zone;
1232 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1233 zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1234 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1235 &policy->v.nodes,
1236 &zone);
1237 return zone->node;
dd1a239f 1238 }
dc85da15
CL
1239
1240 case MPOL_PREFERRED:
1241 if (policy->v.preferred_node >= 0)
1242 return policy->v.preferred_node;
1243 /* Fall through */
1244
1245 default:
1246 return numa_node_id();
1247 }
1248}
1249
1da177e4
LT
1250/* Do static interleaving for a VMA with known offset. */
1251static unsigned offset_il_node(struct mempolicy *pol,
1252 struct vm_area_struct *vma, unsigned long off)
1253{
dfcd3c0d 1254 unsigned nnodes = nodes_weight(pol->v.nodes);
1da177e4
LT
1255 unsigned target = (unsigned)off % nnodes;
1256 int c;
1257 int nid = -1;
1258
1259 c = 0;
1260 do {
dfcd3c0d 1261 nid = next_node(nid, pol->v.nodes);
1da177e4
LT
1262 c++;
1263 } while (c <= target);
1da177e4
LT
1264 return nid;
1265}
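/*
 * Worked example of the static interleave above: with pol->v.nodes = {0,2,3}
 * (weight 3) and off = 7, target = 7 % 3 = 1 and the walk stops on the
 * second node of the mask, returning node 2. Successive offsets 0,1,2,3,...
 * therefore map to nodes 0,2,3,0,... for this policy.
 */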
1266
5da7ca86
CL
1267/* Determine a node number for interleave */
1268static inline unsigned interleave_nid(struct mempolicy *pol,
1269 struct vm_area_struct *vma, unsigned long addr, int shift)
1270{
1271 if (vma) {
1272 unsigned long off;
1273
3b98b087
NA
1274 /*
1275 * for small pages, there is no difference between
1276 * shift and PAGE_SHIFT, so the bit-shift is safe.
1277 * for huge pages, since vm_pgoff is in units of small
1278 * pages, we need to shift off the always 0 bits to get
1279 * a useful offset.
1280 */
1281 BUG_ON(shift < PAGE_SHIFT);
1282 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
5da7ca86
CL
1283 off += (addr - vma->vm_start) >> shift;
1284 return offset_il_node(pol, vma, off);
1285 } else
1286 return interleave_nodes(pol);
1287}
1288
00ac59ad 1289#ifdef CONFIG_HUGETLBFS
480eccf9
LS
1290/*
1291 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1292 * @vma = virtual memory area whose policy is sought
1293 * @addr = address in @vma for shared policy lookup and interleave policy
1294 * @gfp_flags = for requested zone
19770b32
MG
1295 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1296 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
480eccf9
LS
1297 *
1298 * Returns a zonelist suitable for a huge page allocation.
19770b32
MG
1299 * If the effective policy is 'BIND, returns pointer to local node's zonelist,
1300 * and a pointer to the mempolicy's @nodemask for filtering the zonelist.
480eccf9 1301 * If it is also a policy for which get_vma_policy() returns an extra
19770b32 1302 * reference, we must hold that reference until after the allocation.
480eccf9 1303 * In that case, return policy via @mpol so hugetlb allocation can drop
19770b32 1304 * the reference. For non-'BIND referenced policies, we can/do drop the
480eccf9
LS
1305 * reference here, so the caller doesn't need to know about the special case
1306 * for default and current task policy.
1307 */
396faf03 1308struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
19770b32
MG
1309 gfp_t gfp_flags, struct mempolicy **mpol,
1310 nodemask_t **nodemask)
5da7ca86
CL
1311{
1312 struct mempolicy *pol = get_vma_policy(current, vma, addr);
480eccf9 1313 struct zonelist *zl;
5da7ca86 1314
480eccf9 1315 *mpol = NULL; /* probably no unref needed */
19770b32
MG
1316 *nodemask = NULL; /* assume !MPOL_BIND */
1317 if (pol->policy == MPOL_BIND) {
1318 *nodemask = &pol->v.nodes;
1319 } else if (pol->policy == MPOL_INTERLEAVE) {
5da7ca86
CL
1320 unsigned nid;
1321
1322 nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
69682d85
LS
1323 if (unlikely(pol != &default_policy &&
1324 pol != current->mempolicy))
1325 __mpol_free(pol); /* finished with pol */
0e88460d 1326 return node_zonelist(nid, gfp_flags);
5da7ca86 1327 }
480eccf9
LS
1328
1329 zl = zonelist_policy(GFP_HIGHUSER, pol);
1330 if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
1331 if (pol->policy != MPOL_BIND)
1332 __mpol_free(pol); /* finished with pol */
1333 else
1334 *mpol = pol; /* unref needed after allocation */
1335 }
1336 return zl;
5da7ca86 1337}
00ac59ad 1338#endif
5da7ca86 1339
1da177e4
LT
1340/* Allocate a page in interleaved policy.
1341 Own path because it needs to do special accounting. */
662f3a0b
AK
1342static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1343 unsigned nid)
1da177e4
LT
1344{
1345 struct zonelist *zl;
1346 struct page *page;
1347
0e88460d 1348 zl = node_zonelist(nid, gfp);
1da177e4 1349 page = __alloc_pages(gfp, order, zl);
dd1a239f 1350 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
ca889e6c 1351 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1da177e4
LT
1352 return page;
1353}
1354
1355/**
1356 * alloc_page_vma - Allocate a page for a VMA.
1357 *
1358 * @gfp:
1359 * %GFP_USER user allocation.
1360 * %GFP_KERNEL kernel allocations,
1361 * %GFP_HIGHMEM highmem/user allocations,
1362 * %GFP_FS allocation should not call back into a file system.
1363 * %GFP_ATOMIC don't sleep.
1364 *
1365 * @vma: Pointer to VMA or NULL if not available.
1366 * @addr: Virtual Address of the allocation. Must be inside the VMA.
1367 *
1368 * This function allocates a page from the kernel page pool and applies
1369 * a NUMA policy associated with the VMA or the current process.
1370 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
1371 * mm_struct of the VMA to prevent it from going away. Should be used for
1372 * all allocations for pages that will be mapped into
1373 * user space. Returns NULL when no page can be allocated.
1374 *
1375 * Should be called with the mmap_sem of the vma held.
1376 */
1377struct page *
dd0fc66f 1378alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
1da177e4 1379{
6e21c8f1 1380 struct mempolicy *pol = get_vma_policy(current, vma, addr);
480eccf9 1381 struct zonelist *zl;
1da177e4 1382
cf2a473c 1383 cpuset_update_task_memory_state();
1da177e4
LT
1384
1385 if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
1386 unsigned nid;
5da7ca86
CL
1387
1388 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
69682d85
LS
1389 if (unlikely(pol != &default_policy &&
1390 pol != current->mempolicy))
1391 __mpol_free(pol); /* finished with pol */
1da177e4
LT
1392 return alloc_page_interleave(gfp, 0, nid);
1393 }
480eccf9
LS
1394 zl = zonelist_policy(gfp, pol);
1395 if (pol != &default_policy && pol != current->mempolicy) {
1396 /*
1397 * slow path: ref counted policy -- shared or vma
1398 */
19770b32
MG
1399 struct page *page = __alloc_pages_nodemask(gfp, 0,
1400 zl, nodemask_policy(gfp, pol));
480eccf9
LS
1401 __mpol_free(pol);
1402 return page;
1403 }
1404 /*
1405 * fast path: default or task policy
1406 */
19770b32 1407 return __alloc_pages_nodemask(gfp, 0, zl, nodemask_policy(gfp, pol));
1da177e4
LT
1408}
1409
1410/**
1411 * alloc_pages_current - Allocate pages.
1412 *
1413 * @gfp:
1414 * %GFP_USER user allocation,
1415 * %GFP_KERNEL kernel allocation,
1416 * %GFP_HIGHMEM highmem allocation,
1417 * %GFP_FS don't call back into a file system.
1418 * %GFP_ATOMIC don't sleep.
1419 * @order: Power of two of allocation size in pages. 0 is a single page.
1420 *
1421 * Allocate a page from the kernel page pool and, when not in interrupt
1422 * context, apply the current process' NUMA policy.
1423 * Returns NULL when no page can be allocated.
1424 *
cf2a473c 1425 * Don't call cpuset_update_task_memory_state() unless
1da177e4
LT
1426 * 1) it's ok to take cpuset_sem (can WAIT), and
1427 * 2) allocating for current task (not interrupt).
1428 */
dd0fc66f 1429struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1da177e4
LT
1430{
1431 struct mempolicy *pol = current->mempolicy;
1432
1433 if ((gfp & __GFP_WAIT) && !in_interrupt())
cf2a473c 1434 cpuset_update_task_memory_state();
9b819d20 1435 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1da177e4
LT
1436 pol = &default_policy;
1437 if (pol->policy == MPOL_INTERLEAVE)
1438 return alloc_page_interleave(gfp, order, interleave_nodes(pol));
19770b32
MG
1439 return __alloc_pages_nodemask(gfp, order,
1440 zonelist_policy(gfp, pol), nodemask_policy(gfp, pol));
1da177e4
LT
1441}
1442EXPORT_SYMBOL(alloc_pages_current);
1443
4225399a
PJ
1444/*
1445 * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
1446 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
1447 * with the mems_allowed returned by cpuset_mems_allowed(). This
1448 * keeps mempolicies cpuset relative after its cpuset moves. See
1449 * further kernel/cpuset.c update_nodemask().
1450 */
4225399a 1451
1da177e4
LT
1452/* Slow path of a mempolicy copy */
1453struct mempolicy *__mpol_copy(struct mempolicy *old)
1454{
1455 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1456
1457 if (!new)
1458 return ERR_PTR(-ENOMEM);
4225399a
PJ
1459 if (current_cpuset_is_being_rebound()) {
1460 nodemask_t mems = cpuset_mems_allowed(current);
1461 mpol_rebind_policy(old, &mems);
1462 }
1da177e4
LT
1463 *new = *old;
1464 atomic_set(&new->refcnt, 1);
1da177e4
LT
1465 return new;
1466}
1467
1468/* Slow path of a mempolicy comparison */
1469int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1470{
1471 if (!a || !b)
1472 return 0;
1473 if (a->policy != b->policy)
1474 return 0;
1475 switch (a->policy) {
1476 case MPOL_DEFAULT:
1477 return 1;
19770b32
MG
1478 case MPOL_BIND:
1479 /* Fall through */
1da177e4 1480 case MPOL_INTERLEAVE:
dfcd3c0d 1481 return nodes_equal(a->v.nodes, b->v.nodes);
1da177e4
LT
1482 case MPOL_PREFERRED:
1483 return a->v.preferred_node == b->v.preferred_node;
1da177e4
LT
1484 default:
1485 BUG();
1486 return 0;
1487 }
1488}
1489
1490/* Slow path of a mpol destructor. */
1491void __mpol_free(struct mempolicy *p)
1492{
1493 if (!atomic_dec_and_test(&p->refcnt))
1494 return;
1da177e4
LT
1495 p->policy = MPOL_DEFAULT;
1496 kmem_cache_free(policy_cache, p);
1497}
1498
1da177e4
LT
1499/*
1500 * Shared memory backing store policy support.
1501 *
1502 * Remember policies even when nobody has shared memory mapped.
1503 * The policies are kept in Red-Black tree linked from the inode.
1504 * They are protected by the sp->lock spinlock, which should be held
1505 * for any accesses to the tree.
1506 */
1507
1508/* lookup first element intersecting start-end */
1509/* Caller holds sp->lock */
1510static struct sp_node *
1511sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
1512{
1513 struct rb_node *n = sp->root.rb_node;
1514
1515 while (n) {
1516 struct sp_node *p = rb_entry(n, struct sp_node, nd);
1517
1518 if (start >= p->end)
1519 n = n->rb_right;
1520 else if (end <= p->start)
1521 n = n->rb_left;
1522 else
1523 break;
1524 }
1525 if (!n)
1526 return NULL;
1527 for (;;) {
1528 struct sp_node *w = NULL;
1529 struct rb_node *prev = rb_prev(n);
1530 if (!prev)
1531 break;
1532 w = rb_entry(prev, struct sp_node, nd);
1533 if (w->end <= start)
1534 break;
1535 n = prev;
1536 }
1537 return rb_entry(n, struct sp_node, nd);
1538}
1539
1540/* Insert a new shared policy into the list. */
1541/* Caller holds sp->lock */
1542static void sp_insert(struct shared_policy *sp, struct sp_node *new)
1543{
1544 struct rb_node **p = &sp->root.rb_node;
1545 struct rb_node *parent = NULL;
1546 struct sp_node *nd;
1547
1548 while (*p) {
1549 parent = *p;
1550 nd = rb_entry(parent, struct sp_node, nd);
1551 if (new->start < nd->start)
1552 p = &(*p)->rb_left;
1553 else if (new->end > nd->end)
1554 p = &(*p)->rb_right;
1555 else
1556 BUG();
1557 }
1558 rb_link_node(&new->nd, parent, p);
1559 rb_insert_color(&new->nd, &sp->root);
140d5a49 1560 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
1da177e4
LT
1561 new->policy ? new->policy->policy : 0);
1562}
1563
1564/* Find shared policy intersecting idx */
1565struct mempolicy *
1566mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
1567{
1568 struct mempolicy *pol = NULL;
1569 struct sp_node *sn;
1570
1571 if (!sp->root.rb_node)
1572 return NULL;
1573 spin_lock(&sp->lock);
1574 sn = sp_lookup(sp, idx, idx+1);
1575 if (sn) {
1576 mpol_get(sn->policy);
1577 pol = sn->policy;
1578 }
1579 spin_unlock(&sp->lock);
1580 return pol;
1581}
1582
1583static void sp_delete(struct shared_policy *sp, struct sp_node *n)
1584{
140d5a49 1585 pr_debug("deleting %lx-%lx\n", n->start, n->end);
1da177e4
LT
1586 rb_erase(&n->nd, &sp->root);
1587 mpol_free(n->policy);
1588 kmem_cache_free(sn_cache, n);
1589}
1590
dbcb0f19
AB
1591static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1592 struct mempolicy *pol)
1da177e4
LT
1593{
1594 struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1595
1596 if (!n)
1597 return NULL;
1598 n->start = start;
1599 n->end = end;
1600 mpol_get(pol);
1601 n->policy = pol;
1602 return n;
1603}
1604
1605/* Replace a policy range. */
1606static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
1607 unsigned long end, struct sp_node *new)
1608{
1609 struct sp_node *n, *new2 = NULL;
1610
1611restart:
1612 spin_lock(&sp->lock);
1613 n = sp_lookup(sp, start, end);
1614 /* Take care of old policies in the same range. */
1615 while (n && n->start < end) {
1616 struct rb_node *next = rb_next(&n->nd);
1617 if (n->start >= start) {
1618 if (n->end <= end)
1619 sp_delete(sp, n);
1620 else
1621 n->start = end;
1622 } else {
1623 /* Old policy spanning whole new range. */
1624 if (n->end > end) {
1625 if (!new2) {
1626 spin_unlock(&sp->lock);
1627 new2 = sp_alloc(end, n->end, n->policy);
1628 if (!new2)
1629 return -ENOMEM;
1630 goto restart;
1631 }
1632 n->end = start;
1633 sp_insert(sp, new2);
1634 new2 = NULL;
1635 break;
1636 } else
1637 n->end = start;
1638 }
1639 if (!next)
1640 break;
1641 n = rb_entry(next, struct sp_node, nd);
1642 }
1643 if (new)
1644 sp_insert(sp, new);
1645 spin_unlock(&sp->lock);
1646 if (new2) {
1647 mpol_free(new2->policy);
1648 kmem_cache_free(sn_cache, new2);
1649 }
1650 return 0;
1651}
1652
a3b51e01 1653void mpol_shared_policy_init(struct shared_policy *info, unsigned short policy,
028fec41 1654 unsigned short flags, nodemask_t *policy_nodes)
7339ff83
RH
1655{
1656 info->root = RB_ROOT;
1657 spin_lock_init(&info->lock);
1658
1659 if (policy != MPOL_DEFAULT) {
1660 struct mempolicy *newpol;
1661
1662 /* Falls back to MPOL_DEFAULT on any error */
028fec41 1663 newpol = mpol_new(policy, flags, policy_nodes);
7339ff83
RH
1664 if (!IS_ERR(newpol)) {
1665 /* Create pseudo-vma that contains just the policy */
1666 struct vm_area_struct pvma;
1667
1668 memset(&pvma, 0, sizeof(struct vm_area_struct));
1669 /* Policy covers entire file */
1670 pvma.vm_end = TASK_SIZE;
1671 mpol_set_shared_policy(info, &pvma, newpol);
1672 mpol_free(newpol);
1673 }
1674 }
1675}
1676
1da177e4
LT
1677int mpol_set_shared_policy(struct shared_policy *info,
1678 struct vm_area_struct *vma, struct mempolicy *npol)
1679{
1680 int err;
1681 struct sp_node *new = NULL;
1682 unsigned long sz = vma_pages(vma);
1683
028fec41 1684 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
1da177e4 1685 vma->vm_pgoff,
028fec41
DR
1686 sz, npol ? npol->policy : -1,
1687 npol ? npol->flags : -1,
140d5a49 1688 npol ? nodes_addr(npol->v.nodes)[0] : -1);
1da177e4
LT
1689
1690 if (npol) {
1691 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
1692 if (!new)
1693 return -ENOMEM;
1694 }
1695 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
1696 if (err && new)
1697 kmem_cache_free(sn_cache, new);
1698 return err;
1699}
1700
1701/* Free a backing policy store on inode delete. */
1702void mpol_free_shared_policy(struct shared_policy *p)
1703{
1704 struct sp_node *n;
1705 struct rb_node *next;
1706
1707 if (!p->root.rb_node)
1708 return;
1709 spin_lock(&p->lock);
1710 next = rb_first(&p->root);
1711 while (next) {
1712 n = rb_entry(next, struct sp_node, nd);
1713 next = rb_next(&n->nd);
90c5029e 1714 rb_erase(&n->nd, &p->root);
1da177e4
LT
1715 mpol_free(n->policy);
1716 kmem_cache_free(sn_cache, n);
1717 }
1718 spin_unlock(&p->lock);
1da177e4
LT
1719}
1720
1721/* assumes fs == KERNEL_DS */
1722void __init numa_policy_init(void)
1723{
b71636e2
PM
1724 nodemask_t interleave_nodes;
1725 unsigned long largest = 0;
1726 int nid, prefer = 0;
1727
1da177e4
LT
1728 policy_cache = kmem_cache_create("numa_policy",
1729 sizeof(struct mempolicy),
20c2df83 1730 0, SLAB_PANIC, NULL);
1da177e4
LT
1731
1732 sn_cache = kmem_cache_create("shared_policy_node",
1733 sizeof(struct sp_node),
20c2df83 1734 0, SLAB_PANIC, NULL);
1da177e4 1735
b71636e2
PM
1736 /*
1737 * Set interleaving policy for system init. Interleaving is only
1738 * enabled across suitably sized nodes (default is >= 16MB); otherwise we
1739 * fall back to the largest node if they're all smaller.
1740 */
1741 nodes_clear(interleave_nodes);
56bbd65d 1742 for_each_node_state(nid, N_HIGH_MEMORY) {
b71636e2
PM
1743 unsigned long total_pages = node_present_pages(nid);
1744
1745 /* Preserve the largest node */
1746 if (largest < total_pages) {
1747 largest = total_pages;
1748 prefer = nid;
1749 }
1750
1751 /* Interleave this node? */
1752 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
1753 node_set(nid, interleave_nodes);
1754 }
1755
1756 /* All too small, use the largest */
1757 if (unlikely(nodes_empty(interleave_nodes)))
1758 node_set(prefer, interleave_nodes);
1da177e4 1759
028fec41 1760 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
1da177e4
LT
1761 printk("numa_policy_init: interleaving failed\n");
1762}
1763
8bccd85f 1764/* Reset policy of current process to default */
1da177e4
LT
1765void numa_default_policy(void)
1766{
028fec41 1767 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
1da177e4 1768}
68860ec1
PJ
1769
1770/* Migrate a policy to a different set of nodes */
dbcb0f19
AB
1771static void mpol_rebind_policy(struct mempolicy *pol,
1772 const nodemask_t *newmask)
68860ec1 1773{
74cb2155 1774 nodemask_t *mpolmask;
68860ec1
PJ
1775 nodemask_t tmp;
1776
1777 if (!pol)
1778 return;
74cb2155
PJ
1779 mpolmask = &pol->cpuset_mems_allowed;
1780 if (nodes_equal(*mpolmask, *newmask))
1781 return;
68860ec1
PJ
1782
1783 switch (pol->policy) {
1784 case MPOL_DEFAULT:
1785 break;
19770b32
MG
1786 case MPOL_BIND:
1787 /* Fall through */
68860ec1 1788 case MPOL_INTERLEAVE:
74cb2155 1789 nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
68860ec1 1790 pol->v.nodes = tmp;
74cb2155
PJ
1791 *mpolmask = *newmask;
1792 current->il_next = node_remap(current->il_next,
1793 *mpolmask, *newmask);
68860ec1
PJ
1794 break;
1795 case MPOL_PREFERRED:
1796 pol->v.preferred_node = node_remap(pol->v.preferred_node,
74cb2155
PJ
1797 *mpolmask, *newmask);
1798 *mpolmask = *newmask;
68860ec1 1799 break;
68860ec1
PJ
1800 default:
1801 BUG();
1802 break;
1803 }
1804}
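/*
 * Editor's illustrative sketch (not part of mempolicy.c): the remap above
 * renumbers policy nodes by relative position -- the n-th node of the old
 * cpuset mems becomes the n-th node of the new mems.  Simplified
 * single-word rendition of that idea; the kernel's nodes_remap() and
 * node_remap() work on nodemask_t bitmaps and leave bits outside the old
 * mask in place.
 */
#include <stdio.h>

#define NBITS 64

static unsigned long long remap_relative(unsigned long long pol,
					 unsigned long long oldmask,
					 unsigned long long newmask)
{
	unsigned long long out = 0;
	int b, i, ord, n;

	for (b = 0; b < NBITS; b++) {
		if (!(pol & (1ULL << b)) || !(oldmask & (1ULL << b)))
			continue;
		/* ordinal of bit b among the set bits of oldmask */
		for (ord = 0, i = 0; i < b; i++)
			if (oldmask & (1ULL << i))
				ord++;
		/* set the bit holding the same ordinal in newmask */
		for (n = 0, i = 0; i < NBITS; i++)
			if ((newmask & (1ULL << i)) && n++ == ord) {
				out |= 1ULL << i;
				break;
			}
	}
	return out;
}

int main(void)
{
	/* interleave over {0,1}; cpuset mems move from {0,1} to {2,3} */
	printf("%#llx\n", remap_relative(0x3, 0x3, 0xc));	/* prints 0xc */
	return 0;
}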
1805
1806/*
74cb2155
PJ
1807 * Wrapper for mpol_rebind_policy() that just requires task
1808 * pointer, and updates task mempolicy.
68860ec1 1809 */
74cb2155
PJ
1810
1811void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
68860ec1 1812{
74cb2155 1813 mpol_rebind_policy(tsk->mempolicy, new);
68860ec1 1814}
1a75a6c8 1815
4225399a
PJ
1816/*
1817 * Rebind each vma in mm to new nodemask.
1818 *
1819 * Call holding a reference to mm. Takes mm->mmap_sem during call.
1820 */
1821
1822void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
1823{
1824 struct vm_area_struct *vma;
1825
1826 down_write(&mm->mmap_sem);
1827 for (vma = mm->mmap; vma; vma = vma->vm_next)
1828 mpol_rebind_policy(vma->vm_policy, new);
1829 up_write(&mm->mmap_sem);
1830}
1831
1a75a6c8
CL
1832/*
1833 * Display pages allocated per node and memory policy via /proc.
1834 */
1835
15ad7cdc
HD
1836static const char * const policy_types[] =
1837 { "default", "prefer", "bind", "interleave" };
1a75a6c8
CL
1838
1839/*
1840 * Convert a mempolicy into a string.
1841 * Returns the number of characters in buffer (if positive)
1842 * or an error (negative)
1843 */
1844static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1845{
1846 char *p = buffer;
1847 int l;
1848 nodemask_t nodes;
a3b51e01 1849 unsigned short mode = pol ? pol->policy : MPOL_DEFAULT;
1a75a6c8
CL
1850
1851 switch (mode) {
1852 case MPOL_DEFAULT:
1853 nodes_clear(nodes);
1854 break;
1855
1856 case MPOL_PREFERRED:
1857 nodes_clear(nodes);
1858 node_set(pol->v.preferred_node, nodes);
1859 break;
1860
1861 case MPOL_BIND:
19770b32 1862 /* Fall through */
1a75a6c8
CL
1863 case MPOL_INTERLEAVE:
1864 nodes = pol->v.nodes;
1865 break;
1866
1867 default:
1868 BUG();
1869 return -EFAULT;
1870 }
1871
1872 l = strlen(policy_types[mode]);
1873 if (buffer + maxlen < p + l + 1)
1874 return -ENOSPC;
1875
1876 strcpy(p, policy_types[mode]);
1877 p += l;
1878
1879 if (!nodes_empty(nodes)) {
1880 if (buffer + maxlen < p + 2)
1881 return -ENOSPC;
1882 *p++ = '=';
1883 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
1884 }
1885 return p - buffer;
1886}
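/*
 * Editor's illustrative sketch (not part of mempolicy.c): the node list
 * appended after '=' is produced by nodelist_scnprintf() in ranged form,
 * so policies render as e.g. "interleave=0-3", "bind=0,2" or "prefer=1".
 * Simplified userspace rendition of that formatting, assuming a
 * single-word node mask.
 */
#include <stdio.h>

static void print_nodelist(const char *mode, unsigned long long mask)
{
	int bit = 0, first = 1;

	printf("%s%s", mode, mask ? "=" : "");
	while (bit < 64) {
		int start;

		if (!(mask & (1ULL << bit))) {
			bit++;
			continue;
		}
		start = bit;
		while (bit < 64 && (mask & (1ULL << bit)))
			bit++;
		printf("%s%d", first ? "" : ",", start);
		if (bit - 1 > start)
			printf("-%d", bit - 1);
		first = 0;
	}
	printf("\n");
}

int main(void)
{
	print_nodelist("default", 0x0);		/* "default"        */
	print_nodelist("prefer", 0x2);		/* "prefer=1"       */
	print_nodelist("bind", 0x5);		/* "bind=0,2"       */
	print_nodelist("interleave", 0xf);	/* "interleave=0-3" */
	return 0;
}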
1887
1888struct numa_maps {
1889 unsigned long pages;
1890 unsigned long anon;
397874df
CL
1891 unsigned long active;
1892 unsigned long writeback;
1a75a6c8 1893 unsigned long mapcount_max;
397874df
CL
1894 unsigned long dirty;
1895 unsigned long swapcache;
1a75a6c8
CL
1896 unsigned long node[MAX_NUMNODES];
1897};
1898
397874df 1899static void gather_stats(struct page *page, void *private, int pte_dirty)
1a75a6c8
CL
1900{
1901 struct numa_maps *md = private;
1902 int count = page_mapcount(page);
1903
397874df
CL
1904 md->pages++;
1905 if (pte_dirty || PageDirty(page))
1906 md->dirty++;
1a75a6c8 1907
397874df
CL
1908 if (PageSwapCache(page))
1909 md->swapcache++;
1a75a6c8 1910
397874df
CL
1911 if (PageActive(page))
1912 md->active++;
1913
1914 if (PageWriteback(page))
1915 md->writeback++;
1a75a6c8
CL
1916
1917 if (PageAnon(page))
1918 md->anon++;
1919
397874df
CL
1920 if (count > md->mapcount_max)
1921 md->mapcount_max = count;
1922
1a75a6c8 1923 md->node[page_to_nid(page)]++;
1a75a6c8
CL
1924}
1925
7f709ed0 1926#ifdef CONFIG_HUGETLB_PAGE
397874df
CL
1927static void check_huge_range(struct vm_area_struct *vma,
1928 unsigned long start, unsigned long end,
1929 struct numa_maps *md)
1930{
1931 unsigned long addr;
1932 struct page *page;
1933
1934 for (addr = start; addr < end; addr += HPAGE_SIZE) {
1935 pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
1936 pte_t pte;
1937
1938 if (!ptep)
1939 continue;
1940
1941 pte = *ptep;
1942 if (pte_none(pte))
1943 continue;
1944
1945 page = pte_page(pte);
1946 if (!page)
1947 continue;
1948
1949 gather_stats(page, md, pte_dirty(*ptep));
1950 }
1951}
7f709ed0
AM
1952#else
1953static inline void check_huge_range(struct vm_area_struct *vma,
1954 unsigned long start, unsigned long end,
1955 struct numa_maps *md)
1956{
1957}
1958#endif
397874df 1959
1a75a6c8
CL
1960int show_numa_map(struct seq_file *m, void *v)
1961{
99f89551 1962 struct proc_maps_private *priv = m->private;
1a75a6c8
CL
1963 struct vm_area_struct *vma = v;
1964 struct numa_maps *md;
397874df
CL
1965 struct file *file = vma->vm_file;
1966 struct mm_struct *mm = vma->vm_mm;
480eccf9 1967 struct mempolicy *pol;
1a75a6c8
CL
1968 int n;
1969 char buffer[50];
1970
397874df 1971 if (!mm)
1a75a6c8
CL
1972 return 0;
1973
1974 md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
1975 if (!md)
1976 return 0;
1977
480eccf9
LS
1978 pol = get_vma_policy(priv->task, vma, vma->vm_start);
1979 mpol_to_str(buffer, sizeof(buffer), pol);
1980 /*
1981 * unref shared or other task's mempolicy
1982 */
1983 if (pol != &default_policy && pol != current->mempolicy)
1984 __mpol_free(pol);
397874df
CL
1985
1986 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1987
1988 if (file) {
1989 seq_printf(m, " file=");
c32c2f63 1990 seq_path(m, &file->f_path, "\n\t= ");
397874df
CL
1991 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1992 seq_printf(m, " heap");
1993 } else if (vma->vm_start <= mm->start_stack &&
1994 vma->vm_end >= mm->start_stack) {
1995 seq_printf(m, " stack");
1996 }
1997
1998 if (is_vm_hugetlb_page(vma)) {
1999 check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2000 seq_printf(m, " huge");
2001 } else {
a57ebfdb 2002 check_pgd_range(vma, vma->vm_start, vma->vm_end,
56bbd65d 2003 &node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
397874df
CL
2004 }
2005
2006 if (!md->pages)
2007 goto out;
1a75a6c8 2008
397874df
CL
2009 if (md->anon)
2010 seq_printf(m," anon=%lu",md->anon);
1a75a6c8 2011
397874df
CL
2012 if (md->dirty)
2013 seq_printf(m," dirty=%lu",md->dirty);
1a75a6c8 2014
397874df
CL
2015 if (md->pages != md->anon && md->pages != md->dirty)
2016 seq_printf(m, " mapped=%lu", md->pages);
1a75a6c8 2017
397874df
CL
2018 if (md->mapcount_max > 1)
2019 seq_printf(m, " mapmax=%lu", md->mapcount_max);
1a75a6c8 2020
397874df
CL
2021 if (md->swapcache)
2022 seq_printf(m," swapcache=%lu", md->swapcache);
2023
2024 if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2025 seq_printf(m," active=%lu", md->active);
2026
2027 if (md->writeback)
2028 seq_printf(m," writeback=%lu", md->writeback);
2029
56bbd65d 2030 for_each_node_state(n, N_HIGH_MEMORY)
397874df
CL
2031 if (md->node[n])
2032 seq_printf(m, " N%d=%lu", n, md->node[n]);
2033out:
2034 seq_putc(m, '\n');
1a75a6c8
CL
2035 kfree(md);
2036
2037 if (m->count < m->size)
99f89551 2038 m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
1a75a6c8
CL
2039 return 0;
2040}
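/*
 * Editor's note (illustrative, not part of mempolicy.c): each vma comes out
 * as one /proc/<pid>/numa_maps line built by the function above, e.g.
 * (values hypothetical):
 *
 *   7f1c2a600000 interleave=0-3 anon=512 dirty=512 active=480 N0=128 N1=128 N2=128 N3=128
 *
 * Minimal userspace reader that dumps the current task's numa_maps;
 * assumes procfs and a CONFIG_NUMA kernel.
 */
#include <stdio.h>

int main(void)
{
	char line[1024];
	FILE *f = fopen("/proc/self/numa_maps", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}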