/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non-default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
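
/*
 * Illustrative only (not from the original source): a minimal user-space
 * sketch of the four policies described above, assuming the set_mempolicy()
 * wrapper from libnuma's <numaif.h>.  maxnode counts mask bits, so 3 covers
 * node bits 0 and 1; error handling is omitted.
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodes01 = (1UL << 0) | (1UL << 1);
 *
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes01, 3);	interleave over 0,1
 *	set_mempolicy(MPOL_BIND, &nodes01, 3);		allocate on 0,1 only
 *	set_mempolicy(MPOL_PREFERRED, &nodes01, 3);	prefer node 0, fall back
 *	set_mempolicy(MPOL_DEFAULT, NULL, 0);		back to local allocation
 */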

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
   could replace all the switch()es with a mempolicy_ops structure.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.policy = MPOL_DEFAULT,
};

static void mpol_rebind_policy(struct mempolicy *pol,
			       const nodemask_t *newmask);

/* Do sanity checking on a policy */
static int mpol_check_policy(int mode, nodemask_t *nodes)
{
	int empty = nodes_empty(*nodes);

	switch (mode) {
	case MPOL_DEFAULT:
		if (!empty)
			return -EINVAL;
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		/* Preferred will only use the first bit, but allow
		   more for now. */
		if (empty)
			return -EINVAL;
		break;
	}
	return nodes_subset(*nodes, node_states[N_HIGH_MEMORY]) ? 0 : -EINVAL;
}

1da177e4 136/* Generate a custom zonelist for the BIND policy. */
dfcd3c0d 137static struct zonelist *bind_zonelist(nodemask_t *nodes)
1da177e4
LT
138{
139 struct zonelist *zl;
2f6726e5
CL
140 int num, max, nd;
141 enum zone_type k;
1da177e4 142
dfcd3c0d 143 max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
9276b1bc 144 max++; /* space for zlcache_ptr (see mmzone.h) */
dd942ae3 145 zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
1da177e4 146 if (!zl)
8af5e2eb 147 return ERR_PTR(-ENOMEM);
9276b1bc 148 zl->zlcache_ptr = NULL;
1da177e4 149 num = 0;
dd942ae3
AK
150 /* First put in the highest zones from all nodes, then all the next
151 lower zones etc. Avoid empty zones because the memory allocator
152 doesn't like them. If you implement node hot removal you
153 have to fix that. */
b377fd39 154 k = MAX_NR_ZONES - 1;
2f6726e5 155 while (1) {
dd942ae3
AK
156 for_each_node_mask(nd, *nodes) {
157 struct zone *z = &NODE_DATA(nd)->node_zones[k];
158 if (z->present_pages > 0)
159 zl->zones[num++] = z;
160 }
2f6726e5
CL
161 if (k == 0)
162 break;
163 k--;
dd942ae3 164 }
8af5e2eb
KH
165 if (num == 0) {
166 kfree(zl);
167 return ERR_PTR(-EINVAL);
168 }
1da177e4
LT
169 zl->zones[num] = NULL;
170 return zl;
171}
172
173/* Create a new policy */
dfcd3c0d 174static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
1da177e4
LT
175{
176 struct mempolicy *policy;
177
140d5a49
PM
178 pr_debug("setting mode %d nodes[0] %lx\n",
179 mode, nodes ? nodes_addr(*nodes)[0] : -1);
180
1da177e4
LT
181 if (mode == MPOL_DEFAULT)
182 return NULL;
183 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
184 if (!policy)
185 return ERR_PTR(-ENOMEM);
186 atomic_set(&policy->refcnt, 1);
187 switch (mode) {
188 case MPOL_INTERLEAVE:
dfcd3c0d 189 policy->v.nodes = *nodes;
6eaf806a
CL
190 nodes_and(policy->v.nodes, policy->v.nodes,
191 node_states[N_HIGH_MEMORY]);
192 if (nodes_weight(policy->v.nodes) == 0) {
8f493d79
AK
193 kmem_cache_free(policy_cache, policy);
194 return ERR_PTR(-EINVAL);
195 }
1da177e4
LT
196 break;
197 case MPOL_PREFERRED:
dfcd3c0d 198 policy->v.preferred_node = first_node(*nodes);
1da177e4
LT
199 if (policy->v.preferred_node >= MAX_NUMNODES)
200 policy->v.preferred_node = -1;
201 break;
202 case MPOL_BIND:
203 policy->v.zonelist = bind_zonelist(nodes);
8af5e2eb
KH
204 if (IS_ERR(policy->v.zonelist)) {
205 void *error_code = policy->v.zonelist;
1da177e4 206 kmem_cache_free(policy_cache, policy);
8af5e2eb 207 return error_code;
1da177e4
LT
208 }
209 break;
210 }
211 policy->policy = mode;
74cb2155 212 policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
1da177e4
LT
213 return policy;
214}
215
397874df 216static void gather_stats(struct page *, void *, int pte_dirty);
fc301289
CL
217static void migrate_page_add(struct page *page, struct list_head *pagelist,
218 unsigned long flags);
1a75a6c8 219
38e35860 220/* Scan through pages checking if pages follow certain conditions. */
b5810039 221static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
dc9aa5b9
CL
222 unsigned long addr, unsigned long end,
223 const nodemask_t *nodes, unsigned long flags,
38e35860 224 void *private)
1da177e4 225{
91612e0d
HD
226 pte_t *orig_pte;
227 pte_t *pte;
705e87c0 228 spinlock_t *ptl;
941150a3 229
705e87c0 230 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
91612e0d 231 do {
6aab341e 232 struct page *page;
25ba77c1 233 int nid;
91612e0d
HD
234
235 if (!pte_present(*pte))
1da177e4 236 continue;
6aab341e
LT
237 page = vm_normal_page(vma, addr, *pte);
238 if (!page)
1da177e4 239 continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling zero pages and other pages that may have been
		 * marked special by the system.
		 *
		 * If PageReserved were not checked here then, for example,
		 * the location of the zero page could have an influence
		 * on MPOL_MF_STRICT, zero pages would be counted for
		 * the per node stats, and there would be useless attempts
		 * to put zero pages on the migration list.
		 */
		if (PageReserved(page))
			continue;
6aab341e 253 nid = page_to_nid(page);
38e35860
CL
254 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
255 continue;
256
1a75a6c8 257 if (flags & MPOL_MF_STATS)
397874df 258 gather_stats(page, private, pte_dirty(*pte));
053837fc 259 else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
fc301289 260 migrate_page_add(page, private, flags);
38e35860
CL
261 else
262 break;
91612e0d 263 } while (pte++, addr += PAGE_SIZE, addr != end);
705e87c0 264 pte_unmap_unlock(orig_pte, ptl);
91612e0d
HD
265 return addr != end;
266}
267
b5810039 268static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
dc9aa5b9
CL
269 unsigned long addr, unsigned long end,
270 const nodemask_t *nodes, unsigned long flags,
38e35860 271 void *private)
91612e0d
HD
272{
273 pmd_t *pmd;
274 unsigned long next;
275
276 pmd = pmd_offset(pud, addr);
277 do {
278 next = pmd_addr_end(addr, end);
279 if (pmd_none_or_clear_bad(pmd))
280 continue;
dc9aa5b9 281 if (check_pte_range(vma, pmd, addr, next, nodes,
38e35860 282 flags, private))
91612e0d
HD
283 return -EIO;
284 } while (pmd++, addr = next, addr != end);
285 return 0;
286}
287
b5810039 288static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
dc9aa5b9
CL
289 unsigned long addr, unsigned long end,
290 const nodemask_t *nodes, unsigned long flags,
38e35860 291 void *private)
91612e0d
HD
292{
293 pud_t *pud;
294 unsigned long next;
295
296 pud = pud_offset(pgd, addr);
297 do {
298 next = pud_addr_end(addr, end);
299 if (pud_none_or_clear_bad(pud))
300 continue;
dc9aa5b9 301 if (check_pmd_range(vma, pud, addr, next, nodes,
38e35860 302 flags, private))
91612e0d
HD
303 return -EIO;
304 } while (pud++, addr = next, addr != end);
305 return 0;
306}
307
b5810039 308static inline int check_pgd_range(struct vm_area_struct *vma,
dc9aa5b9
CL
309 unsigned long addr, unsigned long end,
310 const nodemask_t *nodes, unsigned long flags,
38e35860 311 void *private)
91612e0d
HD
312{
313 pgd_t *pgd;
314 unsigned long next;
315
b5810039 316 pgd = pgd_offset(vma->vm_mm, addr);
91612e0d
HD
317 do {
318 next = pgd_addr_end(addr, end);
319 if (pgd_none_or_clear_bad(pgd))
320 continue;
dc9aa5b9 321 if (check_pud_range(vma, pgd, addr, next, nodes,
38e35860 322 flags, private))
91612e0d
HD
323 return -EIO;
324 } while (pgd++, addr = next, addr != end);
325 return 0;
1da177e4
LT
326}
327
/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
1da177e4
LT
333static struct vm_area_struct *
334check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
38e35860 335 const nodemask_t *nodes, unsigned long flags, void *private)
1da177e4
LT
336{
337 int err;
338 struct vm_area_struct *first, *vma, *prev;
339
90036ee5 340 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
90036ee5 341
b20a3503
CL
342 err = migrate_prep();
343 if (err)
344 return ERR_PTR(err);
90036ee5 345 }
053837fc 346
1da177e4
LT
347 first = find_vma(mm, start);
348 if (!first)
349 return ERR_PTR(-EFAULT);
350 prev = NULL;
351 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
dc9aa5b9
CL
352 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
353 if (!vma->vm_next && vma->vm_end < end)
354 return ERR_PTR(-EFAULT);
355 if (prev && prev->vm_end < vma->vm_start)
356 return ERR_PTR(-EFAULT);
357 }
358 if (!is_vm_hugetlb_page(vma) &&
359 ((flags & MPOL_MF_STRICT) ||
360 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
361 vma_migratable(vma)))) {
5b952b3c 362 unsigned long endvma = vma->vm_end;
dc9aa5b9 363
5b952b3c
AK
364 if (endvma > end)
365 endvma = end;
366 if (vma->vm_start > start)
367 start = vma->vm_start;
dc9aa5b9 368 err = check_pgd_range(vma, start, endvma, nodes,
38e35860 369 flags, private);
1da177e4
LT
370 if (err) {
371 first = ERR_PTR(err);
372 break;
373 }
374 }
375 prev = vma;
376 }
377 return first;
378}
379
380/* Apply policy to a single VMA */
381static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
382{
383 int err = 0;
384 struct mempolicy *old = vma->vm_policy;
385
140d5a49 386 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
1da177e4
LT
387 vma->vm_start, vma->vm_end, vma->vm_pgoff,
388 vma->vm_ops, vma->vm_file,
389 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
390
391 if (vma->vm_ops && vma->vm_ops->set_policy)
392 err = vma->vm_ops->set_policy(vma, new);
393 if (!err) {
394 mpol_get(new);
395 vma->vm_policy = new;
396 mpol_free(old);
397 }
398 return err;
399}
400
401/* Step 2: apply policy to a range and do splits. */
402static int mbind_range(struct vm_area_struct *vma, unsigned long start,
403 unsigned long end, struct mempolicy *new)
404{
405 struct vm_area_struct *next;
406 int err;
407
408 err = 0;
409 for (; vma && vma->vm_start < end; vma = next) {
410 next = vma->vm_next;
411 if (vma->vm_start < start)
412 err = split_vma(vma->vm_mm, vma, start, 1);
413 if (!err && vma->vm_end > end)
414 err = split_vma(vma->vm_mm, vma, end, 0);
415 if (!err)
416 err = policy_vma(vma, new);
417 if (err)
418 break;
419 }
420 return err;
421}
422
8bccd85f
CL
423static int contextualize_policy(int mode, nodemask_t *nodes)
424{
425 if (!nodes)
426 return 0;
427
cf2a473c 428 cpuset_update_task_memory_state();
5966514d
PJ
429 if (!cpuset_nodes_subset_current_mems_allowed(*nodes))
430 return -EINVAL;
8bccd85f
CL
431 return mpol_check_policy(mode, nodes);
432}
433
c61afb18
PJ
434
435/*
436 * Update task->flags PF_MEMPOLICY bit: set iff non-default
437 * mempolicy. Allows more rapid checking of this (combined perhaps
438 * with other PF_* flag bits) on memory allocation hot code paths.
439 *
440 * If called from outside this file, the task 'p' should -only- be
441 * a newly forked child not yet visible on the task list, because
442 * manipulating the task flags of a visible task is not safe.
443 *
444 * The above limitation is why this routine has the funny name
445 * mpol_fix_fork_child_flag().
446 *
447 * It is also safe to call this with a task pointer of current,
448 * which the static wrapper mpol_set_task_struct_flag() does,
449 * for use within this file.
450 */
451
452void mpol_fix_fork_child_flag(struct task_struct *p)
453{
454 if (p->mempolicy)
455 p->flags |= PF_MEMPOLICY;
456 else
457 p->flags &= ~PF_MEMPOLICY;
458}
459
460static void mpol_set_task_struct_flag(void)
461{
462 mpol_fix_fork_child_flag(current);
463}
464
1da177e4 465/* Set the process memory policy */
dbcb0f19 466static long do_set_mempolicy(int mode, nodemask_t *nodes)
1da177e4 467{
1da177e4 468 struct mempolicy *new;
1da177e4 469
8bccd85f 470 if (contextualize_policy(mode, nodes))
1da177e4 471 return -EINVAL;
8bccd85f 472 new = mpol_new(mode, nodes);
1da177e4
LT
473 if (IS_ERR(new))
474 return PTR_ERR(new);
475 mpol_free(current->mempolicy);
476 current->mempolicy = new;
c61afb18 477 mpol_set_task_struct_flag();
1da177e4 478 if (new && new->policy == MPOL_INTERLEAVE)
dfcd3c0d 479 current->il_next = first_node(new->v.nodes);
1da177e4
LT
480 return 0;
481}
482
483/* Fill a zone bitmap for a policy */
dfcd3c0d 484static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
1da177e4
LT
485{
486 int i;
487
dfcd3c0d 488 nodes_clear(*nodes);
1da177e4
LT
489 switch (p->policy) {
490 case MPOL_BIND:
491 for (i = 0; p->v.zonelist->zones[i]; i++)
89fa3024 492 node_set(zone_to_nid(p->v.zonelist->zones[i]),
8bccd85f 493 *nodes);
1da177e4
LT
494 break;
495 case MPOL_DEFAULT:
496 break;
497 case MPOL_INTERLEAVE:
dfcd3c0d 498 *nodes = p->v.nodes;
1da177e4
LT
499 break;
500 case MPOL_PREFERRED:
56bbd65d 501 /* or use current node instead of memory_map? */
1da177e4 502 if (p->v.preferred_node < 0)
56bbd65d 503 *nodes = node_states[N_HIGH_MEMORY];
1da177e4 504 else
dfcd3c0d 505 node_set(p->v.preferred_node, *nodes);
1da177e4
LT
506 break;
507 default:
508 BUG();
509 }
510}
511
512static int lookup_node(struct mm_struct *mm, unsigned long addr)
513{
514 struct page *p;
515 int err;
516
517 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
518 if (err >= 0) {
519 err = page_to_nid(p);
520 put_page(p);
521 }
522 return err;
523}
524
1da177e4 525/* Retrieve NUMA policy */
dbcb0f19
AB
526static long do_get_mempolicy(int *policy, nodemask_t *nmask,
527 unsigned long addr, unsigned long flags)
1da177e4 528{
8bccd85f 529 int err;
1da177e4
LT
530 struct mm_struct *mm = current->mm;
531 struct vm_area_struct *vma = NULL;
532 struct mempolicy *pol = current->mempolicy;
533
cf2a473c 534 cpuset_update_task_memory_state();
754af6f5
LS
535 if (flags &
536 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
1da177e4 537 return -EINVAL;
754af6f5
LS
538
539 if (flags & MPOL_F_MEMS_ALLOWED) {
540 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
541 return -EINVAL;
542 *policy = 0; /* just so it's initialized */
543 *nmask = cpuset_current_mems_allowed;
544 return 0;
545 }
546
1da177e4
LT
547 if (flags & MPOL_F_ADDR) {
548 down_read(&mm->mmap_sem);
549 vma = find_vma_intersection(mm, addr, addr+1);
550 if (!vma) {
551 up_read(&mm->mmap_sem);
552 return -EFAULT;
553 }
554 if (vma->vm_ops && vma->vm_ops->get_policy)
555 pol = vma->vm_ops->get_policy(vma, addr);
556 else
557 pol = vma->vm_policy;
558 } else if (addr)
559 return -EINVAL;
560
561 if (!pol)
562 pol = &default_policy;
563
564 if (flags & MPOL_F_NODE) {
565 if (flags & MPOL_F_ADDR) {
566 err = lookup_node(mm, addr);
567 if (err < 0)
568 goto out;
8bccd85f 569 *policy = err;
1da177e4
LT
570 } else if (pol == current->mempolicy &&
571 pol->policy == MPOL_INTERLEAVE) {
8bccd85f 572 *policy = current->il_next;
1da177e4
LT
573 } else {
574 err = -EINVAL;
575 goto out;
576 }
577 } else
8bccd85f 578 *policy = pol->policy;
1da177e4
LT
579
580 if (vma) {
581 up_read(&current->mm->mmap_sem);
582 vma = NULL;
583 }
584
1da177e4 585 err = 0;
8bccd85f
CL
586 if (nmask)
587 get_zonemask(pol, nmask);
1da177e4
LT
588
589 out:
590 if (vma)
591 up_read(&current->mm->mmap_sem);
592 return err;
593}
594
b20a3503 595#ifdef CONFIG_MIGRATION
6ce3c4c0
CL
596/*
597 * page migration
598 */
fc301289
CL
599static void migrate_page_add(struct page *page, struct list_head *pagelist,
600 unsigned long flags)
6ce3c4c0
CL
601{
602 /*
fc301289 603 * Avoid migrating a page that is shared with others.
6ce3c4c0 604 */
b20a3503
CL
605 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
606 isolate_lru_page(page, pagelist);
7e2ab150 607}
6ce3c4c0 608
742755a1 609static struct page *new_node_page(struct page *page, unsigned long node, int **x)
95a402c3 610{
769848c0 611 return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
95a402c3
CL
612}
613
7e2ab150
CL
614/*
615 * Migrate pages from one node to a target node.
616 * Returns error or the number of pages not migrated.
617 */
dbcb0f19
AB
618static int migrate_to_node(struct mm_struct *mm, int source, int dest,
619 int flags)
7e2ab150
CL
620{
621 nodemask_t nmask;
622 LIST_HEAD(pagelist);
623 int err = 0;
624
625 nodes_clear(nmask);
626 node_set(source, nmask);
6ce3c4c0 627
7e2ab150
CL
628 check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
629 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
630
aaa994b3 631 if (!list_empty(&pagelist))
95a402c3
CL
632 err = migrate_pages(&pagelist, new_node_page, dest);
633
7e2ab150 634 return err;
6ce3c4c0
CL
635}
636
/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
643int do_migrate_pages(struct mm_struct *mm,
644 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
645{
646 LIST_HEAD(pagelist);
7e2ab150
CL
647 int busy = 0;
648 int err = 0;
649 nodemask_t tmp;
39743889 650
7e2ab150 651 down_read(&mm->mmap_sem);
39743889 652
7b2259b3
CL
653 err = migrate_vmas(mm, from_nodes, to_nodes, flags);
654 if (err)
655 goto out;
656
/*
 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 * bit in 'tmp', and return that <source, dest> pair for migration.
 * The pair of nodemasks 'to' and 'from' define the map.
 *
 * If no pair of bits is found that way, fallback to picking some
 * pair of 'source' and 'dest' bits that are not the same.  If the
 * 'source' and 'dest' bits are the same, this represents a node
 * that will be migrating to itself, so no pages need move.
 *
 * If no bits are left in 'tmp', or if all remaining bits left
 * in 'tmp' correspond to the same bit in 'to', return false
 * (nothing left to migrate).
 *
 * This lets us pick a pair of nodes to migrate between, such that
 * if possible the dest node is not already occupied by some other
 * source node, minimizing the risk of overloading the memory on a
 * node that would happen if we migrated incoming memory to a node
 * before migrating outgoing memory source from that same node.
 *
 * A single scan of tmp is sufficient.  As we go, we remember the
 * most recent <s, d> pair that moved (s != d).  If we find a pair
 * that not only moved, but what's better, moved to an empty slot
 * (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning tmp, we at least have the
 * most recent <s, d> pair that moved.  If we get all the way through
 * the scan of tmp without finding any node that moved, much less
 * moved to an empty node, then there is nothing left worth migrating.
 */
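
/*
 * Illustrative example (not from the original source): with
 * from_nodes = {0,1} and to_nodes = {2,3}, node_remap() maps 0->2 and
 * 1->3.  The first scan finds <0,2>; since 2 is not set in tmp, that pair
 * is used immediately, the pages on node 0 are migrated to node 2, and
 * bit 0 is cleared from tmp.  The next pass then picks <1,3>.  With
 * overlapping masks such as from = {0,1}, to = {1,2}, the scan remembers
 * <0,1> but keeps looking and prefers <1,2>, because node 2 is not a
 * remaining source and its memory is not yet spoken for.
 */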
d4984711 687
7e2ab150
CL
688 tmp = *from_nodes;
689 while (!nodes_empty(tmp)) {
690 int s,d;
691 int source = -1;
692 int dest = 0;
693
694 for_each_node_mask(s, tmp) {
695 d = node_remap(s, *from_nodes, *to_nodes);
696 if (s == d)
697 continue;
698
699 source = s; /* Node moved. Memorize */
700 dest = d;
701
702 /* dest not in remaining from nodes? */
703 if (!node_isset(dest, tmp))
704 break;
705 }
706 if (source == -1)
707 break;
708
709 node_clear(source, tmp);
710 err = migrate_to_node(mm, source, dest, flags);
711 if (err > 0)
712 busy += err;
713 if (err < 0)
714 break;
39743889 715 }
7b2259b3 716out:
39743889 717 up_read(&mm->mmap_sem);
7e2ab150
CL
718 if (err < 0)
719 return err;
720 return busy;
b20a3503
CL
721
722}
723
742755a1 724static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
95a402c3
CL
725{
726 struct vm_area_struct *vma = (struct vm_area_struct *)private;
727
769848c0
MG
728 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
729 page_address_in_vma(page, vma));
95a402c3 730}
b20a3503
CL
731#else
732
733static void migrate_page_add(struct page *page, struct list_head *pagelist,
734 unsigned long flags)
735{
39743889
CL
736}
737
b20a3503
CL
738int do_migrate_pages(struct mm_struct *mm,
739 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
740{
741 return -ENOSYS;
742}
95a402c3 743
69939749 744static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
95a402c3
CL
745{
746 return NULL;
747}
b20a3503
CL
748#endif
749
dbcb0f19
AB
750static long do_mbind(unsigned long start, unsigned long len,
751 unsigned long mode, nodemask_t *nmask,
752 unsigned long flags)
6ce3c4c0
CL
753{
754 struct vm_area_struct *vma;
755 struct mm_struct *mm = current->mm;
756 struct mempolicy *new;
757 unsigned long end;
758 int err;
759 LIST_HEAD(pagelist);
760
761 if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
762 MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
763 || mode > MPOL_MAX)
764 return -EINVAL;
74c00241 765 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
6ce3c4c0
CL
766 return -EPERM;
767
768 if (start & ~PAGE_MASK)
769 return -EINVAL;
770
771 if (mode == MPOL_DEFAULT)
772 flags &= ~MPOL_MF_STRICT;
773
774 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
775 end = start + len;
776
777 if (end < start)
778 return -EINVAL;
779 if (end == start)
780 return 0;
781
782 if (mpol_check_policy(mode, nmask))
783 return -EINVAL;
784
785 new = mpol_new(mode, nmask);
786 if (IS_ERR(new))
787 return PTR_ERR(new);
788
789 /*
790 * If we are using the default policy then operation
791 * on discontinuous address spaces is okay after all
792 */
793 if (!new)
794 flags |= MPOL_MF_DISCONTIG_OK;
795
140d5a49
PM
796 pr_debug("mbind %lx-%lx mode:%ld nodes:%lx\n",start,start+len,
797 mode, nmask ? nodes_addr(*nmask)[0] : -1);
6ce3c4c0
CL
798
799 down_write(&mm->mmap_sem);
800 vma = check_range(mm, start, end, nmask,
801 flags | MPOL_MF_INVERT, &pagelist);
802
803 err = PTR_ERR(vma);
804 if (!IS_ERR(vma)) {
805 int nr_failed = 0;
806
807 err = mbind_range(vma, start, end, new);
7e2ab150 808
6ce3c4c0 809 if (!list_empty(&pagelist))
95a402c3
CL
810 nr_failed = migrate_pages(&pagelist, new_vma_page,
811 (unsigned long)vma);
6ce3c4c0
CL
812
813 if (!err && nr_failed && (flags & MPOL_MF_STRICT))
814 err = -EIO;
815 }
b20a3503 816
6ce3c4c0
CL
817 up_write(&mm->mmap_sem);
818 mpol_free(new);
819 return err;
820}
821
/*
 * User space interface with variable sized bitmaps for nodelists.
 */

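/*
 * Illustrative only (not from the original source): how the bitmap
 * interface above looks from user space, assuming the mbind() wrapper
 * from libnuma's <numaif.h>.  The nodemask is an array of unsigned longs
 * and maxnode is the number of bits the kernel should read from it.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	size_t len = 4UL << 20;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long mask[1] = { (1UL << 0) | (1UL << 1) };
 *
 *	mbind(buf, len, MPOL_INTERLEAVE, mask, sizeof(mask) * 8, 0);
 */
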
826/* Copy a node mask from user space. */
39743889 827static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
8bccd85f
CL
828 unsigned long maxnode)
829{
830 unsigned long k;
831 unsigned long nlongs;
832 unsigned long endmask;
833
834 --maxnode;
835 nodes_clear(*nodes);
836 if (maxnode == 0 || !nmask)
837 return 0;
a9c930ba 838 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
636f13c1 839 return -EINVAL;
8bccd85f
CL
840
841 nlongs = BITS_TO_LONGS(maxnode);
842 if ((maxnode % BITS_PER_LONG) == 0)
843 endmask = ~0UL;
844 else
845 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
846
847 /* When the user specified more nodes than supported just check
848 if the non supported part is all zero. */
849 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
850 if (nlongs > PAGE_SIZE/sizeof(long))
851 return -EINVAL;
852 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
853 unsigned long t;
854 if (get_user(t, nmask + k))
855 return -EFAULT;
856 if (k == nlongs - 1) {
857 if (t & endmask)
858 return -EINVAL;
859 } else if (t)
860 return -EINVAL;
861 }
862 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
863 endmask = ~0UL;
864 }
865
866 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
867 return -EFAULT;
868 nodes_addr(*nodes)[nlongs-1] &= endmask;
869 return 0;
870}
871
872/* Copy a kernel node mask to user space */
873static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
874 nodemask_t *nodes)
875{
876 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
877 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
878
879 if (copy > nbytes) {
880 if (copy > PAGE_SIZE)
881 return -EINVAL;
882 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
883 return -EFAULT;
884 copy = nbytes;
885 }
886 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
887}
888
889asmlinkage long sys_mbind(unsigned long start, unsigned long len,
890 unsigned long mode,
891 unsigned long __user *nmask, unsigned long maxnode,
892 unsigned flags)
893{
894 nodemask_t nodes;
895 int err;
896
897 err = get_nodes(&nodes, nmask, maxnode);
898 if (err)
899 return err;
30150f8d
CL
900#ifdef CONFIG_CPUSETS
901 /* Restrict the nodes to the allowed nodes in the cpuset */
902 nodes_and(nodes, nodes, current->mems_allowed);
903#endif
8bccd85f
CL
904 return do_mbind(start, len, mode, &nodes, flags);
905}
906
907/* Set the process memory policy */
908asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
909 unsigned long maxnode)
910{
911 int err;
912 nodemask_t nodes;
913
914 if (mode < 0 || mode > MPOL_MAX)
915 return -EINVAL;
916 err = get_nodes(&nodes, nmask, maxnode);
917 if (err)
918 return err;
919 return do_set_mempolicy(mode, &nodes);
920}
921
39743889
CL
922asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
923 const unsigned long __user *old_nodes,
924 const unsigned long __user *new_nodes)
925{
926 struct mm_struct *mm;
927 struct task_struct *task;
928 nodemask_t old;
929 nodemask_t new;
930 nodemask_t task_nodes;
931 int err;
932
933 err = get_nodes(&old, old_nodes, maxnode);
934 if (err)
935 return err;
936
937 err = get_nodes(&new, new_nodes, maxnode);
938 if (err)
939 return err;
940
941 /* Find the mm_struct */
942 read_lock(&tasklist_lock);
943 task = pid ? find_task_by_pid(pid) : current;
944 if (!task) {
945 read_unlock(&tasklist_lock);
946 return -ESRCH;
947 }
948 mm = get_task_mm(task);
949 read_unlock(&tasklist_lock);
950
951 if (!mm)
952 return -EINVAL;
953
954 /*
955 * Check if this process has the right to modify the specified
956 * process. The right exists if the process has administrative
7f927fcc 957 * capabilities, superuser privileges or the same
39743889
CL
958 * userid as the target process.
959 */
960 if ((current->euid != task->suid) && (current->euid != task->uid) &&
961 (current->uid != task->suid) && (current->uid != task->uid) &&
74c00241 962 !capable(CAP_SYS_NICE)) {
39743889
CL
963 err = -EPERM;
964 goto out;
965 }
966
967 task_nodes = cpuset_mems_allowed(task);
968 /* Is the user allowed to access the target nodes? */
74c00241 969 if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
39743889
CL
970 err = -EPERM;
971 goto out;
972 }
973
37b07e41 974 if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
3b42d28b
CL
975 err = -EINVAL;
976 goto out;
977 }
978
86c3a764
DQ
979 err = security_task_movememory(task);
980 if (err)
981 goto out;
982
511030bc 983 err = do_migrate_pages(mm, &old, &new,
74c00241 984 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
39743889
CL
985out:
986 mmput(mm);
987 return err;
988}
989
990
8bccd85f
CL
991/* Retrieve NUMA policy */
992asmlinkage long sys_get_mempolicy(int __user *policy,
993 unsigned long __user *nmask,
994 unsigned long maxnode,
995 unsigned long addr, unsigned long flags)
996{
dbcb0f19
AB
997 int err;
998 int uninitialized_var(pval);
8bccd85f
CL
999 nodemask_t nodes;
1000
1001 if (nmask != NULL && maxnode < MAX_NUMNODES)
1002 return -EINVAL;
1003
1004 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1005
1006 if (err)
1007 return err;
1008
1009 if (policy && put_user(pval, policy))
1010 return -EFAULT;
1011
1012 if (nmask)
1013 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1014
1015 return err;
1016}
1017
1da177e4
LT
1018#ifdef CONFIG_COMPAT
1019
1020asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1021 compat_ulong_t __user *nmask,
1022 compat_ulong_t maxnode,
1023 compat_ulong_t addr, compat_ulong_t flags)
1024{
1025 long err;
1026 unsigned long __user *nm = NULL;
1027 unsigned long nr_bits, alloc_size;
1028 DECLARE_BITMAP(bm, MAX_NUMNODES);
1029
1030 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1031 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1032
1033 if (nmask)
1034 nm = compat_alloc_user_space(alloc_size);
1035
1036 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1037
1038 if (!err && nmask) {
1039 err = copy_from_user(bm, nm, alloc_size);
1040 /* ensure entire bitmap is zeroed */
1041 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1042 err |= compat_put_bitmap(nmask, bm, nr_bits);
1043 }
1044
1045 return err;
1046}
1047
1048asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1049 compat_ulong_t maxnode)
1050{
1051 long err = 0;
1052 unsigned long __user *nm = NULL;
1053 unsigned long nr_bits, alloc_size;
1054 DECLARE_BITMAP(bm, MAX_NUMNODES);
1055
1056 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1057 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1058
1059 if (nmask) {
1060 err = compat_get_bitmap(bm, nmask, nr_bits);
1061 nm = compat_alloc_user_space(alloc_size);
1062 err |= copy_to_user(nm, bm, alloc_size);
1063 }
1064
1065 if (err)
1066 return -EFAULT;
1067
1068 return sys_set_mempolicy(mode, nm, nr_bits+1);
1069}
1070
1071asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1072 compat_ulong_t mode, compat_ulong_t __user *nmask,
1073 compat_ulong_t maxnode, compat_ulong_t flags)
1074{
1075 long err = 0;
1076 unsigned long __user *nm = NULL;
1077 unsigned long nr_bits, alloc_size;
dfcd3c0d 1078 nodemask_t bm;
1da177e4
LT
1079
1080 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1081 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1082
1083 if (nmask) {
dfcd3c0d 1084 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1da177e4 1085 nm = compat_alloc_user_space(alloc_size);
dfcd3c0d 1086 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1da177e4
LT
1087 }
1088
1089 if (err)
1090 return -EFAULT;
1091
1092 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1093}
1094
1095#endif
1096
/*
 * get_vma_policy(@task, @vma, @addr)
 * @task - task for fallback if vma policy == default
 * @vma - virtual memory area whose policy is sought
 * @addr - address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to @task or system default policy, as necessary.
 * Returned policy has an extra reference count if it is shared, a vma
 * policy, or some other task's policy [show_numa_maps() can pass
 * @task != current].  It is the caller's responsibility to
 * free the reference in these cases.
 */
48fce342
CL
1110static struct mempolicy * get_vma_policy(struct task_struct *task,
1111 struct vm_area_struct *vma, unsigned long addr)
1da177e4 1112{
6e21c8f1 1113 struct mempolicy *pol = task->mempolicy;
480eccf9 1114 int shared_pol = 0;
1da177e4
LT
1115
1116 if (vma) {
480eccf9 1117 if (vma->vm_ops && vma->vm_ops->get_policy) {
8bccd85f 1118 pol = vma->vm_ops->get_policy(vma, addr);
480eccf9
LS
1119 shared_pol = 1; /* if pol non-NULL, add ref below */
1120 } else if (vma->vm_policy &&
1da177e4
LT
1121 vma->vm_policy->policy != MPOL_DEFAULT)
1122 pol = vma->vm_policy;
1123 }
1124 if (!pol)
1125 pol = &default_policy;
480eccf9
LS
1126 else if (!shared_pol && pol != current->mempolicy)
1127 mpol_get(pol); /* vma or other task's policy */
1da177e4
LT
1128 return pol;
1129}
1130
1131/* Return a zonelist representing a mempolicy */
dd0fc66f 1132static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
1da177e4
LT
1133{
1134 int nd;
1135
1136 switch (policy->policy) {
1137 case MPOL_PREFERRED:
1138 nd = policy->v.preferred_node;
1139 if (nd < 0)
1140 nd = numa_node_id();
1141 break;
1142 case MPOL_BIND:
1143 /* Lower zones don't get a policy applied */
1144 /* Careful: current->mems_allowed might have moved */
19655d34 1145 if (gfp_zone(gfp) >= policy_zone)
1da177e4
LT
1146 if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
1147 return policy->v.zonelist;
1148 /*FALL THROUGH*/
1149 case MPOL_INTERLEAVE: /* should not happen */
1150 case MPOL_DEFAULT:
1151 nd = numa_node_id();
1152 break;
1153 default:
1154 nd = 0;
1155 BUG();
1156 }
af4ca457 1157 return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
1da177e4
LT
1158}
1159
1160/* Do dynamic interleaving for a process */
1161static unsigned interleave_nodes(struct mempolicy *policy)
1162{
1163 unsigned nid, next;
1164 struct task_struct *me = current;
1165
1166 nid = me->il_next;
dfcd3c0d 1167 next = next_node(nid, policy->v.nodes);
1da177e4 1168 if (next >= MAX_NUMNODES)
dfcd3c0d 1169 next = first_node(policy->v.nodes);
1da177e4
LT
1170 me->il_next = next;
1171 return nid;
1172}
1173
dc85da15
CL
1174/*
1175 * Depending on the memory policy provide a node from which to allocate the
1176 * next slab entry.
1177 */
1178unsigned slab_node(struct mempolicy *policy)
1179{
765c4507
CL
1180 int pol = policy ? policy->policy : MPOL_DEFAULT;
1181
1182 switch (pol) {
dc85da15
CL
1183 case MPOL_INTERLEAVE:
1184 return interleave_nodes(policy);
1185
1186 case MPOL_BIND:
1187 /*
1188 * Follow bind policy behavior and start allocation at the
1189 * first node.
1190 */
89fa3024 1191 return zone_to_nid(policy->v.zonelist->zones[0]);
dc85da15
CL
1192
1193 case MPOL_PREFERRED:
1194 if (policy->v.preferred_node >= 0)
1195 return policy->v.preferred_node;
1196 /* Fall through */
1197
1198 default:
1199 return numa_node_id();
1200 }
1201}
1202
1da177e4
LT
1203/* Do static interleaving for a VMA with known offset. */
1204static unsigned offset_il_node(struct mempolicy *pol,
1205 struct vm_area_struct *vma, unsigned long off)
1206{
dfcd3c0d 1207 unsigned nnodes = nodes_weight(pol->v.nodes);
1da177e4
LT
1208 unsigned target = (unsigned)off % nnodes;
1209 int c;
1210 int nid = -1;
1211
1212 c = 0;
1213 do {
dfcd3c0d 1214 nid = next_node(nid, pol->v.nodes);
1da177e4
LT
1215 c++;
1216 } while (c <= target);
1da177e4
LT
1217 return nid;
1218}
1219
5da7ca86
CL
1220/* Determine a node number for interleave */
1221static inline unsigned interleave_nid(struct mempolicy *pol,
1222 struct vm_area_struct *vma, unsigned long addr, int shift)
1223{
1224 if (vma) {
1225 unsigned long off;
1226
3b98b087
NA
1227 /*
1228 * for small pages, there is no difference between
1229 * shift and PAGE_SHIFT, so the bit-shift is safe.
1230 * for huge pages, since vm_pgoff is in units of small
1231 * pages, we need to shift off the always 0 bits to get
1232 * a useful offset.
1233 */
1234 BUG_ON(shift < PAGE_SHIFT);
1235 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
5da7ca86
CL
1236 off += (addr - vma->vm_start) >> shift;
1237 return offset_il_node(pol, vma, off);
1238 } else
1239 return interleave_nodes(pol);
1240}
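
/*
 * Illustrative example (not from the original source): with 2MB huge pages
 * (HPAGE_SHIFT == 21, PAGE_SHIFT == 12), vm_pgoff is kept in 4kB units, so
 * the huge-page index used above for a fault at addr is
 *	(vma->vm_pgoff >> 9) + ((addr - vma->vm_start) >> 21)
 * and offset_il_node() takes that index modulo the number of nodes in the
 * interleave mask, so consecutive huge pages rotate round-robin across
 * the mask.
 */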
1241
00ac59ad 1242#ifdef CONFIG_HUGETLBFS
480eccf9
LS
1243/*
1244 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1245 * @vma = virtual memory area whose policy is sought
1246 * @addr = address in @vma for shared policy lookup and interleave policy
1247 * @gfp_flags = for requested zone
1248 * @mpol = pointer to mempolicy pointer for reference counted 'BIND policy
1249 *
1250 * Returns a zonelist suitable for a huge page allocation.
1251 * If the effective policy is 'BIND, returns pointer to policy's zonelist.
1252 * If it is also a policy for which get_vma_policy() returns an extra
1253 * reference, we must hold that reference until after allocation.
1254 * In that case, return policy via @mpol so hugetlb allocation can drop
1255 * the reference. For non-'BIND referenced policies, we can/do drop the
1256 * reference here, so the caller doesn't need to know about the special case
1257 * for default and current task policy.
1258 */
396faf03 1259struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
480eccf9 1260 gfp_t gfp_flags, struct mempolicy **mpol)
5da7ca86
CL
1261{
1262 struct mempolicy *pol = get_vma_policy(current, vma, addr);
480eccf9 1263 struct zonelist *zl;
5da7ca86 1264
480eccf9 1265 *mpol = NULL; /* probably no unref needed */
5da7ca86
CL
1266 if (pol->policy == MPOL_INTERLEAVE) {
1267 unsigned nid;
1268
1269 nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
480eccf9 1270 __mpol_free(pol); /* finished with pol */
396faf03 1271 return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags);
5da7ca86 1272 }
480eccf9
LS
1273
1274 zl = zonelist_policy(GFP_HIGHUSER, pol);
1275 if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
1276 if (pol->policy != MPOL_BIND)
1277 __mpol_free(pol); /* finished with pol */
1278 else
1279 *mpol = pol; /* unref needed after allocation */
1280 }
1281 return zl;
5da7ca86 1282}
00ac59ad 1283#endif
5da7ca86 1284
1da177e4
LT
1285/* Allocate a page in interleaved policy.
1286 Own path because it needs to do special accounting. */
662f3a0b
AK
1287static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1288 unsigned nid)
1da177e4
LT
1289{
1290 struct zonelist *zl;
1291 struct page *page;
1292
af4ca457 1293 zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
1da177e4 1294 page = __alloc_pages(gfp, order, zl);
ca889e6c
CL
1295 if (page && page_zone(page) == zl->zones[0])
1296 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1da177e4
LT
1297 return page;
1298}
1299
/**
 * alloc_page_vma - Allocate a page for a VMA.
 *
 * @gfp:
 *      %GFP_USER    user allocation.
 *      %GFP_KERNEL  kernel allocations,
 *      %GFP_HIGHMEM highmem/user allocations,
 *      %GFP_FS      allocation should not call back into a file system.
 *      %GFP_ATOMIC  don't sleep.
 *
 * @vma:  Pointer to VMA or NULL if not available.
 * @addr: Virtual Address of the allocation. Must be inside the VMA.
 *
 * This function allocates a page from the kernel page pool and applies
 * a NUMA policy associated with the VMA or the current process.
 * When the VMA is not NULL the caller must hold down_read on the mmap_sem
 * of the mm_struct of the VMA to prevent it from going away.  Should be
 * used for all allocations for pages that will be mapped into user space.
 * Returns NULL when no page can be allocated.
 *
 * Should be called with the mm_sem of the vma held.
 */
1322struct page *
dd0fc66f 1323alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
1da177e4 1324{
6e21c8f1 1325 struct mempolicy *pol = get_vma_policy(current, vma, addr);
480eccf9 1326 struct zonelist *zl;
1da177e4 1327
cf2a473c 1328 cpuset_update_task_memory_state();
1da177e4
LT
1329
1330 if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
1331 unsigned nid;
5da7ca86
CL
1332
1333 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
1da177e4
LT
1334 return alloc_page_interleave(gfp, 0, nid);
1335 }
480eccf9
LS
1336 zl = zonelist_policy(gfp, pol);
1337 if (pol != &default_policy && pol != current->mempolicy) {
1338 /*
1339 * slow path: ref counted policy -- shared or vma
1340 */
1341 struct page *page = __alloc_pages(gfp, 0, zl);
1342 __mpol_free(pol);
1343 return page;
1344 }
1345 /*
1346 * fast path: default or task policy
1347 */
1348 return __alloc_pages(gfp, 0, zl);
1da177e4
LT
1349}
1350
/**
 * alloc_pages_current - Allocate pages.
 *
 * @gfp:
 *      %GFP_USER    user allocation,
 *      %GFP_KERNEL  kernel allocation,
 *      %GFP_HIGHMEM highmem allocation,
 *      %GFP_FS      don't call back into a file system.
 *      %GFP_ATOMIC  don't sleep.
 * @order: Power of two of allocation size in pages. 0 is a single page.
 *
 * Allocate a page from the kernel page pool.  When not in
 * interrupt context, the current process' NUMA policy is applied.
 * Returns NULL when no page can be allocated.
 *
 * Don't call cpuset_update_task_memory_state() unless
 * 1) it's ok to take cpuset_sem (can WAIT), and
 * 2) allocating for current task (not interrupt).
 */
dd0fc66f 1370struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1da177e4
LT
1371{
1372 struct mempolicy *pol = current->mempolicy;
1373
1374 if ((gfp & __GFP_WAIT) && !in_interrupt())
cf2a473c 1375 cpuset_update_task_memory_state();
9b819d20 1376 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1da177e4
LT
1377 pol = &default_policy;
1378 if (pol->policy == MPOL_INTERLEAVE)
1379 return alloc_page_interleave(gfp, order, interleave_nodes(pol));
1380 return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
1381}
1382EXPORT_SYMBOL(alloc_pages_current);
1383
4225399a
PJ
1384/*
1385 * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
1386 * rebinds the mempolicy its copying by calling mpol_rebind_policy()
1387 * with the mems_allowed returned by cpuset_mems_allowed(). This
1388 * keeps mempolicies cpuset relative after its cpuset moves. See
1389 * further kernel/cpuset.c update_nodemask().
1390 */
1391void *cpuset_being_rebound;
1392
1da177e4
LT
1393/* Slow path of a mempolicy copy */
1394struct mempolicy *__mpol_copy(struct mempolicy *old)
1395{
1396 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1397
1398 if (!new)
1399 return ERR_PTR(-ENOMEM);
4225399a
PJ
1400 if (current_cpuset_is_being_rebound()) {
1401 nodemask_t mems = cpuset_mems_allowed(current);
1402 mpol_rebind_policy(old, &mems);
1403 }
1da177e4
LT
1404 *new = *old;
1405 atomic_set(&new->refcnt, 1);
1406 if (new->policy == MPOL_BIND) {
1407 int sz = ksize(old->v.zonelist);
e94b1766 1408 new->v.zonelist = kmemdup(old->v.zonelist, sz, GFP_KERNEL);
1da177e4
LT
1409 if (!new->v.zonelist) {
1410 kmem_cache_free(policy_cache, new);
1411 return ERR_PTR(-ENOMEM);
1412 }
1da177e4
LT
1413 }
1414 return new;
1415}
1416
1417/* Slow path of a mempolicy comparison */
1418int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1419{
1420 if (!a || !b)
1421 return 0;
1422 if (a->policy != b->policy)
1423 return 0;
1424 switch (a->policy) {
1425 case MPOL_DEFAULT:
1426 return 1;
1427 case MPOL_INTERLEAVE:
dfcd3c0d 1428 return nodes_equal(a->v.nodes, b->v.nodes);
1da177e4
LT
1429 case MPOL_PREFERRED:
1430 return a->v.preferred_node == b->v.preferred_node;
1431 case MPOL_BIND: {
1432 int i;
1433 for (i = 0; a->v.zonelist->zones[i]; i++)
1434 if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i])
1435 return 0;
1436 return b->v.zonelist->zones[i] == NULL;
1437 }
1438 default:
1439 BUG();
1440 return 0;
1441 }
1442}
1443
1444/* Slow path of a mpol destructor. */
1445void __mpol_free(struct mempolicy *p)
1446{
1447 if (!atomic_dec_and_test(&p->refcnt))
1448 return;
1449 if (p->policy == MPOL_BIND)
1450 kfree(p->v.zonelist);
1451 p->policy = MPOL_DEFAULT;
1452 kmem_cache_free(policy_cache, p);
1453}
1454
1da177e4
LT
1455/*
1456 * Shared memory backing store policy support.
1457 *
1458 * Remember policies even when nobody has shared memory mapped.
1459 * The policies are kept in Red-Black tree linked from the inode.
1460 * They are protected by the sp->lock spinlock, which should be held
1461 * for any accesses to the tree.
1462 */
1463
1464/* lookup first element intersecting start-end */
1465/* Caller holds sp->lock */
1466static struct sp_node *
1467sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
1468{
1469 struct rb_node *n = sp->root.rb_node;
1470
1471 while (n) {
1472 struct sp_node *p = rb_entry(n, struct sp_node, nd);
1473
1474 if (start >= p->end)
1475 n = n->rb_right;
1476 else if (end <= p->start)
1477 n = n->rb_left;
1478 else
1479 break;
1480 }
1481 if (!n)
1482 return NULL;
1483 for (;;) {
1484 struct sp_node *w = NULL;
1485 struct rb_node *prev = rb_prev(n);
1486 if (!prev)
1487 break;
1488 w = rb_entry(prev, struct sp_node, nd);
1489 if (w->end <= start)
1490 break;
1491 n = prev;
1492 }
1493 return rb_entry(n, struct sp_node, nd);
1494}
1495
1496/* Insert a new shared policy into the list. */
1497/* Caller holds sp->lock */
1498static void sp_insert(struct shared_policy *sp, struct sp_node *new)
1499{
1500 struct rb_node **p = &sp->root.rb_node;
1501 struct rb_node *parent = NULL;
1502 struct sp_node *nd;
1503
1504 while (*p) {
1505 parent = *p;
1506 nd = rb_entry(parent, struct sp_node, nd);
1507 if (new->start < nd->start)
1508 p = &(*p)->rb_left;
1509 else if (new->end > nd->end)
1510 p = &(*p)->rb_right;
1511 else
1512 BUG();
1513 }
1514 rb_link_node(&new->nd, parent, p);
1515 rb_insert_color(&new->nd, &sp->root);
140d5a49 1516 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
1da177e4
LT
1517 new->policy ? new->policy->policy : 0);
1518}
1519
1520/* Find shared policy intersecting idx */
1521struct mempolicy *
1522mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
1523{
1524 struct mempolicy *pol = NULL;
1525 struct sp_node *sn;
1526
1527 if (!sp->root.rb_node)
1528 return NULL;
1529 spin_lock(&sp->lock);
1530 sn = sp_lookup(sp, idx, idx+1);
1531 if (sn) {
1532 mpol_get(sn->policy);
1533 pol = sn->policy;
1534 }
1535 spin_unlock(&sp->lock);
1536 return pol;
1537}
1538
1539static void sp_delete(struct shared_policy *sp, struct sp_node *n)
1540{
140d5a49 1541 pr_debug("deleting %lx-l%lx\n", n->start, n->end);
1da177e4
LT
1542 rb_erase(&n->nd, &sp->root);
1543 mpol_free(n->policy);
1544 kmem_cache_free(sn_cache, n);
1545}
1546
dbcb0f19
AB
1547static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1548 struct mempolicy *pol)
1da177e4
LT
1549{
1550 struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1551
1552 if (!n)
1553 return NULL;
1554 n->start = start;
1555 n->end = end;
1556 mpol_get(pol);
1557 n->policy = pol;
1558 return n;
1559}
1560
1561/* Replace a policy range. */
1562static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
1563 unsigned long end, struct sp_node *new)
1564{
1565 struct sp_node *n, *new2 = NULL;
1566
1567restart:
1568 spin_lock(&sp->lock);
1569 n = sp_lookup(sp, start, end);
1570 /* Take care of old policies in the same range. */
1571 while (n && n->start < end) {
1572 struct rb_node *next = rb_next(&n->nd);
1573 if (n->start >= start) {
1574 if (n->end <= end)
1575 sp_delete(sp, n);
1576 else
1577 n->start = end;
1578 } else {
1579 /* Old policy spanning whole new range. */
1580 if (n->end > end) {
1581 if (!new2) {
1582 spin_unlock(&sp->lock);
1583 new2 = sp_alloc(end, n->end, n->policy);
1584 if (!new2)
1585 return -ENOMEM;
1586 goto restart;
1587 }
1588 n->end = start;
1589 sp_insert(sp, new2);
1590 new2 = NULL;
1591 break;
1592 } else
1593 n->end = start;
1594 }
1595 if (!next)
1596 break;
1597 n = rb_entry(next, struct sp_node, nd);
1598 }
1599 if (new)
1600 sp_insert(sp, new);
1601 spin_unlock(&sp->lock);
1602 if (new2) {
1603 mpol_free(new2->policy);
1604 kmem_cache_free(sn_cache, new2);
1605 }
1606 return 0;
1607}
1608
7339ff83
RH
1609void mpol_shared_policy_init(struct shared_policy *info, int policy,
1610 nodemask_t *policy_nodes)
1611{
1612 info->root = RB_ROOT;
1613 spin_lock_init(&info->lock);
1614
1615 if (policy != MPOL_DEFAULT) {
1616 struct mempolicy *newpol;
1617
1618 /* Falls back to MPOL_DEFAULT on any error */
1619 newpol = mpol_new(policy, policy_nodes);
1620 if (!IS_ERR(newpol)) {
1621 /* Create pseudo-vma that contains just the policy */
1622 struct vm_area_struct pvma;
1623
1624 memset(&pvma, 0, sizeof(struct vm_area_struct));
1625 /* Policy covers entire file */
1626 pvma.vm_end = TASK_SIZE;
1627 mpol_set_shared_policy(info, &pvma, newpol);
1628 mpol_free(newpol);
1629 }
1630 }
1631}
1632
1da177e4
LT
1633int mpol_set_shared_policy(struct shared_policy *info,
1634 struct vm_area_struct *vma, struct mempolicy *npol)
1635{
1636 int err;
1637 struct sp_node *new = NULL;
1638 unsigned long sz = vma_pages(vma);
1639
140d5a49 1640 pr_debug("set_shared_policy %lx sz %lu %d %lx\n",
1da177e4
LT
1641 vma->vm_pgoff,
1642 sz, npol? npol->policy : -1,
140d5a49 1643 npol ? nodes_addr(npol->v.nodes)[0] : -1);
1da177e4
LT
1644
1645 if (npol) {
1646 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
1647 if (!new)
1648 return -ENOMEM;
1649 }
1650 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
1651 if (err && new)
1652 kmem_cache_free(sn_cache, new);
1653 return err;
1654}
1655
1656/* Free a backing policy store on inode delete. */
1657void mpol_free_shared_policy(struct shared_policy *p)
1658{
1659 struct sp_node *n;
1660 struct rb_node *next;
1661
1662 if (!p->root.rb_node)
1663 return;
1664 spin_lock(&p->lock);
1665 next = rb_first(&p->root);
1666 while (next) {
1667 n = rb_entry(next, struct sp_node, nd);
1668 next = rb_next(&n->nd);
90c5029e 1669 rb_erase(&n->nd, &p->root);
1da177e4
LT
1670 mpol_free(n->policy);
1671 kmem_cache_free(sn_cache, n);
1672 }
1673 spin_unlock(&p->lock);
1da177e4
LT
1674}
1675
1676/* assumes fs == KERNEL_DS */
1677void __init numa_policy_init(void)
1678{
b71636e2
PM
1679 nodemask_t interleave_nodes;
1680 unsigned long largest = 0;
1681 int nid, prefer = 0;
1682
1da177e4
LT
1683 policy_cache = kmem_cache_create("numa_policy",
1684 sizeof(struct mempolicy),
20c2df83 1685 0, SLAB_PANIC, NULL);
1da177e4
LT
1686
1687 sn_cache = kmem_cache_create("shared_policy_node",
1688 sizeof(struct sp_node),
20c2df83 1689 0, SLAB_PANIC, NULL);
1da177e4 1690
b71636e2
PM
1691 /*
1692 * Set interleaving policy for system init. Interleaving is only
1693 * enabled across suitably sized nodes (default is >= 16MB), or
1694 * fall back to the largest node if they're all smaller.
1695 */
1696 nodes_clear(interleave_nodes);
56bbd65d 1697 for_each_node_state(nid, N_HIGH_MEMORY) {
b71636e2
PM
1698 unsigned long total_pages = node_present_pages(nid);
1699
1700 /* Preserve the largest node */
1701 if (largest < total_pages) {
1702 largest = total_pages;
1703 prefer = nid;
1704 }
1705
1706 /* Interleave this node? */
1707 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
1708 node_set(nid, interleave_nodes);
1709 }
1710
1711 /* All too small, use the largest */
1712 if (unlikely(nodes_empty(interleave_nodes)))
1713 node_set(prefer, interleave_nodes);
1da177e4 1714
b71636e2 1715 if (do_set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes))
1da177e4
LT
1716 printk("numa_policy_init: interleaving failed\n");
1717}
1718
8bccd85f 1719/* Reset policy of current process to default */
1da177e4
LT
1720void numa_default_policy(void)
1721{
8bccd85f 1722 do_set_mempolicy(MPOL_DEFAULT, NULL);
1da177e4 1723}
68860ec1
PJ
1724
1725/* Migrate a policy to a different set of nodes */
dbcb0f19
AB
1726static void mpol_rebind_policy(struct mempolicy *pol,
1727 const nodemask_t *newmask)
68860ec1 1728{
74cb2155 1729 nodemask_t *mpolmask;
68860ec1
PJ
1730 nodemask_t tmp;
1731
1732 if (!pol)
1733 return;
74cb2155
PJ
1734 mpolmask = &pol->cpuset_mems_allowed;
1735 if (nodes_equal(*mpolmask, *newmask))
1736 return;
68860ec1
PJ
1737
1738 switch (pol->policy) {
1739 case MPOL_DEFAULT:
1740 break;
1741 case MPOL_INTERLEAVE:
74cb2155 1742 nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
68860ec1 1743 pol->v.nodes = tmp;
74cb2155
PJ
1744 *mpolmask = *newmask;
1745 current->il_next = node_remap(current->il_next,
1746 *mpolmask, *newmask);
68860ec1
PJ
1747 break;
1748 case MPOL_PREFERRED:
1749 pol->v.preferred_node = node_remap(pol->v.preferred_node,
74cb2155
PJ
1750 *mpolmask, *newmask);
1751 *mpolmask = *newmask;
68860ec1
PJ
1752 break;
1753 case MPOL_BIND: {
1754 nodemask_t nodes;
1755 struct zone **z;
1756 struct zonelist *zonelist;
1757
1758 nodes_clear(nodes);
1759 for (z = pol->v.zonelist->zones; *z; z++)
89fa3024 1760 node_set(zone_to_nid(*z), nodes);
74cb2155 1761 nodes_remap(tmp, nodes, *mpolmask, *newmask);
68860ec1
PJ
1762 nodes = tmp;
1763
1764 zonelist = bind_zonelist(&nodes);
1765
1766 /* If no mem, then zonelist is NULL and we keep old zonelist.
1767 * If that old zonelist has no remaining mems_allowed nodes,
1768 * then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT.
1769 */
1770
8af5e2eb 1771 if (!IS_ERR(zonelist)) {
68860ec1
PJ
1772 /* Good - got mem - substitute new zonelist */
1773 kfree(pol->v.zonelist);
1774 pol->v.zonelist = zonelist;
1775 }
74cb2155 1776 *mpolmask = *newmask;
68860ec1
PJ
1777 break;
1778 }
1779 default:
1780 BUG();
1781 break;
1782 }
1783}
1784
1785/*
74cb2155
PJ
1786 * Wrapper for mpol_rebind_policy() that just requires task
1787 * pointer, and updates task mempolicy.
68860ec1 1788 */
74cb2155
PJ
1789
1790void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
68860ec1 1791{
74cb2155 1792 mpol_rebind_policy(tsk->mempolicy, new);
68860ec1 1793}
1a75a6c8 1794
4225399a
PJ
1795/*
1796 * Rebind each vma in mm to new nodemask.
1797 *
1798 * Call holding a reference to mm. Takes mm->mmap_sem during call.
1799 */
1800
1801void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
1802{
1803 struct vm_area_struct *vma;
1804
1805 down_write(&mm->mmap_sem);
1806 for (vma = mm->mmap; vma; vma = vma->vm_next)
1807 mpol_rebind_policy(vma->vm_policy, new);
1808 up_write(&mm->mmap_sem);
1809}
1810
/*
 * Display pages allocated per node and memory policy via /proc.
 */

static const char * const policy_types[] =
	{ "default", "prefer", "bind", "interleave" };

/*
 * Convert a mempolicy into a string.
 * Returns the number of characters in buffer (if positive)
 * or an error (negative)
 */
1823static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1824{
1825 char *p = buffer;
1826 int l;
1827 nodemask_t nodes;
1828 int mode = pol ? pol->policy : MPOL_DEFAULT;
1829
1830 switch (mode) {
1831 case MPOL_DEFAULT:
1832 nodes_clear(nodes);
1833 break;
1834
1835 case MPOL_PREFERRED:
1836 nodes_clear(nodes);
1837 node_set(pol->v.preferred_node, nodes);
1838 break;
1839
1840 case MPOL_BIND:
1841 get_zonemask(pol, &nodes);
1842 break;
1843
1844 case MPOL_INTERLEAVE:
1845 nodes = pol->v.nodes;
1846 break;
1847
1848 default:
1849 BUG();
1850 return -EFAULT;
1851 }
1852
1853 l = strlen(policy_types[mode]);
1854 if (buffer + maxlen < p + l + 1)
1855 return -ENOSPC;
1856
1857 strcpy(p, policy_types[mode]);
1858 p += l;
1859
1860 if (!nodes_empty(nodes)) {
1861 if (buffer + maxlen < p + 2)
1862 return -ENOSPC;
1863 *p++ = '=';
1864 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
1865 }
1866 return p - buffer;
1867}
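
/*
 * Illustrative only (not from the original source): for a task interleaving
 * over nodes 0-3 the buffer above ends up holding "interleave=0-3"; a
 * preferred policy on node 1 becomes "prefer=1" and the default policy is
 * just "default", which is the format show_numa_map() prints below.
 */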
1868
1869struct numa_maps {
1870 unsigned long pages;
1871 unsigned long anon;
397874df
CL
1872 unsigned long active;
1873 unsigned long writeback;
1a75a6c8 1874 unsigned long mapcount_max;
397874df
CL
1875 unsigned long dirty;
1876 unsigned long swapcache;
1a75a6c8
CL
1877 unsigned long node[MAX_NUMNODES];
1878};
1879
397874df 1880static void gather_stats(struct page *page, void *private, int pte_dirty)
1a75a6c8
CL
1881{
1882 struct numa_maps *md = private;
1883 int count = page_mapcount(page);
1884
397874df
CL
1885 md->pages++;
1886 if (pte_dirty || PageDirty(page))
1887 md->dirty++;
1a75a6c8 1888
397874df
CL
1889 if (PageSwapCache(page))
1890 md->swapcache++;
1a75a6c8 1891
397874df
CL
1892 if (PageActive(page))
1893 md->active++;
1894
1895 if (PageWriteback(page))
1896 md->writeback++;
1a75a6c8
CL
1897
1898 if (PageAnon(page))
1899 md->anon++;
1900
397874df
CL
1901 if (count > md->mapcount_max)
1902 md->mapcount_max = count;
1903
1a75a6c8 1904 md->node[page_to_nid(page)]++;
1a75a6c8
CL
1905}
1906
7f709ed0 1907#ifdef CONFIG_HUGETLB_PAGE
397874df
CL
1908static void check_huge_range(struct vm_area_struct *vma,
1909 unsigned long start, unsigned long end,
1910 struct numa_maps *md)
1911{
1912 unsigned long addr;
1913 struct page *page;
1914
1915 for (addr = start; addr < end; addr += HPAGE_SIZE) {
1916 pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
1917 pte_t pte;
1918
1919 if (!ptep)
1920 continue;
1921
1922 pte = *ptep;
1923 if (pte_none(pte))
1924 continue;
1925
1926 page = pte_page(pte);
1927 if (!page)
1928 continue;
1929
1930 gather_stats(page, md, pte_dirty(*ptep));
1931 }
1932}
7f709ed0
AM
1933#else
1934static inline void check_huge_range(struct vm_area_struct *vma,
1935 unsigned long start, unsigned long end,
1936 struct numa_maps *md)
1937{
1938}
1939#endif
397874df 1940
1a75a6c8
CL
1941int show_numa_map(struct seq_file *m, void *v)
1942{
99f89551 1943 struct proc_maps_private *priv = m->private;
1a75a6c8
CL
1944 struct vm_area_struct *vma = v;
1945 struct numa_maps *md;
397874df
CL
1946 struct file *file = vma->vm_file;
1947 struct mm_struct *mm = vma->vm_mm;
480eccf9 1948 struct mempolicy *pol;
1a75a6c8
CL
1949 int n;
1950 char buffer[50];
1951
397874df 1952 if (!mm)
1a75a6c8
CL
1953 return 0;
1954
1955 md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
1956 if (!md)
1957 return 0;
1958
480eccf9
LS
1959 pol = get_vma_policy(priv->task, vma, vma->vm_start);
1960 mpol_to_str(buffer, sizeof(buffer), pol);
1961 /*
1962 * unref shared or other task's mempolicy
1963 */
1964 if (pol != &default_policy && pol != current->mempolicy)
1965 __mpol_free(pol);
397874df
CL
1966
1967 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1968
1969 if (file) {
1970 seq_printf(m, " file=");
e9536ae7 1971 seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n\t= ");
397874df
CL
1972 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1973 seq_printf(m, " heap");
1974 } else if (vma->vm_start <= mm->start_stack &&
1975 vma->vm_end >= mm->start_stack) {
1976 seq_printf(m, " stack");
1977 }
1978
1979 if (is_vm_hugetlb_page(vma)) {
1980 check_huge_range(vma, vma->vm_start, vma->vm_end, md);
1981 seq_printf(m, " huge");
1982 } else {
a57ebfdb 1983 check_pgd_range(vma, vma->vm_start, vma->vm_end,
56bbd65d 1984 &node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
397874df
CL
1985 }
1986
1987 if (!md->pages)
1988 goto out;
1a75a6c8 1989
397874df
CL
1990 if (md->anon)
1991 seq_printf(m," anon=%lu",md->anon);
1a75a6c8 1992
397874df
CL
1993 if (md->dirty)
1994 seq_printf(m," dirty=%lu",md->dirty);
1a75a6c8 1995
397874df
CL
1996 if (md->pages != md->anon && md->pages != md->dirty)
1997 seq_printf(m, " mapped=%lu", md->pages);
1a75a6c8 1998
397874df
CL
1999 if (md->mapcount_max > 1)
2000 seq_printf(m, " mapmax=%lu", md->mapcount_max);
1a75a6c8 2001
397874df
CL
2002 if (md->swapcache)
2003 seq_printf(m," swapcache=%lu", md->swapcache);
2004
2005 if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2006 seq_printf(m," active=%lu", md->active);
2007
2008 if (md->writeback)
2009 seq_printf(m," writeback=%lu", md->writeback);
2010
56bbd65d 2011 for_each_node_state(n, N_HIGH_MEMORY)
397874df
CL
2012 if (md->node[n])
2013 seq_printf(m, " N%d=%lu", n, md->node[n]);
2014out:
2015 seq_putc(m, '\n');
1a75a6c8
CL
2016 kfree(md);
2017
2018 if (m->count < m->size)
99f89551 2019 m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
1a75a6c8
CL
2020 return 0;
2021}
2022