1 /*
2 * Simple NUMA memory policy for the Linux kernel.
3 *
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6 * Subject to the GNU Public License, version 2.
7 *
8 * NUMA policy allows the user to give hints about the node(s) on which
9 * memory should be allocated.
10 *
11 * Support four policies per VMA and per process:
12 *
13 * The VMA policy has priority over the process policy for a page fault.
14 *
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * offset into the mapping for anonymous memory. For process policy a
20 * is used.
21 *
22 * bind Only allocate memory on a specific set of nodes,
23 * no fallback.
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
27 *
28 * preferred Try a specific node first before normal fallback.
29 * As a special case node -1 here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
32 * process policy.
33 *
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
37 *
38 * The process policy is applied for most non-interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
42 *
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
46 *
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49 * on systems with highmem, kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
51 *
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
54 */
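/*
 * Illustrative userspace usage (a sketch, not part of this file: glibc
 * provides no wrappers for these syscalls, so real code goes through
 * libnuma or raw syscall(2); addr/length stand for an existing mapping):
 *
 *	unsigned long interleave_mask = (1UL << 0) | (1UL << 1);
 *	unsigned long bind_mask = 1UL << 0;
 *
 *	interleave this process' future allocations over nodes 0 and 1:
 *	syscall(__NR_set_mempolicy, MPOL_INTERLEAVE, &interleave_mask,
 *		8 * sizeof(interleave_mask) + 1);
 *
 *	restrict an existing mapping to node 0, failing with EIO if any
 *	already-present page violates the policy:
 *	syscall(__NR_mbind, addr, length, MPOL_BIND, &bind_mask,
 *		8 * sizeof(bind_mask) + 1, MPOL_MF_STRICT);
 */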
55
56 /* Notebook:
57 fix mmap readahead to honour policy and enable policy for any page cache
58 object
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
61 first item above.
62 handle mremap for shared memory (currently ignored for the policy)
63 grows down?
64 make bind policy root only? It can trigger oom much faster and the
65 kernel is not always graceful about that.
66 could replace all the switch()es with a mempolicy_ops structure.
67 */
68
69 #include <linux/mempolicy.h>
70 #include <linux/mm.h>
71 #include <linux/highmem.h>
72 #include <linux/hugetlb.h>
73 #include <linux/kernel.h>
74 #include <linux/sched.h>
76 #include <linux/nodemask.h>
77 #include <linux/cpuset.h>
78 #include <linux/gfp.h>
79 #include <linux/slab.h>
80 #include <linux/string.h>
81 #include <linux/module.h>
82 #include <linux/interrupt.h>
83 #include <linux/init.h>
84 #include <linux/compat.h>
86 #include <linux/swap.h>
87 #include <linux/seq_file.h>
88 #include <linux/proc_fs.h>
89
90 #include <asm/tlbflush.h>
91 #include <asm/uaccess.h>
92
93 /* Internal flags */
94 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
95 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
96 #define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2) /* Gather statistics */
97
98 /* The number of pages to migrate per call to migrate_pages() */
99 #define MIGRATE_CHUNK_SIZE 256
100
101 static kmem_cache_t *policy_cache;
102 static kmem_cache_t *sn_cache;
103
104 #define PDprintk(fmt...)
105
106 /* Highest zone. A specific allocation for a zone below that is not
107 policied. */
108 int policy_zone = ZONE_DMA;
109
110 struct mempolicy default_policy = {
111 .refcnt = ATOMIC_INIT(1), /* never free it */
112 .policy = MPOL_DEFAULT,
113 };
114
115 /* Do sanity checking on a policy */
116 static int mpol_check_policy(int mode, nodemask_t *nodes)
117 {
118 int empty = nodes_empty(*nodes);
119
120 switch (mode) {
121 case MPOL_DEFAULT:
122 if (!empty)
123 return -EINVAL;
124 break;
125 case MPOL_BIND:
126 case MPOL_INTERLEAVE:
127 /* Preferred will only use the first bit, but allow
128 more for now. */
129 if (empty)
130 return -EINVAL;
131 break;
132 }
133 return nodes_subset(*nodes, node_online_map) ? 0 : -EINVAL;
134 }
135
136 /* Generate a custom zonelist for the BIND policy. */
137 static struct zonelist *bind_zonelist(nodemask_t *nodes)
138 {
139 struct zonelist *zl;
140 int num, max, nd, k;
141
142 max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
143 zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
144 if (!zl)
145 return NULL;
146 num = 0;
147 /* First put in the highest zones from all nodes, then all the next
148 lower zones etc. Avoid empty zones because the memory allocator
149 doesn't like them. If you implement node hot removal you
150 have to fix that. */
151 for (k = policy_zone; k >= 0; k--) {
152 for_each_node_mask(nd, *nodes) {
153 struct zone *z = &NODE_DATA(nd)->node_zones[k];
154 if (z->present_pages > 0)
155 zl->zones[num++] = z;
156 }
157 }
158 zl->zones[num] = NULL;
159 return zl;
160 }
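/*
 * Example of the ordering built above: for nodes = {0,2} with
 * policy_zone = ZONE_NORMAL, assuming both nodes have present pages in
 * ZONE_NORMAL and ZONE_DMA, the resulting list is
 *
 *	node 0/NORMAL, node 2/NORMAL, node 0/DMA, node 2/DMA, NULL
 *
 * so allocations sweep the highest policied zone of every bound node
 * before falling back to lower zones, and empty zones never appear.
 */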
161
162 /* Create a new policy */
163 static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
164 {
165 struct mempolicy *policy;
166
167 PDprintk("setting mode %d nodes[0] %lx\n", mode, nodes_addr(*nodes)[0]);
168 if (mode == MPOL_DEFAULT)
169 return NULL;
170 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
171 if (!policy)
172 return ERR_PTR(-ENOMEM);
173 atomic_set(&policy->refcnt, 1);
174 switch (mode) {
175 case MPOL_INTERLEAVE:
176 policy->v.nodes = *nodes;
177 if (nodes_weight(*nodes) == 0) {
178 kmem_cache_free(policy_cache, policy);
179 return ERR_PTR(-EINVAL);
180 }
181 break;
182 case MPOL_PREFERRED:
183 policy->v.preferred_node = first_node(*nodes);
184 if (policy->v.preferred_node >= MAX_NUMNODES)
185 policy->v.preferred_node = -1;
186 break;
187 case MPOL_BIND:
188 policy->v.zonelist = bind_zonelist(nodes);
189 if (policy->v.zonelist == NULL) {
190 kmem_cache_free(policy_cache, policy);
191 return ERR_PTR(-ENOMEM);
192 }
193 break;
194 }
195 policy->policy = mode;
196 policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
197 return policy;
198 }
199
200 static void gather_stats(struct page *, void *);
201 static void migrate_page_add(struct page *page, struct list_head *pagelist,
202 unsigned long flags);
203
204 /* Scan through pages checking if pages follow certain conditions. */
205 static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
206 unsigned long addr, unsigned long end,
207 const nodemask_t *nodes, unsigned long flags,
208 void *private)
209 {
210 pte_t *orig_pte;
211 pte_t *pte;
212 spinlock_t *ptl;
213
214 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
215 do {
216 struct page *page;
217 unsigned int nid;
218
219 if (!pte_present(*pte))
220 continue;
221 page = vm_normal_page(vma, addr, *pte);
222 if (!page)
223 continue;
224 /*
225 * The check for PageReserved here is important to avoid
226 * handling zero pages and other pages that may have been
227 * marked special by the system.
228 *
229 * If the PageReserved would not be checked here then f.e.
230 * the location of the zero page could have an influence
231 * on MPOL_MF_STRICT, zero pages would be counted for
232 * the per node stats, and there would be useless attempts
233 * to put zero pages on the migration list.
234 */
235 if (PageReserved(page))
236 continue;
237 nid = page_to_nid(page);
238 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
239 continue;
240
241 if (flags & MPOL_MF_STATS)
242 gather_stats(page, private);
243 else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
244 migrate_page_add(page, private, flags);
245 else
246 break;
247 } while (pte++, addr += PAGE_SIZE, addr != end);
248 pte_unmap_unlock(orig_pte, ptl);
249 return addr != end;
250 }
251
252 static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
253 unsigned long addr, unsigned long end,
254 const nodemask_t *nodes, unsigned long flags,
255 void *private)
256 {
257 pmd_t *pmd;
258 unsigned long next;
259
260 pmd = pmd_offset(pud, addr);
261 do {
262 next = pmd_addr_end(addr, end);
263 if (pmd_none_or_clear_bad(pmd))
264 continue;
265 if (check_pte_range(vma, pmd, addr, next, nodes,
266 flags, private))
267 return -EIO;
268 } while (pmd++, addr = next, addr != end);
269 return 0;
270 }
271
272 static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
273 unsigned long addr, unsigned long end,
274 const nodemask_t *nodes, unsigned long flags,
275 void *private)
276 {
277 pud_t *pud;
278 unsigned long next;
279
280 pud = pud_offset(pgd, addr);
281 do {
282 next = pud_addr_end(addr, end);
283 if (pud_none_or_clear_bad(pud))
284 continue;
285 if (check_pmd_range(vma, pud, addr, next, nodes,
286 flags, private))
287 return -EIO;
288 } while (pud++, addr = next, addr != end);
289 return 0;
290 }
291
292 static inline int check_pgd_range(struct vm_area_struct *vma,
293 unsigned long addr, unsigned long end,
294 const nodemask_t *nodes, unsigned long flags,
295 void *private)
296 {
297 pgd_t *pgd;
298 unsigned long next;
299
300 pgd = pgd_offset(vma->vm_mm, addr);
301 do {
302 next = pgd_addr_end(addr, end);
303 if (pgd_none_or_clear_bad(pgd))
304 continue;
305 if (check_pud_range(vma, pgd, addr, next, nodes,
306 flags, private))
307 return -EIO;
308 } while (pgd++, addr = next, addr != end);
309 return 0;
310 }
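/*
 * The three helpers above are a standard four-level page table walk
 * (pgd -> pud -> pmd -> pte): the upper levels only skip holes via the
 * *_none_or_clear_bad() checks, and all page examination happens at
 * the pte level in check_pte_range().
 */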
311
312 /* Check if a vma is migratable */
313 static inline int vma_migratable(struct vm_area_struct *vma)
314 {
315 if (vma->vm_flags & (
316 VM_LOCKED|VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
317 return 0;
318 return 1;
319 }
320
321 /*
322 * Check if all pages in a range are on a set of nodes.
323 * If pagelist != NULL then isolate pages from the LRU and
324 * put them on the pagelist.
325 */
326 static struct vm_area_struct *
327 check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
328 const nodemask_t *nodes, unsigned long flags, void *private)
329 {
330 int err;
331 struct vm_area_struct *first, *vma, *prev;
332
333 /* Clear the LRU lists so pages can be isolated */
334 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
335 lru_add_drain_all();
336
337 first = find_vma(mm, start);
338 if (!first)
339 return ERR_PTR(-EFAULT);
340 prev = NULL;
341 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
342 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
343 if (!vma->vm_next && vma->vm_end < end)
344 return ERR_PTR(-EFAULT);
345 if (prev && prev->vm_end < vma->vm_start)
346 return ERR_PTR(-EFAULT);
347 }
348 if (!is_vm_hugetlb_page(vma) &&
349 ((flags & MPOL_MF_STRICT) ||
350 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
351 vma_migratable(vma)))) {
352 unsigned long endvma = vma->vm_end;
353
354 if (endvma > end)
355 endvma = end;
356 if (vma->vm_start > start)
357 start = vma->vm_start;
358 err = check_pgd_range(vma, start, endvma, nodes,
359 flags, private);
360 if (err) {
361 first = ERR_PTR(err);
362 break;
363 }
364 }
365 prev = vma;
366 }
367 return first;
368 }
369
370 /* Apply policy to a single VMA */
371 static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
372 {
373 int err = 0;
374 struct mempolicy *old = vma->vm_policy;
375
376 PDprintk("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
377 vma->vm_start, vma->vm_end, vma->vm_pgoff,
378 vma->vm_ops, vma->vm_file,
379 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
380
381 if (vma->vm_ops && vma->vm_ops->set_policy)
382 err = vma->vm_ops->set_policy(vma, new);
383 if (!err) {
384 mpol_get(new);
385 vma->vm_policy = new;
386 mpol_free(old);
387 }
388 return err;
389 }
390
391 /* Step 2: apply policy to a range and do splits. */
392 static int mbind_range(struct vm_area_struct *vma, unsigned long start,
393 unsigned long end, struct mempolicy *new)
394 {
395 struct vm_area_struct *next;
396 int err;
397
398 err = 0;
399 for (; vma && vma->vm_start < end; vma = next) {
400 next = vma->vm_next;
401 if (vma->vm_start < start)
402 err = split_vma(vma->vm_mm, vma, start, 1);
403 if (!err && vma->vm_end > end)
404 err = split_vma(vma->vm_mm, vma, end, 0);
405 if (!err)
406 err = policy_vma(vma, new);
407 if (err)
408 break;
409 }
410 return err;
411 }
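/*
 * Example: if a single VMA spans [A,B) with A < start < end < B, the
 * loop above splits it twice, leaving [A,start), [start,end) and
 * [end,B), and installs the new policy only on the middle piece via
 * policy_vma().
 */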
412
413 static int contextualize_policy(int mode, nodemask_t *nodes)
414 {
415 if (!nodes)
416 return 0;
417
418 cpuset_update_task_memory_state();
419 if (!cpuset_nodes_subset_current_mems_allowed(*nodes))
420 return -EINVAL;
421 return mpol_check_policy(mode, nodes);
422 }
423
424 /* Set the process memory policy */
425 long do_set_mempolicy(int mode, nodemask_t *nodes)
426 {
427 struct mempolicy *new;
428
429 if (contextualize_policy(mode, nodes))
430 return -EINVAL;
431 new = mpol_new(mode, nodes);
432 if (IS_ERR(new))
433 return PTR_ERR(new);
434 mpol_free(current->mempolicy);
435 current->mempolicy = new;
436 if (new && new->policy == MPOL_INTERLEAVE)
437 current->il_next = first_node(new->v.nodes);
438 return 0;
439 }
440
441 /* Fill a zone bitmap for a policy */
442 static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
443 {
444 int i;
445
446 nodes_clear(*nodes);
447 switch (p->policy) {
448 case MPOL_BIND:
449 for (i = 0; p->v.zonelist->zones[i]; i++)
450 node_set(p->v.zonelist->zones[i]->zone_pgdat->node_id,
451 *nodes);
452 break;
453 case MPOL_DEFAULT:
454 break;
455 case MPOL_INTERLEAVE:
456 *nodes = p->v.nodes;
457 break;
458 case MPOL_PREFERRED:
459 /* or use current node instead of online map? */
460 if (p->v.preferred_node < 0)
461 *nodes = node_online_map;
462 else
463 node_set(p->v.preferred_node, *nodes);
464 break;
465 default:
466 BUG();
467 }
468 }
469
470 static int lookup_node(struct mm_struct *mm, unsigned long addr)
471 {
472 struct page *p;
473 int err;
474
475 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
476 if (err >= 0) {
477 err = page_to_nid(p);
478 put_page(p);
479 }
480 return err;
481 }
482
483 /* Retrieve NUMA policy */
484 long do_get_mempolicy(int *policy, nodemask_t *nmask,
485 unsigned long addr, unsigned long flags)
486 {
487 int err;
488 struct mm_struct *mm = current->mm;
489 struct vm_area_struct *vma = NULL;
490 struct mempolicy *pol = current->mempolicy;
491
492 cpuset_update_task_memory_state();
493 if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR))
494 return -EINVAL;
495 if (flags & MPOL_F_ADDR) {
496 down_read(&mm->mmap_sem);
497 vma = find_vma_intersection(mm, addr, addr+1);
498 if (!vma) {
499 up_read(&mm->mmap_sem);
500 return -EFAULT;
501 }
502 if (vma->vm_ops && vma->vm_ops->get_policy)
503 pol = vma->vm_ops->get_policy(vma, addr);
504 else
505 pol = vma->vm_policy;
506 } else if (addr)
507 return -EINVAL;
508
509 if (!pol)
510 pol = &default_policy;
511
512 if (flags & MPOL_F_NODE) {
513 if (flags & MPOL_F_ADDR) {
514 err = lookup_node(mm, addr);
515 if (err < 0)
516 goto out;
517 *policy = err;
518 } else if (pol == current->mempolicy &&
519 pol->policy == MPOL_INTERLEAVE) {
520 *policy = current->il_next;
521 } else {
522 err = -EINVAL;
523 goto out;
524 }
525 } else
526 *policy = pol->policy;
527
528 if (vma) {
529 up_read(&current->mm->mmap_sem);
530 vma = NULL;
531 }
532
533 err = 0;
534 if (nmask)
535 get_zonemask(pol, nmask);
536
537 out:
538 if (vma)
539 up_read(&current->mm->mmap_sem);
540 return err;
541 }
542
543 /*
544 * page migration
545 */
546
547 static void migrate_page_add(struct page *page, struct list_head *pagelist,
548 unsigned long flags)
549 {
550 /*
551 * Avoid migrating a page that is shared with others.
552 */
553 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
554 if (isolate_lru_page(page))
555 list_add(&page->lru, pagelist);
556 }
557 }
558
559 /*
560 * Migrate the list 'pagelist' of pages to a certain destination.
561 *
562 * Specify destination with either non-NULL vma or dest_node >= 0
563 * Return the number of pages not migrated or error code
564 */
565 static int migrate_pages_to(struct list_head *pagelist,
566 struct vm_area_struct *vma, int dest)
567 {
568 LIST_HEAD(newlist);
569 LIST_HEAD(moved);
570 LIST_HEAD(failed);
571 int err = 0;
572 int nr_pages;
573 struct page *page;
574 struct list_head *p;
575
576 redo:
577 nr_pages = 0;
578 list_for_each(p, pagelist) {
579 if (vma)
580 page = alloc_page_vma(GFP_HIGHUSER, vma, vma->vm_start);
581 else
582 page = alloc_pages_node(dest, GFP_HIGHUSER, 0);
583
584 if (!page) {
585 err = -ENOMEM;
586 goto out;
587 }
588 list_add(&page->lru, &newlist);
589 nr_pages++;
590 		if (nr_pages > MIGRATE_CHUNK_SIZE)
591 			break;
592 }
593 err = migrate_pages(pagelist, &newlist, &moved, &failed);
594
595 putback_lru_pages(&moved); /* Call release pages instead ?? */
596
597 if (err >= 0 && list_empty(&newlist) && !list_empty(pagelist))
598 goto redo;
599 out:
600 /* Return leftover allocated pages */
601 while (!list_empty(&newlist)) {
602 page = list_entry(newlist.next, struct page, lru);
603 list_del(&page->lru);
604 __free_page(page);
605 }
606 list_splice(&failed, pagelist);
607 if (err < 0)
608 return err;
609
610 /* Calculate number of leftover pages */
611 nr_pages = 0;
612 list_for_each(p, pagelist)
613 nr_pages++;
614 return nr_pages;
615 }
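/*
 * Example: migrating roughly 600 pages takes three trips through the
 * redo loop above; each pass preallocates at most one chunk (just over
 * MIGRATE_CHUNK_SIZE target pages) before handing both lists to
 * migrate_pages(), which bounds the memory held in newlist at any one
 * time.
 */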
616
617 /*
618 * Migrate pages from one node to a target node.
619 * Returns error or the number of pages not migrated.
620 */
621 int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags)
622 {
623 nodemask_t nmask;
624 LIST_HEAD(pagelist);
625 int err = 0;
626
627 nodes_clear(nmask);
628 node_set(source, nmask);
629
630 check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
631 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
632
633 if (!list_empty(&pagelist)) {
634 err = migrate_pages_to(&pagelist, NULL, dest);
635 if (!list_empty(&pagelist))
636 putback_lru_pages(&pagelist);
637 }
638 return err;
639 }
640
641 /*
642 * Move pages between the two nodesets so as to preserve the physical
643 * layout as much as possible.
644 *
645 * Returns the number of pages that could not be moved.
646 */
647 int do_migrate_pages(struct mm_struct *mm,
648 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
649 {
650 LIST_HEAD(pagelist);
651 int busy = 0;
652 int err = 0;
653 nodemask_t tmp;
654
655 down_read(&mm->mmap_sem);
656
657 /*
658 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
659 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
660 * bit in 'tmp', and return that <source, dest> pair for migration.
661 * The pair of nodemasks 'to' and 'from' define the map.
662 *
663 * If no pair of bits is found that way, fallback to picking some
664 * pair of 'source' and 'dest' bits that are not the same. If the
665 * 'source' and 'dest' bits are the same, this represents a node
666 * that will be migrating to itself, so no pages need move.
667 *
668 * If no bits are left in 'tmp', or if all remaining bits left
669 * in 'tmp' correspond to the same bit in 'to', return false
670 * (nothing left to migrate).
671 *
672 * This lets us pick a pair of nodes to migrate between, such that
673 * if possible the dest node is not already occupied by some other
674 * source node, minimizing the risk of overloading the memory on a
675 * node that would happen if we migrated incoming memory to a node
676 * before migrating outgoing memory source that same node.
677 *
678 * A single scan of tmp is sufficient. As we go, we remember the
679 * most recent <s, d> pair that moved (s != d). If we find a pair
680 * that not only moved, but what's better, moved to an empty slot
681 * (d is not set in tmp), then we break out then, with that pair.
682 * Otherwise when we finish scanning tmp, we at least have the
683 * most recent <s, d> pair that moved. If we get all the way through
684 * the scan of tmp without finding any node that moved, much less
685 * moved to an empty node, then there is nothing left worth migrating.
686 */
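/*
 * Worked example: from = {0,1}, to = {1,2}.  node_remap() maps 0->1
 * and 1->2.  The scan first finds <s,d> = <0,1>, but dest 1 is still
 * set in tmp, so it keeps going and settles on <1,2>, whose dest lies
 * outside tmp.  Node 1 is therefore emptied onto node 2 first, and
 * node 0 is moved onto the freshly vacated node 1 on the next pass.
 */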
687
688 tmp = *from_nodes;
689 while (!nodes_empty(tmp)) {
690 int s,d;
691 int source = -1;
692 int dest = 0;
693
694 for_each_node_mask(s, tmp) {
695 d = node_remap(s, *from_nodes, *to_nodes);
696 if (s == d)
697 continue;
698
699 source = s; /* Node moved. Memorize */
700 dest = d;
701
702 /* dest not in remaining from nodes? */
703 if (!node_isset(dest, tmp))
704 break;
705 }
706 if (source == -1)
707 break;
708
709 node_clear(source, tmp);
710 err = migrate_to_node(mm, source, dest, flags);
711 if (err > 0)
712 busy += err;
713 if (err < 0)
714 break;
715 }
716
717 up_read(&mm->mmap_sem);
718 if (err < 0)
719 return err;
720 return busy;
721 }
722
723 long do_mbind(unsigned long start, unsigned long len,
724 unsigned long mode, nodemask_t *nmask, unsigned long flags)
725 {
726 struct vm_area_struct *vma;
727 struct mm_struct *mm = current->mm;
728 struct mempolicy *new;
729 unsigned long end;
730 int err;
731 LIST_HEAD(pagelist);
732
733 if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
734 MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
735 || mode > MPOL_MAX)
736 return -EINVAL;
737 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_RESOURCE))
738 return -EPERM;
739
740 if (start & ~PAGE_MASK)
741 return -EINVAL;
742
743 if (mode == MPOL_DEFAULT)
744 flags &= ~MPOL_MF_STRICT;
745
746 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
747 end = start + len;
748
749 if (end < start)
750 return -EINVAL;
751 if (end == start)
752 return 0;
753
754 if (mpol_check_policy(mode, nmask))
755 return -EINVAL;
756
757 new = mpol_new(mode, nmask);
758 if (IS_ERR(new))
759 return PTR_ERR(new);
760
761 /*
762 * If we are using the default policy then operation
763 * on discontinuous address spaces is okay after all
764 */
765 if (!new)
766 flags |= MPOL_MF_DISCONTIG_OK;
767
768 PDprintk("mbind %lx-%lx mode:%ld nodes:%lx\n",start,start+len,
769 mode,nodes_addr(nodes)[0]);
770
771 down_write(&mm->mmap_sem);
772 vma = check_range(mm, start, end, nmask,
773 flags | MPOL_MF_INVERT, &pagelist);
774
775 err = PTR_ERR(vma);
776 if (!IS_ERR(vma)) {
777 int nr_failed = 0;
778
779 err = mbind_range(vma, start, end, new);
780
781 if (!list_empty(&pagelist))
782 nr_failed = migrate_pages_to(&pagelist, vma, -1);
783
784 if (!err && nr_failed && (flags & MPOL_MF_STRICT))
785 err = -EIO;
786 }
787 if (!list_empty(&pagelist))
788 putback_lru_pages(&pagelist);
789
790 up_write(&mm->mmap_sem);
791 mpol_free(new);
792 return err;
793 }
794
795 /*
796 * User space interface with variable sized bitmaps for nodelists.
797 */
798
799 /* Copy a node mask from user space. */
800 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
801 unsigned long maxnode)
802 {
803 unsigned long k;
804 unsigned long nlongs;
805 unsigned long endmask;
806
807 --maxnode;
808 nodes_clear(*nodes);
809 if (maxnode == 0 || !nmask)
810 return 0;
811
812 nlongs = BITS_TO_LONGS(maxnode);
813 if ((maxnode % BITS_PER_LONG) == 0)
814 endmask = ~0UL;
815 else
816 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
817
818 	/* If the user specified more nodes than supported, just check
819 	   that the unsupported part is all zero. */
820 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
821 if (nlongs > PAGE_SIZE/sizeof(long))
822 return -EINVAL;
823 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
824 unsigned long t;
825 if (get_user(t, nmask + k))
826 return -EFAULT;
827 if (k == nlongs - 1) {
828 if (t & endmask)
829 return -EINVAL;
830 } else if (t)
831 return -EINVAL;
832 }
833 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
834 endmask = ~0UL;
835 }
836
837 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
838 return -EFAULT;
839 nodes_addr(*nodes)[nlongs-1] &= endmask;
840 return 0;
841 }
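/*
 * Note the off-by-one baked into the interface by the leading
 * decrement above: only bits 0..maxnode-2 of the user mask are
 * honoured.  A caller who wants every bit of a single unsigned long
 * examined therefore passes maxnode = BITS_PER_LONG + 1, e.g.
 * (illustrative, via the raw syscall):
 *
 *	unsigned long mask = 1UL << 3;
 *	set_mempolicy(MPOL_PREFERRED, &mask, 8 * sizeof(mask) + 1);
 */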
842
843 /* Copy a kernel node mask to user space */
844 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
845 nodemask_t *nodes)
846 {
847 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
848 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
849
850 if (copy > nbytes) {
851 if (copy > PAGE_SIZE)
852 return -EINVAL;
853 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
854 return -EFAULT;
855 copy = nbytes;
856 }
857 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
858 }
859
860 asmlinkage long sys_mbind(unsigned long start, unsigned long len,
861 unsigned long mode,
862 unsigned long __user *nmask, unsigned long maxnode,
863 unsigned flags)
864 {
865 nodemask_t nodes;
866 int err;
867
868 err = get_nodes(&nodes, nmask, maxnode);
869 if (err)
870 return err;
871 return do_mbind(start, len, mode, &nodes, flags);
872 }
873
874 /* Set the process memory policy */
875 asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
876 unsigned long maxnode)
877 {
878 int err;
879 nodemask_t nodes;
880
881 if (mode < 0 || mode > MPOL_MAX)
882 return -EINVAL;
883 err = get_nodes(&nodes, nmask, maxnode);
884 if (err)
885 return err;
886 return do_set_mempolicy(mode, &nodes);
887 }
888
889 asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
890 const unsigned long __user *old_nodes,
891 const unsigned long __user *new_nodes)
892 {
893 struct mm_struct *mm;
894 struct task_struct *task;
895 nodemask_t old;
896 nodemask_t new;
897 nodemask_t task_nodes;
898 int err;
899
900 err = get_nodes(&old, old_nodes, maxnode);
901 if (err)
902 return err;
903
904 err = get_nodes(&new, new_nodes, maxnode);
905 if (err)
906 return err;
907
908 /* Find the mm_struct */
909 read_lock(&tasklist_lock);
910 task = pid ? find_task_by_pid(pid) : current;
911 if (!task) {
912 read_unlock(&tasklist_lock);
913 return -ESRCH;
914 }
915 mm = get_task_mm(task);
916 read_unlock(&tasklist_lock);
917
918 if (!mm)
919 return -EINVAL;
920
921 /*
922 * Check if this process has the right to modify the specified
923 * process. The right exists if the process has administrative
924 * capabilities, superuser privileges or the same
925 * userid as the target process.
926 */
927 if ((current->euid != task->suid) && (current->euid != task->uid) &&
928 (current->uid != task->suid) && (current->uid != task->uid) &&
929 !capable(CAP_SYS_ADMIN)) {
930 err = -EPERM;
931 goto out;
932 }
933
934 task_nodes = cpuset_mems_allowed(task);
935 /* Is the user allowed to access the target nodes? */
936 if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_ADMIN)) {
937 err = -EPERM;
938 goto out;
939 }
940
941 err = do_migrate_pages(mm, &old, &new, MPOL_MF_MOVE);
942 out:
943 mmput(mm);
944 return err;
945 }
946
947
948 /* Retrieve NUMA policy */
949 asmlinkage long sys_get_mempolicy(int __user *policy,
950 unsigned long __user *nmask,
951 unsigned long maxnode,
952 unsigned long addr, unsigned long flags)
953 {
954 int err, pval;
955 nodemask_t nodes;
956
957 if (nmask != NULL && maxnode < MAX_NUMNODES)
958 return -EINVAL;
959
960 err = do_get_mempolicy(&pval, &nodes, addr, flags);
961
962 if (err)
963 return err;
964
965 if (policy && put_user(pval, policy))
966 return -EFAULT;
967
968 if (nmask)
969 err = copy_nodes_to_user(nmask, maxnode, &nodes);
970
971 return err;
972 }
973
974 #ifdef CONFIG_COMPAT
975
976 asmlinkage long compat_sys_get_mempolicy(int __user *policy,
977 compat_ulong_t __user *nmask,
978 compat_ulong_t maxnode,
979 compat_ulong_t addr, compat_ulong_t flags)
980 {
981 long err;
982 unsigned long __user *nm = NULL;
983 unsigned long nr_bits, alloc_size;
984 DECLARE_BITMAP(bm, MAX_NUMNODES);
985
986 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
987 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
988
989 if (nmask)
990 nm = compat_alloc_user_space(alloc_size);
991
992 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
993
994 if (!err && nmask) {
995 err = copy_from_user(bm, nm, alloc_size);
996 /* ensure entire bitmap is zeroed */
997 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
998 err |= compat_put_bitmap(nmask, bm, nr_bits);
999 }
1000
1001 return err;
1002 }
1003
1004 asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1005 compat_ulong_t maxnode)
1006 {
1007 long err = 0;
1008 unsigned long __user *nm = NULL;
1009 unsigned long nr_bits, alloc_size;
1010 DECLARE_BITMAP(bm, MAX_NUMNODES);
1011
1012 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1013 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1014
1015 if (nmask) {
1016 err = compat_get_bitmap(bm, nmask, nr_bits);
1017 nm = compat_alloc_user_space(alloc_size);
1018 err |= copy_to_user(nm, bm, alloc_size);
1019 }
1020
1021 if (err)
1022 return -EFAULT;
1023
1024 return sys_set_mempolicy(mode, nm, nr_bits+1);
1025 }
1026
1027 asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1028 compat_ulong_t mode, compat_ulong_t __user *nmask,
1029 compat_ulong_t maxnode, compat_ulong_t flags)
1030 {
1031 long err = 0;
1032 unsigned long __user *nm = NULL;
1033 unsigned long nr_bits, alloc_size;
1034 nodemask_t bm;
1035
1036 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1037 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1038
1039 if (nmask) {
1040 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1041 nm = compat_alloc_user_space(alloc_size);
1042 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1043 }
1044
1045 if (err)
1046 return -EFAULT;
1047
1048 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1049 }
1050
1051 #endif
1052
1053 /* Return effective policy for a VMA */
1054 static struct mempolicy * get_vma_policy(struct task_struct *task,
1055 struct vm_area_struct *vma, unsigned long addr)
1056 {
1057 struct mempolicy *pol = task->mempolicy;
1058
1059 if (vma) {
1060 if (vma->vm_ops && vma->vm_ops->get_policy)
1061 pol = vma->vm_ops->get_policy(vma, addr);
1062 else if (vma->vm_policy &&
1063 vma->vm_policy->policy != MPOL_DEFAULT)
1064 pol = vma->vm_policy;
1065 }
1066 if (!pol)
1067 pol = &default_policy;
1068 return pol;
1069 }
1070
1071 /* Return a zonelist representing a mempolicy */
1072 static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
1073 {
1074 int nd;
1075
1076 switch (policy->policy) {
1077 case MPOL_PREFERRED:
1078 nd = policy->v.preferred_node;
1079 if (nd < 0)
1080 nd = numa_node_id();
1081 break;
1082 case MPOL_BIND:
1083 /* Lower zones don't get a policy applied */
1084 /* Careful: current->mems_allowed might have moved */
1085 if (gfp_zone(gfp) >= policy_zone)
1086 if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
1087 return policy->v.zonelist;
1088 /*FALL THROUGH*/
1089 case MPOL_INTERLEAVE: /* should not happen */
1090 case MPOL_DEFAULT:
1091 nd = numa_node_id();
1092 break;
1093 default:
1094 nd = 0;
1095 BUG();
1096 }
1097 return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
1098 }
1099
1100 /* Do dynamic interleaving for a process */
1101 static unsigned interleave_nodes(struct mempolicy *policy)
1102 {
1103 unsigned nid, next;
1104 struct task_struct *me = current;
1105
1106 nid = me->il_next;
1107 next = next_node(nid, policy->v.nodes);
1108 if (next >= MAX_NUMNODES)
1109 next = first_node(policy->v.nodes);
1110 me->il_next = next;
1111 return nid;
1112 }
1113
1114 /*
1115 * Depending on the memory policy provide a node from which to allocate the
1116 * next slab entry.
1117 */
1118 unsigned slab_node(struct mempolicy *policy)
1119 {
1120 switch (policy->policy) {
1121 case MPOL_INTERLEAVE:
1122 return interleave_nodes(policy);
1123
1124 case MPOL_BIND:
1125 /*
1126 * Follow bind policy behavior and start allocation at the
1127 * first node.
1128 */
1129 return policy->v.zonelist->zones[0]->zone_pgdat->node_id;
1130
1131 case MPOL_PREFERRED:
1132 if (policy->v.preferred_node >= 0)
1133 return policy->v.preferred_node;
1134 /* Fall through */
1135
1136 default:
1137 return numa_node_id();
1138 }
1139 }
1140
1141 /* Do static interleaving for a VMA with known offset. */
1142 static unsigned offset_il_node(struct mempolicy *pol,
1143 struct vm_area_struct *vma, unsigned long off)
1144 {
1145 unsigned nnodes = nodes_weight(pol->v.nodes);
1146 unsigned target = (unsigned)off % nnodes;
1147 int c;
1148 int nid = -1;
1149
1150 c = 0;
1151 do {
1152 nid = next_node(nid, pol->v.nodes);
1153 c++;
1154 } while (c <= target);
1155 return nid;
1156 }
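/*
 * Example: with pol->v.nodes = {1,3,5} and off = 7, nnodes = 3 and
 * target = 7 % 3 = 1, so the loop stops on the second set bit and
 * returns node 3.  A given offset always maps to the same node, which
 * is what makes VMA interleaving stable across repeated faults.
 */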
1157
1158 /* Determine a node number for interleave */
1159 static inline unsigned interleave_nid(struct mempolicy *pol,
1160 struct vm_area_struct *vma, unsigned long addr, int shift)
1161 {
1162 if (vma) {
1163 unsigned long off;
1164
1165 off = vma->vm_pgoff;
1166 off += (addr - vma->vm_start) >> shift;
1167 return offset_il_node(pol, vma, off);
1168 } else
1169 return interleave_nodes(pol);
1170 }
1171
1172 #ifdef CONFIG_HUGETLBFS
1173 /* Return a zonelist suitable for a huge page allocation. */
1174 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
1175 {
1176 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1177
1178 if (pol->policy == MPOL_INTERLEAVE) {
1179 unsigned nid;
1180
1181 nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
1182 return NODE_DATA(nid)->node_zonelists + gfp_zone(GFP_HIGHUSER);
1183 }
1184 return zonelist_policy(GFP_HIGHUSER, pol);
1185 }
1186 #endif
1187
1188 /* Allocate a page in interleaved policy.
1189 Own path because it needs to do special accounting. */
1190 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1191 unsigned nid)
1192 {
1193 struct zonelist *zl;
1194 struct page *page;
1195
1196 zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
1197 page = __alloc_pages(gfp, order, zl);
1198 if (page && page_zone(page) == zl->zones[0]) {
1199 zone_pcp(zl->zones[0],get_cpu())->interleave_hit++;
1200 put_cpu();
1201 }
1202 return page;
1203 }
1204
1205 /**
1206 * alloc_page_vma - Allocate a page for a VMA.
1207 *
1208 * @gfp:
1209 * %GFP_USER user allocation.
1210 * %GFP_KERNEL kernel allocations,
1211 * %GFP_HIGHMEM highmem/user allocations,
1212 * %GFP_FS allocation should not call back into a file system.
1213 * %GFP_ATOMIC don't sleep.
1214 *
1215 * @vma: Pointer to VMA or NULL if not available.
1216 * @addr: Virtual Address of the allocation. Must be inside the VMA.
1217 *
1218 * This function allocates a page from the kernel page pool and applies
1219 * a NUMA policy associated with the VMA or the current process.
1220 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
1221 * mm_struct of the VMA to prevent it from going away. Should be used for
1222 * all allocations for pages that will be mapped into
1223 * user space. Returns NULL when no page can be allocated.
1224 *
1225 * Should be called with the mmap_sem of the vma's mm held.
1226 */
1227 struct page *
1228 alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
1229 {
1230 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1231
1232 cpuset_update_task_memory_state();
1233
1234 if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
1235 unsigned nid;
1236
1237 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
1238 return alloc_page_interleave(gfp, 0, nid);
1239 }
1240 return __alloc_pages(gfp, 0, zonelist_policy(gfp, pol));
1241 }
1242
1243 /**
1244 * alloc_pages_current - Allocate pages.
1245 *
1246 * @gfp:
1247 * %GFP_USER user allocation,
1248 * %GFP_KERNEL kernel allocation,
1249 * %GFP_HIGHMEM highmem allocation,
1250 * %GFP_FS don't call back into a file system.
1251 * %GFP_ATOMIC don't sleep.
1252 * @order: Power of two of allocation size in pages. 0 is a single page.
1253 *
1254 * Allocate a page from the kernel page pool. When not in
1255 * interrupt context, apply the current process' NUMA policy.
1256 * Returns NULL when no page can be allocated.
1257 *
1258 * Don't call cpuset_update_task_memory_state() unless
1259 * 1) it's ok to take cpuset_sem (can WAIT), and
1260 * 2) allocating for current task (not interrupt).
1261 */
1262 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1263 {
1264 struct mempolicy *pol = current->mempolicy;
1265
1266 if ((gfp & __GFP_WAIT) && !in_interrupt())
1267 cpuset_update_task_memory_state();
1268 if (!pol || in_interrupt())
1269 pol = &default_policy;
1270 if (pol->policy == MPOL_INTERLEAVE)
1271 return alloc_page_interleave(gfp, order, interleave_nodes(pol));
1272 return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
1273 }
1274 EXPORT_SYMBOL(alloc_pages_current);
1275
1276 /*
1277 * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
1278 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
1279 * with the mems_allowed returned by cpuset_mems_allowed(). This
1280 * keeps mempolicies cpuset relative after its cpuset moves. See
1281 * further kernel/cpuset.c update_nodemask().
1282 */
1283 void *cpuset_being_rebound;
1284
1285 /* Slow path of a mempolicy copy */
1286 struct mempolicy *__mpol_copy(struct mempolicy *old)
1287 {
1288 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1289
1290 if (!new)
1291 return ERR_PTR(-ENOMEM);
1292 if (current_cpuset_is_being_rebound()) {
1293 nodemask_t mems = cpuset_mems_allowed(current);
1294 mpol_rebind_policy(old, &mems);
1295 }
1296 *new = *old;
1297 atomic_set(&new->refcnt, 1);
1298 if (new->policy == MPOL_BIND) {
1299 int sz = ksize(old->v.zonelist);
1300 		new->v.zonelist = kmalloc(sz, GFP_KERNEL);
1301 if (!new->v.zonelist) {
1302 kmem_cache_free(policy_cache, new);
1303 return ERR_PTR(-ENOMEM);
1304 }
1305 memcpy(new->v.zonelist, old->v.zonelist, sz);
1306 }
1307 return new;
1308 }
1309
1310 /* Slow path of a mempolicy comparison */
1311 int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1312 {
1313 if (!a || !b)
1314 return 0;
1315 if (a->policy != b->policy)
1316 return 0;
1317 switch (a->policy) {
1318 case MPOL_DEFAULT:
1319 return 1;
1320 case MPOL_INTERLEAVE:
1321 return nodes_equal(a->v.nodes, b->v.nodes);
1322 case MPOL_PREFERRED:
1323 return a->v.preferred_node == b->v.preferred_node;
1324 case MPOL_BIND: {
1325 int i;
1326 for (i = 0; a->v.zonelist->zones[i]; i++)
1327 if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i])
1328 return 0;
1329 return b->v.zonelist->zones[i] == NULL;
1330 }
1331 default:
1332 BUG();
1333 return 0;
1334 }
1335 }
1336
1337 /* Slow path of a mpol destructor. */
1338 void __mpol_free(struct mempolicy *p)
1339 {
1340 if (!atomic_dec_and_test(&p->refcnt))
1341 return;
1342 if (p->policy == MPOL_BIND)
1343 kfree(p->v.zonelist);
1344 p->policy = MPOL_DEFAULT;
1345 kmem_cache_free(policy_cache, p);
1346 }
1347
1348 /*
1349 * Shared memory backing store policy support.
1350 *
1351 * Remember policies even when nobody has shared memory mapped.
1352 * The policies are kept in Red-Black tree linked from the inode.
1353 * They are protected by the sp->lock spinlock, which should be held
1354 * for any accesses to the tree.
1355 */
1356
1357 /* lookup first element intersecting start-end */
1358 /* Caller holds sp->lock */
1359 static struct sp_node *
1360 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
1361 {
1362 struct rb_node *n = sp->root.rb_node;
1363
1364 while (n) {
1365 struct sp_node *p = rb_entry(n, struct sp_node, nd);
1366
1367 if (start >= p->end)
1368 n = n->rb_right;
1369 else if (end <= p->start)
1370 n = n->rb_left;
1371 else
1372 break;
1373 }
1374 if (!n)
1375 return NULL;
1376 for (;;) {
1377 struct sp_node *w = NULL;
1378 struct rb_node *prev = rb_prev(n);
1379 if (!prev)
1380 break;
1381 w = rb_entry(prev, struct sp_node, nd);
1382 if (w->end <= start)
1383 break;
1384 n = prev;
1385 }
1386 return rb_entry(n, struct sp_node, nd);
1387 }
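/*
 * Example: with stored ranges [2,5) and [5,9), sp_lookup(sp, 4, 8)
 * may first land on [5,9) during the tree descent; the backward walk
 * at the end then steps to [2,5), which still intersects [4,8), and
 * that leftmost intersecting node is returned.
 */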
1388
1389 /* Insert a new shared policy into the list. */
1390 /* Caller holds sp->lock */
1391 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
1392 {
1393 struct rb_node **p = &sp->root.rb_node;
1394 struct rb_node *parent = NULL;
1395 struct sp_node *nd;
1396
1397 while (*p) {
1398 parent = *p;
1399 nd = rb_entry(parent, struct sp_node, nd);
1400 if (new->start < nd->start)
1401 p = &(*p)->rb_left;
1402 else if (new->end > nd->end)
1403 p = &(*p)->rb_right;
1404 else
1405 BUG();
1406 }
1407 rb_link_node(&new->nd, parent, p);
1408 rb_insert_color(&new->nd, &sp->root);
1409 PDprintk("inserting %lx-%lx: %d\n", new->start, new->end,
1410 new->policy ? new->policy->policy : 0);
1411 }
1412
1413 /* Find shared policy intersecting idx */
1414 struct mempolicy *
1415 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
1416 {
1417 struct mempolicy *pol = NULL;
1418 struct sp_node *sn;
1419
1420 if (!sp->root.rb_node)
1421 return NULL;
1422 spin_lock(&sp->lock);
1423 sn = sp_lookup(sp, idx, idx+1);
1424 if (sn) {
1425 mpol_get(sn->policy);
1426 pol = sn->policy;
1427 }
1428 spin_unlock(&sp->lock);
1429 return pol;
1430 }
1431
1432 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
1433 {
1434 PDprintk("deleting %lx-l%x\n", n->start, n->end);
1435 rb_erase(&n->nd, &sp->root);
1436 mpol_free(n->policy);
1437 kmem_cache_free(sn_cache, n);
1438 }
1439
1440 struct sp_node *
1441 sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol)
1442 {
1443 struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1444
1445 if (!n)
1446 return NULL;
1447 n->start = start;
1448 n->end = end;
1449 mpol_get(pol);
1450 n->policy = pol;
1451 return n;
1452 }
1453
1454 /* Replace a policy range. */
1455 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
1456 unsigned long end, struct sp_node *new)
1457 {
1458 struct sp_node *n, *new2 = NULL;
1459
1460 restart:
1461 spin_lock(&sp->lock);
1462 n = sp_lookup(sp, start, end);
1463 /* Take care of old policies in the same range. */
1464 while (n && n->start < end) {
1465 struct rb_node *next = rb_next(&n->nd);
1466 if (n->start >= start) {
1467 if (n->end <= end)
1468 sp_delete(sp, n);
1469 else
1470 n->start = end;
1471 } else {
1472 /* Old policy spanning whole new range. */
1473 if (n->end > end) {
1474 if (!new2) {
1475 spin_unlock(&sp->lock);
1476 new2 = sp_alloc(end, n->end, n->policy);
1477 if (!new2)
1478 return -ENOMEM;
1479 goto restart;
1480 }
1481 n->end = start;
1482 sp_insert(sp, new2);
1483 new2 = NULL;
1484 break;
1485 } else
1486 n->end = start;
1487 }
1488 if (!next)
1489 break;
1490 n = rb_entry(next, struct sp_node, nd);
1491 }
1492 if (new)
1493 sp_insert(sp, new);
1494 spin_unlock(&sp->lock);
1495 if (new2) {
1496 mpol_free(new2->policy);
1497 kmem_cache_free(sn_cache, new2);
1498 }
1499 return 0;
1500 }
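/*
 * Example: if an existing node covers [0,10) and a new policy arrives
 * for [4,6), the code above allocates new2 for the tail, trims the old
 * node to [0,4), inserts new2 as [6,10), and finally inserts the new
 * [4,6) node, so the three ranges tile the original span exactly.
 */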
1501
1502 void mpol_shared_policy_init(struct shared_policy *info, int policy,
1503 nodemask_t *policy_nodes)
1504 {
1505 info->root = RB_ROOT;
1506 spin_lock_init(&info->lock);
1507
1508 if (policy != MPOL_DEFAULT) {
1509 struct mempolicy *newpol;
1510
1511 /* Falls back to MPOL_DEFAULT on any error */
1512 newpol = mpol_new(policy, policy_nodes);
1513 if (!IS_ERR(newpol)) {
1514 /* Create pseudo-vma that contains just the policy */
1515 struct vm_area_struct pvma;
1516
1517 memset(&pvma, 0, sizeof(struct vm_area_struct));
1518 /* Policy covers entire file */
1519 pvma.vm_end = TASK_SIZE;
1520 mpol_set_shared_policy(info, &pvma, newpol);
1521 mpol_free(newpol);
1522 }
1523 }
1524 }
1525
1526 int mpol_set_shared_policy(struct shared_policy *info,
1527 struct vm_area_struct *vma, struct mempolicy *npol)
1528 {
1529 int err;
1530 struct sp_node *new = NULL;
1531 unsigned long sz = vma_pages(vma);
1532
1533 PDprintk("set_shared_policy %lx sz %lu %d %lx\n",
1534 vma->vm_pgoff,
1535 sz, npol? npol->policy : -1,
1536 npol ? nodes_addr(npol->v.nodes)[0] : -1);
1537
1538 if (npol) {
1539 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
1540 if (!new)
1541 return -ENOMEM;
1542 }
1543 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
1544 if (err && new)
1545 kmem_cache_free(sn_cache, new);
1546 return err;
1547 }
1548
1549 /* Free a backing policy store on inode delete. */
1550 void mpol_free_shared_policy(struct shared_policy *p)
1551 {
1552 struct sp_node *n;
1553 struct rb_node *next;
1554
1555 if (!p->root.rb_node)
1556 return;
1557 spin_lock(&p->lock);
1558 next = rb_first(&p->root);
1559 while (next) {
1560 n = rb_entry(next, struct sp_node, nd);
1561 next = rb_next(&n->nd);
1562 rb_erase(&n->nd, &p->root);
1563 mpol_free(n->policy);
1564 kmem_cache_free(sn_cache, n);
1565 }
1566 spin_unlock(&p->lock);
1567 }
1568
1569 /* assumes fs == KERNEL_DS */
1570 void __init numa_policy_init(void)
1571 {
1572 policy_cache = kmem_cache_create("numa_policy",
1573 sizeof(struct mempolicy),
1574 0, SLAB_PANIC, NULL, NULL);
1575
1576 sn_cache = kmem_cache_create("shared_policy_node",
1577 sizeof(struct sp_node),
1578 0, SLAB_PANIC, NULL, NULL);
1579
1580 /* Set interleaving policy for system init. This way not all
1581 the data structures allocated at system boot end up in node zero. */
1582
1583 if (do_set_mempolicy(MPOL_INTERLEAVE, &node_online_map))
1584 printk("numa_policy_init: interleaving failed\n");
1585 }
1586
1587 /* Reset policy of current process to default */
1588 void numa_default_policy(void)
1589 {
1590 do_set_mempolicy(MPOL_DEFAULT, NULL);
1591 }
1592
1593 /* Migrate a policy to a different set of nodes */
1594 void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
1595 {
1596 nodemask_t *mpolmask;
1597 nodemask_t tmp;
1598
1599 if (!pol)
1600 return;
1601 mpolmask = &pol->cpuset_mems_allowed;
1602 if (nodes_equal(*mpolmask, *newmask))
1603 return;
1604
1605 switch (pol->policy) {
1606 case MPOL_DEFAULT:
1607 break;
1608 case MPOL_INTERLEAVE:
1609 nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
1610 pol->v.nodes = tmp;
1611 *mpolmask = *newmask;
1612 current->il_next = node_remap(current->il_next,
1613 *mpolmask, *newmask);
1614 break;
1615 case MPOL_PREFERRED:
1616 pol->v.preferred_node = node_remap(pol->v.preferred_node,
1617 *mpolmask, *newmask);
1618 *mpolmask = *newmask;
1619 break;
1620 case MPOL_BIND: {
1621 nodemask_t nodes;
1622 struct zone **z;
1623 struct zonelist *zonelist;
1624
1625 nodes_clear(nodes);
1626 for (z = pol->v.zonelist->zones; *z; z++)
1627 node_set((*z)->zone_pgdat->node_id, nodes);
1628 nodes_remap(tmp, nodes, *mpolmask, *newmask);
1629 nodes = tmp;
1630
1631 zonelist = bind_zonelist(&nodes);
1632
1633 /* If no mem, then zonelist is NULL and we keep old zonelist.
1634 * If that old zonelist has no remaining mems_allowed nodes,
1635 * then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT.
1636 */
1637
1638 if (zonelist) {
1639 /* Good - got mem - substitute new zonelist */
1640 kfree(pol->v.zonelist);
1641 pol->v.zonelist = zonelist;
1642 }
1643 *mpolmask = *newmask;
1644 break;
1645 }
1646 default:
1647 BUG();
1648 break;
1649 }
1650 }
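/*
 * Example: an MPOL_INTERLEAVE policy over {0,1}, created while its
 * cpuset allowed {0,1}, is remapped to {2,3} when the cpuset moves to
 * mems_allowed = {2,3}: nodes_remap() carries each set bit to the node
 * at the same relative position in the new mask.
 */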
1651
1652 /*
1653 * Wrapper for mpol_rebind_policy() that just requires task
1654 * pointer, and updates task mempolicy.
1655 */
1656
1657 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
1658 {
1659 mpol_rebind_policy(tsk->mempolicy, new);
1660 }
1661
1662 /*
1663 * Rebind each vma in mm to new nodemask.
1664 *
1665 * Call holding a reference to mm. Takes mm->mmap_sem during call.
1666 */
1667
1668 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
1669 {
1670 struct vm_area_struct *vma;
1671
1672 down_write(&mm->mmap_sem);
1673 for (vma = mm->mmap; vma; vma = vma->vm_next)
1674 mpol_rebind_policy(vma->vm_policy, new);
1675 up_write(&mm->mmap_sem);
1676 }
1677
1678 /*
1679 * Display pages allocated per node and memory policy via /proc.
1680 */
1681
1682 static const char *policy_types[] = { "default", "prefer", "bind",
1683 "interleave" };
1684
1685 /*
1686 * Convert a mempolicy into a string.
1687 * Returns the number of characters in buffer (if positive)
1688 * or an error (negative)
1689 */
1690 static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1691 {
1692 char *p = buffer;
1693 int l;
1694 nodemask_t nodes;
1695 int mode = pol ? pol->policy : MPOL_DEFAULT;
1696
1697 switch (mode) {
1698 case MPOL_DEFAULT:
1699 nodes_clear(nodes);
1700 break;
1701
1702 case MPOL_PREFERRED:
1703 nodes_clear(nodes);
1704 node_set(pol->v.preferred_node, nodes);
1705 break;
1706
1707 case MPOL_BIND:
1708 get_zonemask(pol, &nodes);
1709 break;
1710
1711 case MPOL_INTERLEAVE:
1712 nodes = pol->v.nodes;
1713 break;
1714
1715 default:
1716 BUG();
1717 return -EFAULT;
1718 }
1719
1720 l = strlen(policy_types[mode]);
1721 if (buffer + maxlen < p + l + 1)
1722 return -ENOSPC;
1723
1724 strcpy(p, policy_types[mode]);
1725 p += l;
1726
1727 if (!nodes_empty(nodes)) {
1728 if (buffer + maxlen < p + 2)
1729 return -ENOSPC;
1730 *p++ = '=';
1731 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
1732 }
1733 return p - buffer;
1734 }
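/*
 * Example outputs: "default" for MPOL_DEFAULT, "prefer=2" for
 * MPOL_PREFERRED on node 2, and "interleave=0-3" for MPOL_INTERLEAVE
 * over nodes 0-3 (nodelist_scnprintf() condenses consecutive nodes
 * into ranges).
 */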
1735
1736 struct numa_maps {
1737 unsigned long pages;
1738 unsigned long anon;
1739 unsigned long mapped;
1740 unsigned long mapcount_max;
1741 unsigned long node[MAX_NUMNODES];
1742 };
1743
1744 static void gather_stats(struct page *page, void *private)
1745 {
1746 struct numa_maps *md = private;
1747 int count = page_mapcount(page);
1748
1749 if (count)
1750 md->mapped++;
1751
1752 if (count > md->mapcount_max)
1753 md->mapcount_max = count;
1754
1755 md->pages++;
1756
1757 if (PageAnon(page))
1758 md->anon++;
1759
1760 md->node[page_to_nid(page)]++;
1761 cond_resched();
1762 }
1763
1764 int show_numa_map(struct seq_file *m, void *v)
1765 {
1766 struct task_struct *task = m->private;
1767 struct vm_area_struct *vma = v;
1768 struct numa_maps *md;
1769 int n;
1770 char buffer[50];
1771
1772 if (!vma->vm_mm)
1773 return 0;
1774
1775 md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
1776 if (!md)
1777 return 0;
1778
1779 check_pgd_range(vma, vma->vm_start, vma->vm_end,
1780 &node_online_map, MPOL_MF_STATS, md);
1781
1782 if (md->pages) {
1783 mpol_to_str(buffer, sizeof(buffer),
1784 get_vma_policy(task, vma, vma->vm_start));
1785
1786 seq_printf(m, "%08lx %s pages=%lu mapped=%lu maxref=%lu",
1787 vma->vm_start, buffer, md->pages,
1788 md->mapped, md->mapcount_max);
1789
1790 if (md->anon)
1791 seq_printf(m," anon=%lu",md->anon);
1792
1793 for_each_online_node(n)
1794 if (md->node[n])
1795 seq_printf(m, " N%d=%lu", n, md->node[n]);
1796
1797 seq_putc(m, '\n');
1798 }
1799 kfree(md);
1800
1801 if (m->count < m->size)
1802 m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
1803 return 0;
1804 }
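/*
 * Example /proc/<pid>/numa_maps line produced above for an anonymous
 * VMA interleaved over two nodes (values are illustrative):
 *
 *	2aaaaac00000 interleave=0-1 pages=512 mapped=512 maxref=1 anon=512 N0=256 N1=256
 */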
1805