mm/mempolicy: fix !vma in new_vma_page()
mm/mempolicy.c
1 /*
2 * Simple NUMA memory policy for the Linux kernel.
3 *
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6 * Subject to the GNU Public License, version 2.
7 *
8 * NUMA policy allows the user to give hints in which node(s) memory should
9 * be allocated.
10 *
11 * Support four policies per VMA and per process:
12 *
13 * The VMA policy has priority over the process policy for a page fault.
14 *
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a process counter
20 * is used.
21 *
22 * bind Only allocate memory on a specific set of nodes,
23 * no fallback.
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
27 *
28 * preferred Try a specific node first before normal fallback.
29 * As a special case NUMA_NO_NODE here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
32 * process policy.
33 *
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
37 *
38 * The process policy is applied for most non interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
42 *
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
46 *
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49 * on systems with highmem, kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
51 *
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
54 */
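
The policy modes described above are driven from userspace through the set_mempolicy(2), mbind(2), get_mempolicy(2) and migrate_pages(2) system calls implemented further down in this file. As a rough illustration only (a userspace sketch, not part of the kernel source; it assumes libnuma's <numaif.h> syscall wrappers and a machine with at least two nodes), setting a process-wide interleave policy and a per-VMA bind policy might look like:

        /* Userspace sketch (illustrative, not kernel code); build with: gcc demo.c -lnuma */
        #include <numaif.h>             /* set_mempolicy(), mbind(), MPOL_* */
        #include <sys/mman.h>
        #include <stdio.h>

        int main(void)
        {
                unsigned long mask = (1UL << 0) | (1UL << 1);   /* nodes 0 and 1 */
                size_t len = 4UL << 20;
                void *buf;

                /* Process policy: interleave future allocations over nodes 0-1. */
                if (set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask)))
                        perror("set_mempolicy");

                buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                if (buf == MAP_FAILED)
                        return 1;

                /* VMA policy: bind this mapping to node 0 and move any existing pages. */
                mask = 1UL << 0;
                if (mbind(buf, len, MPOL_BIND, &mask, 8 * sizeof(mask),
                          MPOL_MF_MOVE | MPOL_MF_STRICT))
                        perror("mbind");
                return 0;
        }
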
55
56 /* Notebook:
57 fix mmap readahead to honour policy and enable policy for any page cache
58 object
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
61 first item above.
62 handle mremap for shared memory (currently ignored for the policy)
63 grows down?
64 make bind policy root only? It can trigger oom much faster and the
65 kernel is not always grateful with that.
66 */
67
68 #include <linux/mempolicy.h>
69 #include <linux/mm.h>
70 #include <linux/highmem.h>
71 #include <linux/hugetlb.h>
72 #include <linux/kernel.h>
73 #include <linux/sched.h>
74 #include <linux/nodemask.h>
75 #include <linux/cpuset.h>
76 #include <linux/slab.h>
77 #include <linux/string.h>
78 #include <linux/export.h>
79 #include <linux/nsproxy.h>
80 #include <linux/interrupt.h>
81 #include <linux/init.h>
82 #include <linux/compat.h>
83 #include <linux/swap.h>
84 #include <linux/seq_file.h>
85 #include <linux/proc_fs.h>
86 #include <linux/migrate.h>
87 #include <linux/ksm.h>
88 #include <linux/rmap.h>
89 #include <linux/security.h>
90 #include <linux/syscalls.h>
91 #include <linux/ctype.h>
92 #include <linux/mm_inline.h>
93 #include <linux/mmu_notifier.h>
94
95 #include <asm/tlbflush.h>
96 #include <asm/uaccess.h>
97 #include <linux/random.h>
98
99 #include "internal.h"
100
101 /* Internal flags */
102 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
103 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
104
105 static struct kmem_cache *policy_cache;
106 static struct kmem_cache *sn_cache;
107
108 /* Highest zone. A specific allocation for a zone below that is not
109 policied. */
110 enum zone_type policy_zone = 0;
111
112 /*
113 * run-time system-wide default policy => local allocation
114 */
115 static struct mempolicy default_policy = {
116 .refcnt = ATOMIC_INIT(1), /* never free it */
117 .mode = MPOL_PREFERRED,
118 .flags = MPOL_F_LOCAL,
119 };
120
121 static struct mempolicy preferred_node_policy[MAX_NUMNODES];
122
123 static struct mempolicy *get_task_policy(struct task_struct *p)
124 {
125 struct mempolicy *pol = p->mempolicy;
126
127 if (!pol) {
128 int node = numa_node_id();
129
130 if (node != NUMA_NO_NODE) {
131 pol = &preferred_node_policy[node];
132 /*
133 * preferred_node_policy is not initialised early in
134 * boot
135 */
136 if (!pol->mode)
137 pol = NULL;
138 }
139 }
140
141 return pol;
142 }
143
144 static const struct mempolicy_operations {
145 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
146 /*
147 * If the read-side task has no lock to protect task->mempolicy, the
148 * write-side task rebinds task->mempolicy in two steps. The first step
149 * sets all the newly allowed nodes, and the second step clears all the
150 * now-disallowed nodes. This way a reader can never find an empty
151 * nodemask (no node left to allocate a page from).
152 * If we have a lock to protect task->mempolicy on the read side, we
153 * rebind directly.
154 *
155 * step:
156 * MPOL_REBIND_ONCE - do rebind work at once
157 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
158 * MPOL_REBIND_STEP2 - clean all the disallowed nodes
159 */
160 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
161 enum mpol_rebind_step step);
162 } mpol_ops[MPOL_MAX];
163
164 /* Check that the nodemask contains at least one populated zone */
165 static int is_valid_nodemask(const nodemask_t *nodemask)
166 {
167 return nodes_intersects(*nodemask, node_states[N_MEMORY]);
168 }
169
170 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
171 {
172 return pol->flags & MPOL_MODE_FLAGS;
173 }
174
175 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
176 const nodemask_t *rel)
177 {
178 nodemask_t tmp;
179 nodes_fold(tmp, *orig, nodes_weight(*rel));
180 nodes_onto(*ret, tmp, *rel);
181 }
182
183 static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
184 {
185 if (nodes_empty(*nodes))
186 return -EINVAL;
187 pol->v.nodes = *nodes;
188 return 0;
189 }
190
191 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
192 {
193 if (!nodes)
194 pol->flags |= MPOL_F_LOCAL; /* local allocation */
195 else if (nodes_empty(*nodes))
196 return -EINVAL; /* no allowed nodes */
197 else
198 pol->v.preferred_node = first_node(*nodes);
199 return 0;
200 }
201
202 static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
203 {
204 if (!is_valid_nodemask(nodes))
205 return -EINVAL;
206 pol->v.nodes = *nodes;
207 return 0;
208 }
209
210 /*
211 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
212 * any, for the new policy. mpol_new() has already validated the nodes
213 * parameter with respect to the policy mode and flags. But, we need to
214 * handle an empty nodemask with MPOL_PREFERRED here.
215 *
216 * Must be called holding task's alloc_lock to protect task's mems_allowed
217 * and mempolicy. May also be called holding the mmap_semaphore for write.
218 */
219 static int mpol_set_nodemask(struct mempolicy *pol,
220 const nodemask_t *nodes, struct nodemask_scratch *nsc)
221 {
222 int ret;
223
224 /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
225 if (pol == NULL)
226 return 0;
227 /* Check N_MEMORY */
228 nodes_and(nsc->mask1,
229 cpuset_current_mems_allowed, node_states[N_MEMORY]);
230
231 VM_BUG_ON(!nodes);
232 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
233 nodes = NULL; /* explicit local allocation */
234 else {
235 if (pol->flags & MPOL_F_RELATIVE_NODES)
236 mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1);
237 else
238 nodes_and(nsc->mask2, *nodes, nsc->mask1);
239
240 if (mpol_store_user_nodemask(pol))
241 pol->w.user_nodemask = *nodes;
242 else
243 pol->w.cpuset_mems_allowed =
244 cpuset_current_mems_allowed;
245 }
246
247 if (nodes)
248 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
249 else
250 ret = mpol_ops[pol->mode].create(pol, NULL);
251 return ret;
252 }
253
254 /*
255 * This function just creates a new policy, does some checks and simple
256 * initialization. You must invoke mpol_set_nodemask() to set nodes.
257 */
258 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
259 nodemask_t *nodes)
260 {
261 struct mempolicy *policy;
262
263 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
264 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
265
266 if (mode == MPOL_DEFAULT) {
267 if (nodes && !nodes_empty(*nodes))
268 return ERR_PTR(-EINVAL);
269 return NULL;
270 }
271 VM_BUG_ON(!nodes);
272
273 /*
274 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
275 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
276 * All other modes require a valid pointer to a non-empty nodemask.
277 */
278 if (mode == MPOL_PREFERRED) {
279 if (nodes_empty(*nodes)) {
280 if (((flags & MPOL_F_STATIC_NODES) ||
281 (flags & MPOL_F_RELATIVE_NODES)))
282 return ERR_PTR(-EINVAL);
283 }
284 } else if (mode == MPOL_LOCAL) {
285 if (!nodes_empty(*nodes))
286 return ERR_PTR(-EINVAL);
287 mode = MPOL_PREFERRED;
288 } else if (nodes_empty(*nodes))
289 return ERR_PTR(-EINVAL);
290 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
291 if (!policy)
292 return ERR_PTR(-ENOMEM);
293 atomic_set(&policy->refcnt, 1);
294 policy->mode = mode;
295 policy->flags = flags;
296
297 return policy;
298 }
299
300 /* Slow path of a mpol destructor. */
301 void __mpol_put(struct mempolicy *p)
302 {
303 if (!atomic_dec_and_test(&p->refcnt))
304 return;
305 kmem_cache_free(policy_cache, p);
306 }
307
308 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
309 enum mpol_rebind_step step)
310 {
311 }
312
313 /*
314 * step:
315 * MPOL_REBIND_ONCE - do rebind work at once
316 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
317 * MPOL_REBIND_STEP2 - clean all the disallowed nodes
318 */
319 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
320 enum mpol_rebind_step step)
321 {
322 nodemask_t tmp;
323
324 if (pol->flags & MPOL_F_STATIC_NODES)
325 nodes_and(tmp, pol->w.user_nodemask, *nodes);
326 else if (pol->flags & MPOL_F_RELATIVE_NODES)
327 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
328 else {
329 /*
330 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
331 * result
332 */
333 if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
334 nodes_remap(tmp, pol->v.nodes,
335 pol->w.cpuset_mems_allowed, *nodes);
336 pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
337 } else if (step == MPOL_REBIND_STEP2) {
338 tmp = pol->w.cpuset_mems_allowed;
339 pol->w.cpuset_mems_allowed = *nodes;
340 } else
341 BUG();
342 }
343
344 if (nodes_empty(tmp))
345 tmp = *nodes;
346
347 if (step == MPOL_REBIND_STEP1)
348 nodes_or(pol->v.nodes, pol->v.nodes, tmp);
349 else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
350 pol->v.nodes = tmp;
351 else
352 BUG();
353
354 if (!node_isset(current->il_next, tmp)) {
355 current->il_next = next_node(current->il_next, tmp);
356 if (current->il_next >= MAX_NUMNODES)
357 current->il_next = first_node(tmp);
358 if (current->il_next >= MAX_NUMNODES)
359 current->il_next = numa_node_id();
360 }
361 }
362
363 static void mpol_rebind_preferred(struct mempolicy *pol,
364 const nodemask_t *nodes,
365 enum mpol_rebind_step step)
366 {
367 nodemask_t tmp;
368
369 if (pol->flags & MPOL_F_STATIC_NODES) {
370 int node = first_node(pol->w.user_nodemask);
371
372 if (node_isset(node, *nodes)) {
373 pol->v.preferred_node = node;
374 pol->flags &= ~MPOL_F_LOCAL;
375 } else
376 pol->flags |= MPOL_F_LOCAL;
377 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
378 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
379 pol->v.preferred_node = first_node(tmp);
380 } else if (!(pol->flags & MPOL_F_LOCAL)) {
381 pol->v.preferred_node = node_remap(pol->v.preferred_node,
382 pol->w.cpuset_mems_allowed,
383 *nodes);
384 pol->w.cpuset_mems_allowed = *nodes;
385 }
386 }
387
388 /*
389 * mpol_rebind_policy - Migrate a policy to a different set of nodes
390 *
391 * If the read-side task has no lock to protect task->mempolicy, the
392 * write-side task rebinds task->mempolicy in two steps. The first step
393 * sets all the newly allowed nodes, and the second step clears all the
394 * now-disallowed nodes. This way a reader can never find an empty
395 * nodemask (no node left to allocate a page from).
396 * If we have a lock to protect task->mempolicy on the read side, we
397 * rebind directly.
398 *
399 * step:
400 * MPOL_REBIND_ONCE - do rebind work at once
401 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
402 * MPOL_REBIND_STEP2 - clean all the disallowed nodes
403 */
404 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
405 enum mpol_rebind_step step)
406 {
407 if (!pol)
408 return;
409 if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
410 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
411 return;
412
413 if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
414 return;
415
416 if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
417 BUG();
418
419 if (step == MPOL_REBIND_STEP1)
420 pol->flags |= MPOL_F_REBINDING;
421 else if (step == MPOL_REBIND_STEP2)
422 pol->flags &= ~MPOL_F_REBINDING;
423 else if (step >= MPOL_REBIND_NSTEP)
424 BUG();
425
426 mpol_ops[pol->mode].rebind(pol, newmask, step);
427 }
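
To make the two-step rebind concrete, consider a hypothetical cpuset whose mems_allowed changes from nodes {0,1} to {2,3} while a task holds an MPOL_BIND policy over {0,1}. A toy standalone sketch, modelling nodemasks as plain bitmasks and collapsing the nodes_remap() detail, shows why a lockless reader never sees an empty mask:

        /* Toy model (illustrative, not kernel code): nodemasks as plain bitmasks. */
        #include <stdio.h>

        int main(void)
        {
                unsigned long new_allowed = 0xcUL;      /* cpuset becomes nodes {2,3} */
                unsigned long policy      = 0x3UL;      /* MPOL_BIND over old nodes {0,1} */

                /* MPOL_REBIND_STEP1: add the new nodes (the real code first remaps
                 * the old policy nodes into the new set); the mask only grows, so a
                 * concurrent reader always finds at least one allowed node. */
                policy |= new_allowed;                  /* {0,1,2,3} */
                printf("after step1: %#lx\n", policy);

                /* MPOL_REBIND_STEP2: drop the nodes that are no longer allowed. */
                policy &= new_allowed;                  /* {2,3} */
                printf("after step2: %#lx\n", policy);
                return 0;
        }
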
428
429 /*
430 * Wrapper for mpol_rebind_policy() that just requires task
431 * pointer, and updates task mempolicy.
432 *
433 * Called with task's alloc_lock held.
434 */
435
436 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
437 enum mpol_rebind_step step)
438 {
439 mpol_rebind_policy(tsk->mempolicy, new, step);
440 }
441
442 /*
443 * Rebind each vma in mm to new nodemask.
444 *
445 * Call holding a reference to mm. Takes mm->mmap_sem during call.
446 */
447
448 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
449 {
450 struct vm_area_struct *vma;
451
452 down_write(&mm->mmap_sem);
453 for (vma = mm->mmap; vma; vma = vma->vm_next)
454 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
455 up_write(&mm->mmap_sem);
456 }
457
458 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
459 [MPOL_DEFAULT] = {
460 .rebind = mpol_rebind_default,
461 },
462 [MPOL_INTERLEAVE] = {
463 .create = mpol_new_interleave,
464 .rebind = mpol_rebind_nodemask,
465 },
466 [MPOL_PREFERRED] = {
467 .create = mpol_new_preferred,
468 .rebind = mpol_rebind_preferred,
469 },
470 [MPOL_BIND] = {
471 .create = mpol_new_bind,
472 .rebind = mpol_rebind_nodemask,
473 },
474 };
475
476 static void migrate_page_add(struct page *page, struct list_head *pagelist,
477 unsigned long flags);
478
479 /*
480 * Scan through pages checking if pages follow certain conditions,
481 * and move them to the pagelist if they do.
482 */
483 static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
484 unsigned long addr, unsigned long end,
485 const nodemask_t *nodes, unsigned long flags,
486 void *private)
487 {
488 pte_t *orig_pte;
489 pte_t *pte;
490 spinlock_t *ptl;
491
492 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
493 do {
494 struct page *page;
495 int nid;
496
497 if (!pte_present(*pte))
498 continue;
499 page = vm_normal_page(vma, addr, *pte);
500 if (!page)
501 continue;
502 /*
503 * vm_normal_page() filters out zero pages, but there might
504 * still be PageReserved pages to skip, perhaps in a VDSO.
505 */
506 if (PageReserved(page))
507 continue;
508 nid = page_to_nid(page);
509 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
510 continue;
511
512 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
513 migrate_page_add(page, private, flags);
514 else
515 break;
516 } while (pte++, addr += PAGE_SIZE, addr != end);
517 pte_unmap_unlock(orig_pte, ptl);
518 return addr != end;
519 }
520
521 static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
522 pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,
523 void *private)
524 {
525 #ifdef CONFIG_HUGETLB_PAGE
526 int nid;
527 struct page *page;
528 spinlock_t *ptl;
529
530 ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd);
531 page = pte_page(huge_ptep_get((pte_t *)pmd));
532 nid = page_to_nid(page);
533 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
534 goto unlock;
535 /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
536 if (flags & (MPOL_MF_MOVE_ALL) ||
537 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
538 isolate_huge_page(page, private);
539 unlock:
540 spin_unlock(ptl);
541 #else
542 BUG();
543 #endif
544 }
545
546 static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
547 unsigned long addr, unsigned long end,
548 const nodemask_t *nodes, unsigned long flags,
549 void *private)
550 {
551 pmd_t *pmd;
552 unsigned long next;
553
554 pmd = pmd_offset(pud, addr);
555 do {
556 next = pmd_addr_end(addr, end);
557 if (!pmd_present(*pmd))
558 continue;
559 if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
560 queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
561 flags, private);
562 continue;
563 }
564 split_huge_page_pmd(vma, addr, pmd);
565 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
566 continue;
567 if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
568 flags, private))
569 return -EIO;
570 } while (pmd++, addr = next, addr != end);
571 return 0;
572 }
573
574 static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
575 unsigned long addr, unsigned long end,
576 const nodemask_t *nodes, unsigned long flags,
577 void *private)
578 {
579 pud_t *pud;
580 unsigned long next;
581
582 pud = pud_offset(pgd, addr);
583 do {
584 next = pud_addr_end(addr, end);
585 if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
586 continue;
587 if (pud_none_or_clear_bad(pud))
588 continue;
589 if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
590 flags, private))
591 return -EIO;
592 } while (pud++, addr = next, addr != end);
593 return 0;
594 }
595
596 static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
597 unsigned long addr, unsigned long end,
598 const nodemask_t *nodes, unsigned long flags,
599 void *private)
600 {
601 pgd_t *pgd;
602 unsigned long next;
603
604 pgd = pgd_offset(vma->vm_mm, addr);
605 do {
606 next = pgd_addr_end(addr, end);
607 if (pgd_none_or_clear_bad(pgd))
608 continue;
609 if (queue_pages_pud_range(vma, pgd, addr, next, nodes,
610 flags, private))
611 return -EIO;
612 } while (pgd++, addr = next, addr != end);
613 return 0;
614 }
615
616 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
617 /*
618 * This is used to mark a range of virtual addresses to be inaccessible.
619 * These are later cleared by a NUMA hinting fault. Depending on these
620 * faults, pages may be migrated for better NUMA placement.
621 *
622 * This is assuming that NUMA faults are handled using PROT_NONE. If
623 * an architecture makes a different choice, it will need further
624 * changes to the core.
625 */
626 unsigned long change_prot_numa(struct vm_area_struct *vma,
627 unsigned long addr, unsigned long end)
628 {
629 int nr_updated;
630 BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE);
631
632 nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
633 if (nr_updated)
634 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
635
636 return nr_updated;
637 }
638 #else
639 static unsigned long change_prot_numa(struct vm_area_struct *vma,
640 unsigned long addr, unsigned long end)
641 {
642 return 0;
643 }
644 #endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
645
646 /*
647 * Walk through page tables and collect pages to be migrated.
648 *
649 * If pages found in a given range are on a set of nodes (determined by
650 * @nodes and @flags), they are isolated and queued to the pagelist, which
651 * is passed via @private.
652 */
653 static struct vm_area_struct *
654 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
655 const nodemask_t *nodes, unsigned long flags, void *private)
656 {
657 int err;
658 struct vm_area_struct *first, *vma, *prev;
659
660
661 first = find_vma(mm, start);
662 if (!first)
663 return ERR_PTR(-EFAULT);
664 prev = NULL;
665 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
666 unsigned long endvma = vma->vm_end;
667
668 if (endvma > end)
669 endvma = end;
670 if (vma->vm_start > start)
671 start = vma->vm_start;
672
673 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
674 if (!vma->vm_next && vma->vm_end < end)
675 return ERR_PTR(-EFAULT);
676 if (prev && prev->vm_end < vma->vm_start)
677 return ERR_PTR(-EFAULT);
678 }
679
680 if (flags & MPOL_MF_LAZY) {
681 change_prot_numa(vma, start, endvma);
682 goto next;
683 }
684
685 if ((flags & MPOL_MF_STRICT) ||
686 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
687 vma_migratable(vma))) {
688
689 err = queue_pages_pgd_range(vma, start, endvma, nodes,
690 flags, private);
691 if (err) {
692 first = ERR_PTR(err);
693 break;
694 }
695 }
696 next:
697 prev = vma;
698 }
699 return first;
700 }
701
702 /*
703 * Apply policy to a single VMA
704 * This must be called with the mmap_sem held for writing.
705 */
706 static int vma_replace_policy(struct vm_area_struct *vma,
707 struct mempolicy *pol)
708 {
709 int err;
710 struct mempolicy *old;
711 struct mempolicy *new;
712
713 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
714 vma->vm_start, vma->vm_end, vma->vm_pgoff,
715 vma->vm_ops, vma->vm_file,
716 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
717
718 new = mpol_dup(pol);
719 if (IS_ERR(new))
720 return PTR_ERR(new);
721
722 if (vma->vm_ops && vma->vm_ops->set_policy) {
723 err = vma->vm_ops->set_policy(vma, new);
724 if (err)
725 goto err_out;
726 }
727
728 old = vma->vm_policy;
729 vma->vm_policy = new; /* protected by mmap_sem */
730 mpol_put(old);
731
732 return 0;
733 err_out:
734 mpol_put(new);
735 return err;
736 }
737
738 /* Step 2: apply policy to a range and do splits. */
739 static int mbind_range(struct mm_struct *mm, unsigned long start,
740 unsigned long end, struct mempolicy *new_pol)
741 {
742 struct vm_area_struct *next;
743 struct vm_area_struct *prev;
744 struct vm_area_struct *vma;
745 int err = 0;
746 pgoff_t pgoff;
747 unsigned long vmstart;
748 unsigned long vmend;
749
750 vma = find_vma(mm, start);
751 if (!vma || vma->vm_start > start)
752 return -EFAULT;
753
754 prev = vma->vm_prev;
755 if (start > vma->vm_start)
756 prev = vma;
757
758 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
759 next = vma->vm_next;
760 vmstart = max(start, vma->vm_start);
761 vmend = min(end, vma->vm_end);
762
763 if (mpol_equal(vma_policy(vma), new_pol))
764 continue;
765
766 pgoff = vma->vm_pgoff +
767 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
768 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
769 vma->anon_vma, vma->vm_file, pgoff,
770 new_pol);
771 if (prev) {
772 vma = prev;
773 next = vma->vm_next;
774 if (mpol_equal(vma_policy(vma), new_pol))
775 continue;
776 /* vma_merge() joined vma && vma->next, case 8 */
777 goto replace;
778 }
779 if (vma->vm_start != vmstart) {
780 err = split_vma(vma->vm_mm, vma, vmstart, 1);
781 if (err)
782 goto out;
783 }
784 if (vma->vm_end != vmend) {
785 err = split_vma(vma->vm_mm, vma, vmend, 0);
786 if (err)
787 goto out;
788 }
789 replace:
790 err = vma_replace_policy(vma, new_pol);
791 if (err)
792 goto out;
793 }
794
795 out:
796 return err;
797 }
798
799 /*
800 * Update task->flags PF_MEMPOLICY bit: set iff non-default
801 * mempolicy. Allows more rapid checking of this (combined perhaps
802 * with other PF_* flag bits) on memory allocation hot code paths.
803 *
804 * If called from outside this file, the task 'p' should -only- be
805 * a newly forked child not yet visible on the task list, because
806 * manipulating the task flags of a visible task is not safe.
807 *
808 * The above limitation is why this routine has the funny name
809 * mpol_fix_fork_child_flag().
810 *
811 * It is also safe to call this with a task pointer of current,
812 * which the static wrapper mpol_set_task_struct_flag() does,
813 * for use within this file.
814 */
815
816 void mpol_fix_fork_child_flag(struct task_struct *p)
817 {
818 if (p->mempolicy)
819 p->flags |= PF_MEMPOLICY;
820 else
821 p->flags &= ~PF_MEMPOLICY;
822 }
823
824 static void mpol_set_task_struct_flag(void)
825 {
826 mpol_fix_fork_child_flag(current);
827 }
828
829 /* Set the process memory policy */
830 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
831 nodemask_t *nodes)
832 {
833 struct mempolicy *new, *old;
834 struct mm_struct *mm = current->mm;
835 NODEMASK_SCRATCH(scratch);
836 int ret;
837
838 if (!scratch)
839 return -ENOMEM;
840
841 new = mpol_new(mode, flags, nodes);
842 if (IS_ERR(new)) {
843 ret = PTR_ERR(new);
844 goto out;
845 }
846 /*
847 * prevent changing our mempolicy while show_numa_maps()
848 * is using it.
849 * Note: do_set_mempolicy() can be called at init time
850 * with no 'mm'.
851 */
852 if (mm)
853 down_write(&mm->mmap_sem);
854 task_lock(current);
855 ret = mpol_set_nodemask(new, nodes, scratch);
856 if (ret) {
857 task_unlock(current);
858 if (mm)
859 up_write(&mm->mmap_sem);
860 mpol_put(new);
861 goto out;
862 }
863 old = current->mempolicy;
864 current->mempolicy = new;
865 mpol_set_task_struct_flag();
866 if (new && new->mode == MPOL_INTERLEAVE &&
867 nodes_weight(new->v.nodes))
868 current->il_next = first_node(new->v.nodes);
869 task_unlock(current);
870 if (mm)
871 up_write(&mm->mmap_sem);
872
873 mpol_put(old);
874 ret = 0;
875 out:
876 NODEMASK_SCRATCH_FREE(scratch);
877 return ret;
878 }
879
880 /*
881 * Return nodemask for policy for get_mempolicy() query
882 *
883 * Called with task's alloc_lock held
884 */
885 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
886 {
887 nodes_clear(*nodes);
888 if (p == &default_policy)
889 return;
890
891 switch (p->mode) {
892 case MPOL_BIND:
893 /* Fall through */
894 case MPOL_INTERLEAVE:
895 *nodes = p->v.nodes;
896 break;
897 case MPOL_PREFERRED:
898 if (!(p->flags & MPOL_F_LOCAL))
899 node_set(p->v.preferred_node, *nodes);
900 /* else return empty node mask for local allocation */
901 break;
902 default:
903 BUG();
904 }
905 }
906
907 static int lookup_node(struct mm_struct *mm, unsigned long addr)
908 {
909 struct page *p;
910 int err;
911
912 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
913 if (err >= 0) {
914 err = page_to_nid(p);
915 put_page(p);
916 }
917 return err;
918 }
919
920 /* Retrieve NUMA policy */
921 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
922 unsigned long addr, unsigned long flags)
923 {
924 int err;
925 struct mm_struct *mm = current->mm;
926 struct vm_area_struct *vma = NULL;
927 struct mempolicy *pol = current->mempolicy;
928
929 if (flags &
930 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
931 return -EINVAL;
932
933 if (flags & MPOL_F_MEMS_ALLOWED) {
934 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
935 return -EINVAL;
936 *policy = 0; /* just so it's initialized */
937 task_lock(current);
938 *nmask = cpuset_current_mems_allowed;
939 task_unlock(current);
940 return 0;
941 }
942
943 if (flags & MPOL_F_ADDR) {
944 /*
945 * Do NOT fall back to task policy if the
946 * vma/shared policy at addr is NULL. We
947 * want to return MPOL_DEFAULT in this case.
948 */
949 down_read(&mm->mmap_sem);
950 vma = find_vma_intersection(mm, addr, addr+1);
951 if (!vma) {
952 up_read(&mm->mmap_sem);
953 return -EFAULT;
954 }
955 if (vma->vm_ops && vma->vm_ops->get_policy)
956 pol = vma->vm_ops->get_policy(vma, addr);
957 else
958 pol = vma->vm_policy;
959 } else if (addr)
960 return -EINVAL;
961
962 if (!pol)
963 pol = &default_policy; /* indicates default behavior */
964
965 if (flags & MPOL_F_NODE) {
966 if (flags & MPOL_F_ADDR) {
967 err = lookup_node(mm, addr);
968 if (err < 0)
969 goto out;
970 *policy = err;
971 } else if (pol == current->mempolicy &&
972 pol->mode == MPOL_INTERLEAVE) {
973 *policy = current->il_next;
974 } else {
975 err = -EINVAL;
976 goto out;
977 }
978 } else {
979 *policy = pol == &default_policy ? MPOL_DEFAULT :
980 pol->mode;
981 /*
982 * Internal mempolicy flags must be masked off before exposing
983 * the policy to userspace.
984 */
985 *policy |= (pol->flags & MPOL_MODE_FLAGS);
986 }
987
988 if (vma) {
989 up_read(&current->mm->mmap_sem);
990 vma = NULL;
991 }
992
993 err = 0;
994 if (nmask) {
995 if (mpol_store_user_nodemask(pol)) {
996 *nmask = pol->w.user_nodemask;
997 } else {
998 task_lock(current);
999 get_policy_nodemask(pol, nmask);
1000 task_unlock(current);
1001 }
1002 }
1003
1004 out:
1005 mpol_cond_put(pol);
1006 if (vma)
1007 up_read(&current->mm->mmap_sem);
1008 return err;
1009 }
1010
1011 #ifdef CONFIG_MIGRATION
1012 /*
1013 * page migration
1014 */
1015 static void migrate_page_add(struct page *page, struct list_head *pagelist,
1016 unsigned long flags)
1017 {
1018 /*
1019 * Avoid migrating a page that is shared with others.
1020 */
1021 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
1022 if (!isolate_lru_page(page)) {
1023 list_add_tail(&page->lru, pagelist);
1024 inc_zone_page_state(page, NR_ISOLATED_ANON +
1025 page_is_file_cache(page));
1026 }
1027 }
1028 }
1029
1030 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
1031 {
1032 if (PageHuge(page))
1033 return alloc_huge_page_node(page_hstate(compound_head(page)),
1034 node);
1035 else
1036 return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
1037 }
1038
1039 /*
1040 * Migrate pages from one node to a target node.
1041 * Returns error or the number of pages not migrated.
1042 */
1043 static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1044 int flags)
1045 {
1046 nodemask_t nmask;
1047 LIST_HEAD(pagelist);
1048 int err = 0;
1049
1050 nodes_clear(nmask);
1051 node_set(source, nmask);
1052
1053 /*
1054 * This does not "check" the range but isolates all pages that
1055 * need migration. Between passing in the full user address
1056 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1057 */
1058 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1059 queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
1060 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1061
1062 if (!list_empty(&pagelist)) {
1063 err = migrate_pages(&pagelist, new_node_page, dest,
1064 MIGRATE_SYNC, MR_SYSCALL);
1065 if (err)
1066 putback_movable_pages(&pagelist);
1067 }
1068
1069 return err;
1070 }
1071
1072 /*
1073 * Move pages between the two nodesets so as to preserve the physical
1074 * layout as much as possible.
1075 *
1076 * Returns the number of pages that could not be moved.
1077 */
1078 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1079 const nodemask_t *to, int flags)
1080 {
1081 int busy = 0;
1082 int err;
1083 nodemask_t tmp;
1084
1085 err = migrate_prep();
1086 if (err)
1087 return err;
1088
1089 down_read(&mm->mmap_sem);
1090
1091 err = migrate_vmas(mm, from, to, flags);
1092 if (err)
1093 goto out;
1094
1095 /*
1096 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1097 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1098 * bit in 'tmp', and return that <source, dest> pair for migration.
1099 * The pair of nodemasks 'to' and 'from' define the map.
1100 *
1101 * If no pair of bits is found that way, fall back to picking some
1102 * pair of 'source' and 'dest' bits that are not the same. If the
1103 * 'source' and 'dest' bits are the same, this represents a node
1104 * that will be migrating to itself, so no pages need move.
1105 *
1106 * If no bits are left in 'tmp', or if all remaining bits left
1107 * in 'tmp' correspond to the same bit in 'to', return false
1108 * (nothing left to migrate).
1109 *
1110 * This lets us pick a pair of nodes to migrate between, such that
1111 * if possible the dest node is not already occupied by some other
1112 * source node, minimizing the risk of overloading the memory on a
1113 * node that would happen if we migrated incoming memory to a node
1114 * before migrating outgoing memory sourced from that same node.
1115 *
1116 * A single scan of tmp is sufficient. As we go, we remember the
1117 * most recent <s, d> pair that moved (s != d). If we find a pair
1118 * that not only moved, but what's better, moved to an empty slot
1119 * (d is not set in tmp), then we break out then, with that pair.
1120 * Otherwise when we finish scanning tmp, we at least have the
1121 * most recent <s, d> pair that moved. If we get all the way through
1122 * the scan of tmp without finding any node that moved, much less
1123 * moved to an empty node, then there is nothing left worth migrating.
1124 */
1125
1126 tmp = *from;
1127 while (!nodes_empty(tmp)) {
1128 int s,d;
1129 int source = NUMA_NO_NODE;
1130 int dest = 0;
1131
1132 for_each_node_mask(s, tmp) {
1133
1134 /*
1135 * do_migrate_pages() tries to maintain the relative
1136 * node relationship of the pages established between
1137 * threads and memory areas.
1138 *
1139 * However if the number of source nodes is not equal to
1140 * the number of destination nodes we can not preserve
1141 * this node relative relationship. In that case, skip
1142 * copying memory from a node that is in the destination
1143 * mask.
1144 *
1145 * Example: [2,3,4] -> [3,4,5] moves everything.
1146 * [0-7] -> [3,4,5] moves only 0,1,2,6,7.
1147 */
1148
1149 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1150 (node_isset(s, *to)))
1151 continue;
1152
1153 d = node_remap(s, *from, *to);
1154 if (s == d)
1155 continue;
1156
1157 source = s; /* Node moved. Memorize */
1158 dest = d;
1159
1160 /* dest not in remaining from nodes? */
1161 if (!node_isset(dest, tmp))
1162 break;
1163 }
1164 if (source == NUMA_NO_NODE)
1165 break;
1166
1167 node_clear(source, tmp);
1168 err = migrate_to_node(mm, source, dest, flags);
1169 if (err > 0)
1170 busy += err;
1171 if (err < 0)
1172 break;
1173 }
1174 out:
1175 up_read(&mm->mmap_sem);
1176 if (err < 0)
1177 return err;
1178 return busy;
1179
1180 }
1181
1182 /*
1183 * Allocate a new page for page migration based on vma policy.
1184 * Start by assuming the page is mapped by the vma pointed to by @private;
1185 * if not, search forward from there. N.B., this assumes that the
1186 * list of pages handed to migrate_pages()--which is how we get here--
1187 * is in virtual address order.
1188 */
1189 static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1190 {
1191 struct vm_area_struct *vma = (struct vm_area_struct *)private;
1192 unsigned long uninitialized_var(address);
1193
1194 while (vma) {
1195 address = page_address_in_vma(page, vma);
1196 if (address != -EFAULT)
1197 break;
1198 vma = vma->vm_next;
1199 }
1200
1201 if (PageHuge(page)) {
1202 if (vma)
1203 return alloc_huge_page_noerr(vma, address, 1);
1204 else
1205 return NULL;
1206 }
1207 /*
1208 * if !vma, alloc_page_vma() will use task or system default policy
1209 */
1210 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1211 }
1212 #else
1213
1214 static void migrate_page_add(struct page *page, struct list_head *pagelist,
1215 unsigned long flags)
1216 {
1217 }
1218
1219 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1220 const nodemask_t *to, int flags)
1221 {
1222 return -ENOSYS;
1223 }
1224
1225 static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1226 {
1227 return NULL;
1228 }
1229 #endif
1230
1231 static long do_mbind(unsigned long start, unsigned long len,
1232 unsigned short mode, unsigned short mode_flags,
1233 nodemask_t *nmask, unsigned long flags)
1234 {
1235 struct vm_area_struct *vma;
1236 struct mm_struct *mm = current->mm;
1237 struct mempolicy *new;
1238 unsigned long end;
1239 int err;
1240 LIST_HEAD(pagelist);
1241
1242 if (flags & ~(unsigned long)MPOL_MF_VALID)
1243 return -EINVAL;
1244 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1245 return -EPERM;
1246
1247 if (start & ~PAGE_MASK)
1248 return -EINVAL;
1249
1250 if (mode == MPOL_DEFAULT)
1251 flags &= ~MPOL_MF_STRICT;
1252
1253 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1254 end = start + len;
1255
1256 if (end < start)
1257 return -EINVAL;
1258 if (end == start)
1259 return 0;
1260
1261 new = mpol_new(mode, mode_flags, nmask);
1262 if (IS_ERR(new))
1263 return PTR_ERR(new);
1264
1265 if (flags & MPOL_MF_LAZY)
1266 new->flags |= MPOL_F_MOF;
1267
1268 /*
1269 * If we are using the default policy then operation
1270 * on discontinuous address spaces is okay after all
1271 */
1272 if (!new)
1273 flags |= MPOL_MF_DISCONTIG_OK;
1274
1275 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1276 start, start + len, mode, mode_flags,
1277 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1278
1279 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1280
1281 err = migrate_prep();
1282 if (err)
1283 goto mpol_out;
1284 }
1285 {
1286 NODEMASK_SCRATCH(scratch);
1287 if (scratch) {
1288 down_write(&mm->mmap_sem);
1289 task_lock(current);
1290 err = mpol_set_nodemask(new, nmask, scratch);
1291 task_unlock(current);
1292 if (err)
1293 up_write(&mm->mmap_sem);
1294 } else
1295 err = -ENOMEM;
1296 NODEMASK_SCRATCH_FREE(scratch);
1297 }
1298 if (err)
1299 goto mpol_out;
1300
1301 vma = queue_pages_range(mm, start, end, nmask,
1302 flags | MPOL_MF_INVERT, &pagelist);
1303
1304 err = PTR_ERR(vma); /* maybe ... */
1305 if (!IS_ERR(vma))
1306 err = mbind_range(mm, start, end, new);
1307
1308 if (!err) {
1309 int nr_failed = 0;
1310
1311 if (!list_empty(&pagelist)) {
1312 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1313 nr_failed = migrate_pages(&pagelist, new_vma_page,
1314 (unsigned long)vma,
1315 MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1316 if (nr_failed)
1317 putback_movable_pages(&pagelist);
1318 }
1319
1320 if (nr_failed && (flags & MPOL_MF_STRICT))
1321 err = -EIO;
1322 } else
1323 putback_movable_pages(&pagelist);
1324
1325 up_write(&mm->mmap_sem);
1326 mpol_out:
1327 mpol_put(new);
1328 return err;
1329 }
1330
1331 /*
1332 * User space interface with variable sized bitmaps for nodelists.
1333 */
1334
1335 /* Copy a node mask from user space. */
1336 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1337 unsigned long maxnode)
1338 {
1339 unsigned long k;
1340 unsigned long nlongs;
1341 unsigned long endmask;
1342
1343 --maxnode;
1344 nodes_clear(*nodes);
1345 if (maxnode == 0 || !nmask)
1346 return 0;
1347 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1348 return -EINVAL;
1349
1350 nlongs = BITS_TO_LONGS(maxnode);
1351 if ((maxnode % BITS_PER_LONG) == 0)
1352 endmask = ~0UL;
1353 else
1354 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1355
1356 /* When the user specifies more nodes than supported, just check
1357 that the unsupported part is all zero. */
1358 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1359 if (nlongs > PAGE_SIZE/sizeof(long))
1360 return -EINVAL;
1361 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1362 unsigned long t;
1363 if (get_user(t, nmask + k))
1364 return -EFAULT;
1365 if (k == nlongs - 1) {
1366 if (t & endmask)
1367 return -EINVAL;
1368 } else if (t)
1369 return -EINVAL;
1370 }
1371 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1372 endmask = ~0UL;
1373 }
1374
1375 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1376 return -EFAULT;
1377 nodes_addr(*nodes)[nlongs-1] &= endmask;
1378 return 0;
1379 }
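
As a concrete trace of get_nodes() above (values chosen purely for illustration): a caller passing maxnode == 9 ends up, after the decrement, with 8 usable bits, so nlongs = BITS_TO_LONGS(8) = 1 and endmask = (1UL << 8) - 1 = 0xff; one word is copied from userspace and masked, so only bits for nodes 0-7 can reach the kernel nodemask. If the user mask spans more words than MAX_NUMNODES needs, those extra words must be all zero (within maxnode) or the call fails with -EINVAL.
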
1380
1381 /* Copy a kernel node mask to user space */
1382 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1383 nodemask_t *nodes)
1384 {
1385 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1386 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1387
1388 if (copy > nbytes) {
1389 if (copy > PAGE_SIZE)
1390 return -EINVAL;
1391 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1392 return -EFAULT;
1393 copy = nbytes;
1394 }
1395 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1396 }
1397
1398 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1399 unsigned long, mode, unsigned long __user *, nmask,
1400 unsigned long, maxnode, unsigned, flags)
1401 {
1402 nodemask_t nodes;
1403 int err;
1404 unsigned short mode_flags;
1405
1406 mode_flags = mode & MPOL_MODE_FLAGS;
1407 mode &= ~MPOL_MODE_FLAGS;
1408 if (mode >= MPOL_MAX)
1409 return -EINVAL;
1410 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1411 (mode_flags & MPOL_F_RELATIVE_NODES))
1412 return -EINVAL;
1413 err = get_nodes(&nodes, nmask, maxnode);
1414 if (err)
1415 return err;
1416 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1417 }
1418
1419 /* Set the process memory policy */
1420 SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1421 unsigned long, maxnode)
1422 {
1423 int err;
1424 nodemask_t nodes;
1425 unsigned short flags;
1426
1427 flags = mode & MPOL_MODE_FLAGS;
1428 mode &= ~MPOL_MODE_FLAGS;
1429 if ((unsigned int)mode >= MPOL_MAX)
1430 return -EINVAL;
1431 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1432 return -EINVAL;
1433 err = get_nodes(&nodes, nmask, maxnode);
1434 if (err)
1435 return err;
1436 return do_set_mempolicy(mode, flags, &nodes);
1437 }
1438
1439 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1440 const unsigned long __user *, old_nodes,
1441 const unsigned long __user *, new_nodes)
1442 {
1443 const struct cred *cred = current_cred(), *tcred;
1444 struct mm_struct *mm = NULL;
1445 struct task_struct *task;
1446 nodemask_t task_nodes;
1447 int err;
1448 nodemask_t *old;
1449 nodemask_t *new;
1450 NODEMASK_SCRATCH(scratch);
1451
1452 if (!scratch)
1453 return -ENOMEM;
1454
1455 old = &scratch->mask1;
1456 new = &scratch->mask2;
1457
1458 err = get_nodes(old, old_nodes, maxnode);
1459 if (err)
1460 goto out;
1461
1462 err = get_nodes(new, new_nodes, maxnode);
1463 if (err)
1464 goto out;
1465
1466 /* Find the mm_struct */
1467 rcu_read_lock();
1468 task = pid ? find_task_by_vpid(pid) : current;
1469 if (!task) {
1470 rcu_read_unlock();
1471 err = -ESRCH;
1472 goto out;
1473 }
1474 get_task_struct(task);
1475
1476 err = -EINVAL;
1477
1478 /*
1479 * Check if this process has the right to modify the specified
1480 * process. The right exists if the process has administrative
1481 * capabilities, superuser privileges or the same
1482 * userid as the target process.
1483 */
1484 tcred = __task_cred(task);
1485 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1486 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
1487 !capable(CAP_SYS_NICE)) {
1488 rcu_read_unlock();
1489 err = -EPERM;
1490 goto out_put;
1491 }
1492 rcu_read_unlock();
1493
1494 task_nodes = cpuset_mems_allowed(task);
1495 /* Is the user allowed to access the target nodes? */
1496 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1497 err = -EPERM;
1498 goto out_put;
1499 }
1500
1501 if (!nodes_subset(*new, node_states[N_MEMORY])) {
1502 err = -EINVAL;
1503 goto out_put;
1504 }
1505
1506 err = security_task_movememory(task);
1507 if (err)
1508 goto out_put;
1509
1510 mm = get_task_mm(task);
1511 put_task_struct(task);
1512
1513 if (!mm) {
1514 err = -EINVAL;
1515 goto out;
1516 }
1517
1518 err = do_migrate_pages(mm, old, new,
1519 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1520
1521 mmput(mm);
1522 out:
1523 NODEMASK_SCRATCH_FREE(scratch);
1524
1525 return err;
1526
1527 out_put:
1528 put_task_struct(task);
1529 goto out;
1530
1531 }
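
The kernel-side do_migrate_pages() above is reached from userspace through this system call. A minimal sketch (illustrative only, assuming libnuma's <numaif.h> wrapper and a two-node machine) that asks the kernel to move a task's pages from node 0 to node 1:

        /* Userspace sketch (illustrative, not kernel code); build with: gcc move.c -lnuma */
        #include <numaif.h>
        #include <stdio.h>
        #include <stdlib.h>

        int main(int argc, char **argv)
        {
                int pid = argc > 1 ? atoi(argv[1]) : 0; /* 0 means "current task" */
                unsigned long old_nodes = 1UL << 0;     /* pages currently on node 0 */
                unsigned long new_nodes = 1UL << 1;     /* ... move them to node 1   */
                long left;

                /* Returns the number of pages that could not be moved, or -1. */
                left = migrate_pages(pid, 8 * sizeof(unsigned long),
                                     &old_nodes, &new_nodes);
                if (left < 0)
                        perror("migrate_pages");
                else
                        printf("%ld pages could not be moved\n", left);
                return 0;
        }
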
1532
1533
1534 /* Retrieve NUMA policy */
1535 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1536 unsigned long __user *, nmask, unsigned long, maxnode,
1537 unsigned long, addr, unsigned long, flags)
1538 {
1539 int err;
1540 int uninitialized_var(pval);
1541 nodemask_t nodes;
1542
1543 if (nmask != NULL && maxnode < MAX_NUMNODES)
1544 return -EINVAL;
1545
1546 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1547
1548 if (err)
1549 return err;
1550
1551 if (policy && put_user(pval, policy))
1552 return -EFAULT;
1553
1554 if (nmask)
1555 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1556
1557 return err;
1558 }
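
One common use of this syscall is the MPOL_F_NODE | MPOL_F_ADDR query, which reports the node currently backing a given address rather than a policy. A minimal userspace sketch (illustrative only, assuming libnuma's <numaif.h> wrapper):

        /* Userspace sketch (illustrative, not kernel code); build with: gcc whichnode.c -lnuma */
        #include <numaif.h>
        #include <stdio.h>
        #include <stdlib.h>

        int main(void)
        {
                int node = -1;
                char *buf = malloc(4096);

                if (!buf)
                        return 1;
                buf[0] = 1;     /* touch the page so it is actually allocated */

                /* MPOL_F_NODE | MPOL_F_ADDR: report the node backing this address. */
                if (get_mempolicy(&node, NULL, 0, buf, MPOL_F_NODE | MPOL_F_ADDR))
                        perror("get_mempolicy");
                else
                        printf("page is on node %d\n", node);

                free(buf);
                return 0;
        }
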
1559
1560 #ifdef CONFIG_COMPAT
1561
1562 asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1563 compat_ulong_t __user *nmask,
1564 compat_ulong_t maxnode,
1565 compat_ulong_t addr, compat_ulong_t flags)
1566 {
1567 long err;
1568 unsigned long __user *nm = NULL;
1569 unsigned long nr_bits, alloc_size;
1570 DECLARE_BITMAP(bm, MAX_NUMNODES);
1571
1572 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1573 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1574
1575 if (nmask)
1576 nm = compat_alloc_user_space(alloc_size);
1577
1578 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1579
1580 if (!err && nmask) {
1581 unsigned long copy_size;
1582 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1583 err = copy_from_user(bm, nm, copy_size);
1584 /* ensure entire bitmap is zeroed */
1585 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1586 err |= compat_put_bitmap(nmask, bm, nr_bits);
1587 }
1588
1589 return err;
1590 }
1591
1592 asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1593 compat_ulong_t maxnode)
1594 {
1595 long err = 0;
1596 unsigned long __user *nm = NULL;
1597 unsigned long nr_bits, alloc_size;
1598 DECLARE_BITMAP(bm, MAX_NUMNODES);
1599
1600 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1601 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1602
1603 if (nmask) {
1604 err = compat_get_bitmap(bm, nmask, nr_bits);
1605 nm = compat_alloc_user_space(alloc_size);
1606 err |= copy_to_user(nm, bm, alloc_size);
1607 }
1608
1609 if (err)
1610 return -EFAULT;
1611
1612 return sys_set_mempolicy(mode, nm, nr_bits+1);
1613 }
1614
1615 asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1616 compat_ulong_t mode, compat_ulong_t __user *nmask,
1617 compat_ulong_t maxnode, compat_ulong_t flags)
1618 {
1619 long err = 0;
1620 unsigned long __user *nm = NULL;
1621 unsigned long nr_bits, alloc_size;
1622 nodemask_t bm;
1623
1624 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1625 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1626
1627 if (nmask) {
1628 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1629 nm = compat_alloc_user_space(alloc_size);
1630 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1631 }
1632
1633 if (err)
1634 return -EFAULT;
1635
1636 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1637 }
1638
1639 #endif
1640
1641 /*
1642 * get_vma_policy(@task, @vma, @addr)
1643 * @task - task for fallback if vma policy == default
1644 * @vma - virtual memory area whose policy is sought
1645 * @addr - address in @vma for shared policy lookup
1646 *
1647 * Returns effective policy for a VMA at specified address.
1648 * Falls back to @task or system default policy, as necessary.
1649 * Current or other task's task mempolicy and non-shared vma policies must be
1650 * protected by task_lock(task) by the caller.
1651 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1652 * count--added by the get_policy() vm_op, as appropriate--to protect against
1653 * freeing by another task. It is the caller's responsibility to free the
1654 * extra reference for shared policies.
1655 */
1656 struct mempolicy *get_vma_policy(struct task_struct *task,
1657 struct vm_area_struct *vma, unsigned long addr)
1658 {
1659 struct mempolicy *pol = get_task_policy(task);
1660
1661 if (vma) {
1662 if (vma->vm_ops && vma->vm_ops->get_policy) {
1663 struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1664 addr);
1665 if (vpol)
1666 pol = vpol;
1667 } else if (vma->vm_policy) {
1668 pol = vma->vm_policy;
1669
1670 /*
1671 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1672 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1673 * count on these policies which will be dropped by
1674 * mpol_cond_put() later
1675 */
1676 if (mpol_needs_cond_ref(pol))
1677 mpol_get(pol);
1678 }
1679 }
1680 if (!pol)
1681 pol = &default_policy;
1682 return pol;
1683 }
1684
1685 bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma)
1686 {
1687 struct mempolicy *pol = get_task_policy(task);
1688 if (vma) {
1689 if (vma->vm_ops && vma->vm_ops->get_policy) {
1690 bool ret = false;
1691
1692 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1693 if (pol && (pol->flags & MPOL_F_MOF))
1694 ret = true;
1695 mpol_cond_put(pol);
1696
1697 return ret;
1698 } else if (vma->vm_policy) {
1699 pol = vma->vm_policy;
1700 }
1701 }
1702
1703 if (!pol)
1704 return default_policy.flags & MPOL_F_MOF;
1705
1706 return pol->flags & MPOL_F_MOF;
1707 }
1708
1709 static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1710 {
1711 enum zone_type dynamic_policy_zone = policy_zone;
1712
1713 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1714
1715 /*
1716 * if policy->v.nodes has movable memory only,
1717 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1718 *
1719 * policy->v.nodes has already been intersected with node_states[N_MEMORY],
1720 * so if the following test fails, it implies that
1721 * policy->v.nodes has movable memory only.
1722 */
1723 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1724 dynamic_policy_zone = ZONE_MOVABLE;
1725
1726 return zone >= dynamic_policy_zone;
1727 }
1728
1729 /*
1730 * Return a nodemask representing a mempolicy for filtering nodes for
1731 * page allocation
1732 */
1733 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1734 {
1735 /* Lower zones don't get a nodemask applied for MPOL_BIND */
1736 if (unlikely(policy->mode == MPOL_BIND) &&
1737 apply_policy_zone(policy, gfp_zone(gfp)) &&
1738 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1739 return &policy->v.nodes;
1740
1741 return NULL;
1742 }
1743
1744 /* Return a zonelist indicated by gfp for node representing a mempolicy */
1745 static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1746 int nd)
1747 {
1748 switch (policy->mode) {
1749 case MPOL_PREFERRED:
1750 if (!(policy->flags & MPOL_F_LOCAL))
1751 nd = policy->v.preferred_node;
1752 break;
1753 case MPOL_BIND:
1754 /*
1755 * Normally, MPOL_BIND allocations are node-local within the
1756 * allowed nodemask. However, if __GFP_THISNODE is set and the
1757 * current node isn't part of the mask, we use the zonelist for
1758 * the first node in the mask instead.
1759 */
1760 if (unlikely(gfp & __GFP_THISNODE) &&
1761 unlikely(!node_isset(nd, policy->v.nodes)))
1762 nd = first_node(policy->v.nodes);
1763 break;
1764 default:
1765 BUG();
1766 }
1767 return node_zonelist(nd, gfp);
1768 }
1769
1770 /* Do dynamic interleaving for a process */
1771 static unsigned interleave_nodes(struct mempolicy *policy)
1772 {
1773 unsigned nid, next;
1774 struct task_struct *me = current;
1775
1776 nid = me->il_next;
1777 next = next_node(nid, policy->v.nodes);
1778 if (next >= MAX_NUMNODES)
1779 next = first_node(policy->v.nodes);
1780 if (next < MAX_NUMNODES)
1781 me->il_next = next;
1782 return nid;
1783 }
1784
1785 /*
1786 * Depending on the memory policy, provide a node from which to allocate the
1787 * next slab entry.
1788 * @policy must be protected from freeing by the caller. If @policy is
1789 * the current task's mempolicy, this protection is implicit, as only the
1790 * task can change its own policy. The system default policy requires no
1791 * such protection.
1792 */
1793 unsigned slab_node(void)
1794 {
1795 struct mempolicy *policy;
1796
1797 if (in_interrupt())
1798 return numa_node_id();
1799
1800 policy = current->mempolicy;
1801 if (!policy || policy->flags & MPOL_F_LOCAL)
1802 return numa_node_id();
1803
1804 switch (policy->mode) {
1805 case MPOL_PREFERRED:
1806 /*
1807 * handled MPOL_F_LOCAL above
1808 */
1809 return policy->v.preferred_node;
1810
1811 case MPOL_INTERLEAVE:
1812 return interleave_nodes(policy);
1813
1814 case MPOL_BIND: {
1815 /*
1816 * Follow bind policy behavior and start allocation at the
1817 * first node.
1818 */
1819 struct zonelist *zonelist;
1820 struct zone *zone;
1821 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1822 zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1823 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1824 &policy->v.nodes,
1825 &zone);
1826 return zone ? zone->node : numa_node_id();
1827 }
1828
1829 default:
1830 BUG();
1831 }
1832 }
1833
1834 /* Do static interleaving for a VMA with known offset. */
1835 static unsigned offset_il_node(struct mempolicy *pol,
1836 struct vm_area_struct *vma, unsigned long off)
1837 {
1838 unsigned nnodes = nodes_weight(pol->v.nodes);
1839 unsigned target;
1840 int c;
1841 int nid = NUMA_NO_NODE;
1842
1843 if (!nnodes)
1844 return numa_node_id();
1845 target = (unsigned int)off % nnodes;
1846 c = 0;
1847 do {
1848 nid = next_node(nid, pol->v.nodes);
1849 c++;
1850 } while (c <= target);
1851 return nid;
1852 }
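
A quick worked example of the computation above (mask chosen for illustration): with an interleave policy over nodes {0, 2, 5}, nnodes == 3, so a page at offset off == 7 gives target = 7 % 3 = 1; the do/while loop then stops on the second set bit of the mask and returns node 2.
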
1853
1854 /* Determine a node number for interleave */
1855 static inline unsigned interleave_nid(struct mempolicy *pol,
1856 struct vm_area_struct *vma, unsigned long addr, int shift)
1857 {
1858 if (vma) {
1859 unsigned long off;
1860
1861 /*
1862 * for small pages, there is no difference between
1863 * shift and PAGE_SHIFT, so the bit-shift is safe.
1864 * for huge pages, since vm_pgoff is in units of small
1865 * pages, we need to shift off the always 0 bits to get
1866 * a useful offset.
1867 */
1868 BUG_ON(shift < PAGE_SHIFT);
1869 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1870 off += (addr - vma->vm_start) >> shift;
1871 return offset_il_node(pol, vma, off);
1872 } else
1873 return interleave_nodes(pol);
1874 }
1875
1876 /*
1877 * Return the bit number of a random bit set in the nodemask.
1878 * (returns NUMA_NO_NODE if nodemask is empty)
1879 */
1880 int node_random(const nodemask_t *maskp)
1881 {
1882 int w, bit = NUMA_NO_NODE;
1883
1884 w = nodes_weight(*maskp);
1885 if (w)
1886 bit = bitmap_ord_to_pos(maskp->bits,
1887 get_random_int() % w, MAX_NUMNODES);
1888 return bit;
1889 }
1890
1891 #ifdef CONFIG_HUGETLBFS
1892 /*
1893 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1894 * @vma = virtual memory area whose policy is sought
1895 * @addr = address in @vma for shared policy lookup and interleave policy
1896 * @gfp_flags = for requested zone
1897 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1898 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1899 *
1900 * Returns a zonelist suitable for a huge page allocation and a pointer
1901 * to the struct mempolicy for conditional unref after allocation.
1902 * If the effective policy is 'bind', returns a pointer to the mempolicy's
1903 * @nodemask for filtering the zonelist.
1904 *
1905 * Must be protected by get_mems_allowed()
1906 */
1907 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1908 gfp_t gfp_flags, struct mempolicy **mpol,
1909 nodemask_t **nodemask)
1910 {
1911 struct zonelist *zl;
1912
1913 *mpol = get_vma_policy(current, vma, addr);
1914 *nodemask = NULL; /* assume !MPOL_BIND */
1915
1916 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1917 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1918 huge_page_shift(hstate_vma(vma))), gfp_flags);
1919 } else {
1920 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
1921 if ((*mpol)->mode == MPOL_BIND)
1922 *nodemask = &(*mpol)->v.nodes;
1923 }
1924 return zl;
1925 }
1926
1927 /*
1928 * init_nodemask_of_mempolicy
1929 *
1930 * If the current task's mempolicy is "default" [NULL], return 'false'
1931 * to indicate default policy. Otherwise, extract the policy nodemask
1932 * for 'bind' or 'interleave' policy into the argument nodemask, or
1933 * initialize the argument nodemask to contain the single node for
1934 * 'preferred' or 'local' policy and return 'true' to indicate presence
1935 * of non-default mempolicy.
1936 *
1937 * We don't bother with reference counting the mempolicy [mpol_get/put]
1938 * because the current task is examining its own mempolicy and a task's
1939 * mempolicy is only ever changed by the task itself.
1940 *
1941 * N.B., it is the caller's responsibility to free a returned nodemask.
1942 */
1943 bool init_nodemask_of_mempolicy(nodemask_t *mask)
1944 {
1945 struct mempolicy *mempolicy;
1946 int nid;
1947
1948 if (!(mask && current->mempolicy))
1949 return false;
1950
1951 task_lock(current);
1952 mempolicy = current->mempolicy;
1953 switch (mempolicy->mode) {
1954 case MPOL_PREFERRED:
1955 if (mempolicy->flags & MPOL_F_LOCAL)
1956 nid = numa_node_id();
1957 else
1958 nid = mempolicy->v.preferred_node;
1959 init_nodemask_of_node(mask, nid);
1960 break;
1961
1962 case MPOL_BIND:
1963 /* Fall through */
1964 case MPOL_INTERLEAVE:
1965 *mask = mempolicy->v.nodes;
1966 break;
1967
1968 default:
1969 BUG();
1970 }
1971 task_unlock(current);
1972
1973 return true;
1974 }
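/*
 * Illustrative sketch, not part of the original file: a typical caller
 * (e.g. a hugetlb sysctl path) uses init_nodemask_of_mempolicy() to
 * restrict an operation to the task's policy nodes, falling back to all
 * memory nodes when the task runs with the default policy.
 */
#if 0
static void example_nodes_for_task(nodemask_t *nodes)
{
	if (!init_nodemask_of_mempolicy(nodes))
		*nodes = node_states[N_MEMORY];	/* default policy: all nodes */
}
#endif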
1975 #endif
1976
1977 /*
1978 * mempolicy_nodemask_intersects
1979 *
1980 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1981 * policy. Otherwise, check for intersection between mask and the policy
1982 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1983 * policy, always return true since it may allocate elsewhere on fallback.
1984 *
1985 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1986 */
1987 bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1988 const nodemask_t *mask)
1989 {
1990 struct mempolicy *mempolicy;
1991 bool ret = true;
1992
1993 if (!mask)
1994 return ret;
1995 task_lock(tsk);
1996 mempolicy = tsk->mempolicy;
1997 if (!mempolicy)
1998 goto out;
1999
2000 switch (mempolicy->mode) {
2001 case MPOL_PREFERRED:
2002 /*
2003 * MPOL_PREFERRED and MPOL_F_LOCAL only indicate preferred nodes to
2004 * allocate from; the task may fall back to other nodes when OOM.
2005 * Thus, it's possible for tsk to have allocated memory from
2006 * nodes in mask.
2007 */
2008 break;
2009 case MPOL_BIND:
2010 case MPOL_INTERLEAVE:
2011 ret = nodes_intersects(mempolicy->v.nodes, *mask);
2012 break;
2013 default:
2014 BUG();
2015 }
2016 out:
2017 task_unlock(tsk);
2018 return ret;
2019 }
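/*
 * Illustrative sketch, not part of the original file: an OOM-killer style
 * check.  A task whose MPOL_BIND/MPOL_INTERLEAVE nodemask is disjoint from
 * the constrained nodes cannot have allocated memory there, so it may be
 * skipped as a victim candidate.
 */
#if 0
static bool example_oom_skip_task(struct task_struct *tsk,
				  const nodemask_t *constrained_nodes)
{
	return !mempolicy_nodemask_intersects(tsk, constrained_nodes);
}
#endif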
2020
2021 /* Allocate a page in interleaved policy.
2022 Own path because it needs to do special accounting. */
2023 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2024 unsigned nid)
2025 {
2026 struct zonelist *zl;
2027 struct page *page;
2028
2029 zl = node_zonelist(nid, gfp);
2030 page = __alloc_pages(gfp, order, zl);
2031 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
2032 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
2033 return page;
2034 }
2035
2036 /**
2037 * alloc_pages_vma - Allocate a page for a VMA.
2038 *
2039 * @gfp:
2040 * %GFP_USER user allocation.
2041 * %GFP_KERNEL kernel allocations,
2042 * %GFP_HIGHMEM highmem/user allocations,
2043 * %GFP_FS allocation should not call back into a file system.
2044 * %GFP_ATOMIC don't sleep.
2045 *
2046 * @order: Order of the GFP allocation.
2047 * @vma: Pointer to VMA or NULL if not available.
2048 * @addr: Virtual Address of the allocation. Must be inside the VMA.
2049 *
2050 * This function allocates a page from the kernel page pool and applies
2051 * a NUMA policy associated with the VMA or the current process.
2052 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
2053 * mm_struct of the VMA to prevent it from going away. Should be used for
2054 * all allocations for pages that will be mapped into
2055 * user space. Returns NULL when no page can be allocated.
2056 *
2057 * Should be called with the mmap_sem of the vma held.
2058 */
2059 struct page *
2060 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2061 unsigned long addr, int node)
2062 {
2063 struct mempolicy *pol;
2064 struct page *page;
2065 unsigned int cpuset_mems_cookie;
2066
2067 retry_cpuset:
2068 pol = get_vma_policy(current, vma, addr);
2069 cpuset_mems_cookie = get_mems_allowed();
2070
2071 if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
2072 unsigned nid;
2073
2074 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2075 mpol_cond_put(pol);
2076 page = alloc_page_interleave(gfp, order, nid);
2077 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2078 goto retry_cpuset;
2079
2080 return page;
2081 }
2082 page = __alloc_pages_nodemask(gfp, order,
2083 policy_zonelist(gfp, pol, node),
2084 policy_nodemask(gfp, pol));
2085 if (unlikely(mpol_needs_cond_ref(pol)))
2086 __mpol_put(pol);
2087 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2088 goto retry_cpuset;
2089 return page;
2090 }
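/*
 * Illustrative sketch, not part of the original file: the common way a
 * fault handler allocates a single page obeying the VMA policy, passing
 * the local node as the node to prefer when the policy allows it.  This is
 * essentially what the alloc_page_vma() wrapper expands to.
 */
#if 0
static struct page *example_fault_alloc(struct vm_area_struct *vma,
					unsigned long addr)
{
	/* order 0, prefer the node of the faulting CPU */
	return alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
			       numa_node_id());
}
#endif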
2091
2092 /**
2093 * alloc_pages_current - Allocate pages.
2094 *
2095 * @gfp:
2096 * %GFP_USER user allocation,
2097 * %GFP_KERNEL kernel allocation,
2098 * %GFP_HIGHMEM highmem allocation,
2099 * %GFP_FS don't call back into a file system.
2100 * %GFP_ATOMIC don't sleep.
2101 * @order: Power of two of allocation size in pages. 0 is a single page.
2102 *
2103 * Allocate a page from the kernel page pool, applying the current
2104 * process' NUMA policy when not in interrupt context.
2105 * Returns NULL when no page can be allocated.
2106 *
2107 * Don't call cpuset_update_task_memory_state() unless
2108 * 1) it's ok to take cpuset_sem (can WAIT), and
2109 * 2) allocating for current task (not interrupt).
2110 */
2111 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
2112 {
2113 struct mempolicy *pol = get_task_policy(current);
2114 struct page *page;
2115 unsigned int cpuset_mems_cookie;
2116
2117 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
2118 pol = &default_policy;
2119
2120 retry_cpuset:
2121 cpuset_mems_cookie = get_mems_allowed();
2122
2123 /*
2124 * No reference counting needed for current->mempolicy
2125 * nor system default_policy
2126 */
2127 if (pol->mode == MPOL_INTERLEAVE)
2128 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2129 else
2130 page = __alloc_pages_nodemask(gfp, order,
2131 policy_zonelist(gfp, pol, numa_node_id()),
2132 policy_nodemask(gfp, pol));
2133
2134 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2135 goto retry_cpuset;
2136
2137 return page;
2138 }
2139 EXPORT_SYMBOL(alloc_pages_current);
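/*
 * Illustrative sketch, not part of the original file: on NUMA kernels the
 * generic alloc_pages()/alloc_page() helpers resolve to
 * alloc_pages_current(), so an ordinary kernel allocation such as the one
 * below already honours the calling task's mempolicy unless it runs in
 * interrupt context.
 */
#if 0
static void *example_task_policy_alloc(void)
{
	/* order 1: two contiguous pages, placed according to task policy */
	struct page *page = alloc_pages(GFP_KERNEL, 1);

	return page ? page_address(page) : NULL;
}
#endif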
2140
2141 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2142 {
2143 struct mempolicy *pol = mpol_dup(vma_policy(src));
2144
2145 if (IS_ERR(pol))
2146 return PTR_ERR(pol);
2147 dst->vm_policy = pol;
2148 return 0;
2149 }
2150
2151 /*
2152 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2153 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2154 * with the mems_allowed returned by cpuset_mems_allowed(). This
2155 * keeps mempolicies cpuset relative after their cpuset moves. See
2156 * further kernel/cpuset.c update_nodemask().
2157 *
2158 * current's mempolicy may be rebound by another task (the task that changes
2159 * the cpuset's mems), so we need not do the rebind work for the current task.
2160 */
2161
2162 /* Slow path of a mempolicy duplicate */
2163 struct mempolicy *__mpol_dup(struct mempolicy *old)
2164 {
2165 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2166
2167 if (!new)
2168 return ERR_PTR(-ENOMEM);
2169
2170 /* task's mempolicy is protected by alloc_lock */
2171 if (old == current->mempolicy) {
2172 task_lock(current);
2173 *new = *old;
2174 task_unlock(current);
2175 } else
2176 *new = *old;
2177
2178 rcu_read_lock();
2179 if (current_cpuset_is_being_rebound()) {
2180 nodemask_t mems = cpuset_mems_allowed(current);
2181 if (new->flags & MPOL_F_REBINDING)
2182 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2183 else
2184 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
2185 }
2186 rcu_read_unlock();
2187 atomic_set(&new->refcnt, 1);
2188 return new;
2189 }
2190
2191 /* Slow path of a mempolicy comparison */
2192 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2193 {
2194 if (!a || !b)
2195 return false;
2196 if (a->mode != b->mode)
2197 return false;
2198 if (a->flags != b->flags)
2199 return false;
2200 if (mpol_store_user_nodemask(a))
2201 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2202 return false;
2203
2204 switch (a->mode) {
2205 case MPOL_BIND:
2206 /* Fall through */
2207 case MPOL_INTERLEAVE:
2208 return !!nodes_equal(a->v.nodes, b->v.nodes);
2209 case MPOL_PREFERRED:
2210 return a->v.preferred_node == b->v.preferred_node;
2211 default:
2212 BUG();
2213 return false;
2214 }
2215 }
2216
2217 /*
2218 * Shared memory backing store policy support.
2219 *
2220 * Remember policies even when nobody has shared memory mapped.
2221 * The policies are kept in Red-Black tree linked from the inode.
2222 * They are protected by the sp->lock spinlock, which should be held
2223 * for any accesses to the tree.
2224 */
2225
2226 /* lookup first element intersecting start-end */
2227 /* Caller holds sp->lock */
2228 static struct sp_node *
2229 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2230 {
2231 struct rb_node *n = sp->root.rb_node;
2232
2233 while (n) {
2234 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2235
2236 if (start >= p->end)
2237 n = n->rb_right;
2238 else if (end <= p->start)
2239 n = n->rb_left;
2240 else
2241 break;
2242 }
2243 if (!n)
2244 return NULL;
2245 for (;;) {
2246 struct sp_node *w = NULL;
2247 struct rb_node *prev = rb_prev(n);
2248 if (!prev)
2249 break;
2250 w = rb_entry(prev, struct sp_node, nd);
2251 if (w->end <= start)
2252 break;
2253 n = prev;
2254 }
2255 return rb_entry(n, struct sp_node, nd);
2256 }
2257
2258 /* Insert a new shared policy into the list. */
2259 /* Caller holds sp->lock */
2260 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2261 {
2262 struct rb_node **p = &sp->root.rb_node;
2263 struct rb_node *parent = NULL;
2264 struct sp_node *nd;
2265
2266 while (*p) {
2267 parent = *p;
2268 nd = rb_entry(parent, struct sp_node, nd);
2269 if (new->start < nd->start)
2270 p = &(*p)->rb_left;
2271 else if (new->end > nd->end)
2272 p = &(*p)->rb_right;
2273 else
2274 BUG();
2275 }
2276 rb_link_node(&new->nd, parent, p);
2277 rb_insert_color(&new->nd, &sp->root);
2278 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2279 new->policy ? new->policy->mode : 0);
2280 }
2281
2282 /* Find shared policy intersecting idx */
2283 struct mempolicy *
2284 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2285 {
2286 struct mempolicy *pol = NULL;
2287 struct sp_node *sn;
2288
2289 if (!sp->root.rb_node)
2290 return NULL;
2291 spin_lock(&sp->lock);
2292 sn = sp_lookup(sp, idx, idx+1);
2293 if (sn) {
2294 mpol_get(sn->policy);
2295 pol = sn->policy;
2296 }
2297 spin_unlock(&sp->lock);
2298 return pol;
2299 }
2300
2301 static void sp_free(struct sp_node *n)
2302 {
2303 mpol_put(n->policy);
2304 kmem_cache_free(sn_cache, n);
2305 }
2306
2307 #ifdef CONFIG_NUMA_BALANCING
2308 static bool numa_migrate_deferred(struct task_struct *p, int last_cpupid)
2309 {
2310 /* Never defer a private fault */
2311 if (cpupid_match_pid(p, last_cpupid))
2312 return false;
2313
2314 if (p->numa_migrate_deferred) {
2315 p->numa_migrate_deferred--;
2316 return true;
2317 }
2318 return false;
2319 }
2320
2321 static inline void defer_numa_migrate(struct task_struct *p)
2322 {
2323 p->numa_migrate_deferred = sysctl_numa_balancing_migrate_deferred;
2324 }
2325 #else
2326 static inline bool numa_migrate_deferred(struct task_struct *p, int last_cpupid)
2327 {
2328 return false;
2329 }
2330
2331 static inline void defer_numa_migrate(struct task_struct *p)
2332 {
2333 }
2334 #endif /* CONFIG_NUMA_BALANCING */
2335
2336 /**
2337 * mpol_misplaced - check whether current page node is valid in policy
2338 *
2339 * @page: page to be checked
2340 * @vma: vm area where page mapped
2341 * @addr: virtual address where page mapped
2342 *
2343 * Lookup current policy node id for vma,addr and "compare to" page's
2344 * node id.
2345 *
2346 * Returns:
2347 * -1 - not misplaced, page is in the right node
2348 * node - node id where the page should be
2349 *
2350 * Policy determination "mimics" alloc_page_vma().
2351 * Called from fault path where we know the vma and faulting address.
2352 */
2353 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2354 {
2355 struct mempolicy *pol;
2356 struct zone *zone;
2357 int curnid = page_to_nid(page);
2358 unsigned long pgoff;
2359 int thiscpu = raw_smp_processor_id();
2360 int thisnid = cpu_to_node(thiscpu);
2361 int polnid = -1;
2362 int ret = -1;
2363
2364 BUG_ON(!vma);
2365
2366 pol = get_vma_policy(current, vma, addr);
2367 if (!(pol->flags & MPOL_F_MOF))
2368 goto out;
2369
2370 switch (pol->mode) {
2371 case MPOL_INTERLEAVE:
2372 BUG_ON(addr >= vma->vm_end);
2373 BUG_ON(addr < vma->vm_start);
2374
2375 pgoff = vma->vm_pgoff;
2376 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2377 polnid = offset_il_node(pol, vma, pgoff);
2378 break;
2379
2380 case MPOL_PREFERRED:
2381 if (pol->flags & MPOL_F_LOCAL)
2382 polnid = numa_node_id();
2383 else
2384 polnid = pol->v.preferred_node;
2385 break;
2386
2387 case MPOL_BIND:
2388 /*
2389 * MPOL_BIND allows binding to multiple nodes.
2390 * Use the current page's node if it is in the policy nodemask,
2391 * else select the nearest allowed node, if any.
2392 * If there are no allowed nodes, use the current node [!misplaced].
2393 */
2394 if (node_isset(curnid, pol->v.nodes))
2395 goto out;
2396 (void)first_zones_zonelist(
2397 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2398 gfp_zone(GFP_HIGHUSER),
2399 &pol->v.nodes, &zone);
2400 polnid = zone->node;
2401 break;
2402
2403 default:
2404 BUG();
2405 }
2406
2407 /* Migrate the page towards the node whose CPU is referencing it */
2408 if (pol->flags & MPOL_F_MORON) {
2409 int last_cpupid;
2410 int this_cpupid;
2411
2412 polnid = thisnid;
2413 this_cpupid = cpu_pid_to_cpupid(thiscpu, current->pid);
2414
2415 /*
2416 * Multi-stage node selection is used in conjunction
2417 * with a periodic migration fault to build a temporal
2418 * task<->page relation. By using a two-stage filter we
2419 * remove short/unlikely relations.
2420 *
2421 * Using P(p) ~ n_p / n_t as per frequentist
2422 * probability, we can equate a task's usage of a
2423 * particular page (n_p) per total usage of this
2424 * page (n_t) (in a given time-span) to a probability.
2425 *
2426 * Our periodic faults will sample this probability and
2427 * getting the same result twice in a row, given these
2428 * samples are fully independent, is then given by
2429 * P(n)^2, provided our sample period is sufficiently
2430 * short compared to the usage pattern.
2431 *
2432 * This quadratic squishes small probabilities, making
2433 * it less likely we act on an unlikely task<->page
2434 * relation.
2435 */
2436 last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
2437 if (!cpupid_pid_unset(last_cpupid) && cpupid_to_nid(last_cpupid) != thisnid) {
2438
2439 /* See sysctl_numa_balancing_migrate_deferred comment */
2440 if (!cpupid_match_pid(current, last_cpupid))
2441 defer_numa_migrate(current);
2442
2443 goto out;
2444 }
2445
2446 /*
2447 * The quadratic filter above reduces extraneous migration
2448 * of shared pages somewhat. This code reduces it even more,
2449 * reducing the overhead of page migrations of shared pages.
2450 * This makes workloads with shared pages rely more on
2451 * "move task near its memory", and less on "move memory
2452 * towards its task", which is exactly what we want.
2453 */
2454 if (numa_migrate_deferred(current, last_cpupid))
2455 goto out;
2456 }
2457
2458 if (curnid != polnid)
2459 ret = polnid;
2460 out:
2461 mpol_cond_put(pol);
2462
2463 return ret;
2464 }
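/*
 * Illustrative sketch, not part of the original file: how a NUMA hinting
 * fault handler is expected to use mpol_misplaced().  A return value of -1
 * means the page already sits on an acceptable node; any other value names
 * the node the page should move towards.  example_migrate_to_node() is a
 * hypothetical stand-in for the real migration call.
 */
#if 0
static void example_numa_hinting_fault(struct page *page,
				       struct vm_area_struct *vma,
				       unsigned long addr)
{
	int target_nid = mpol_misplaced(page, vma, addr);

	if (target_nid != -1)
		example_migrate_to_node(page, target_nid);	/* hypothetical */
}
#endif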
2465
2466 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2467 {
2468 pr_debug("deleting %lx-l%lx\n", n->start, n->end);
2469 rb_erase(&n->nd, &sp->root);
2470 sp_free(n);
2471 }
2472
2473 static void sp_node_init(struct sp_node *node, unsigned long start,
2474 unsigned long end, struct mempolicy *pol)
2475 {
2476 node->start = start;
2477 node->end = end;
2478 node->policy = pol;
2479 }
2480
2481 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2482 struct mempolicy *pol)
2483 {
2484 struct sp_node *n;
2485 struct mempolicy *newpol;
2486
2487 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2488 if (!n)
2489 return NULL;
2490
2491 newpol = mpol_dup(pol);
2492 if (IS_ERR(newpol)) {
2493 kmem_cache_free(sn_cache, n);
2494 return NULL;
2495 }
2496 newpol->flags |= MPOL_F_SHARED;
2497 sp_node_init(n, start, end, newpol);
2498
2499 return n;
2500 }
2501
2502 /* Replace a policy range. */
2503 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2504 unsigned long end, struct sp_node *new)
2505 {
2506 struct sp_node *n;
2507 struct sp_node *n_new = NULL;
2508 struct mempolicy *mpol_new = NULL;
2509 int ret = 0;
2510
2511 restart:
2512 spin_lock(&sp->lock);
2513 n = sp_lookup(sp, start, end);
2514 /* Take care of old policies in the same range. */
2515 while (n && n->start < end) {
2516 struct rb_node *next = rb_next(&n->nd);
2517 if (n->start >= start) {
2518 if (n->end <= end)
2519 sp_delete(sp, n);
2520 else
2521 n->start = end;
2522 } else {
2523 /* Old policy spanning whole new range. */
2524 if (n->end > end) {
2525 if (!n_new)
2526 goto alloc_new;
2527
2528 *mpol_new = *n->policy;
2529 atomic_set(&mpol_new->refcnt, 1);
2530 sp_node_init(n_new, end, n->end, mpol_new);
2531 n->end = start;
2532 sp_insert(sp, n_new);
2533 n_new = NULL;
2534 mpol_new = NULL;
2535 break;
2536 } else
2537 n->end = start;
2538 }
2539 if (!next)
2540 break;
2541 n = rb_entry(next, struct sp_node, nd);
2542 }
2543 if (new)
2544 sp_insert(sp, new);
2545 spin_unlock(&sp->lock);
2546 ret = 0;
2547
2548 err_out:
2549 if (mpol_new)
2550 mpol_put(mpol_new);
2551 if (n_new)
2552 kmem_cache_free(sn_cache, n_new);
2553
2554 return ret;
2555
2556 alloc_new:
2557 spin_unlock(&sp->lock);
2558 ret = -ENOMEM;
2559 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2560 if (!n_new)
2561 goto err_out;
2562 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2563 if (!mpol_new)
2564 goto err_out;
2565 goto restart;
2566 }
2567
2568 /**
2569 * mpol_shared_policy_init - initialize shared policy for inode
2570 * @sp: pointer to inode shared policy
2571 * @mpol: struct mempolicy to install
2572 *
2573 * Install non-NULL @mpol in inode's shared policy rb-tree.
2574 * On entry, the current task has a reference on a non-NULL @mpol.
2575 * This must be released on exit.
2576 * This is called during get_inode() calls, so we can use GFP_KERNEL.
2577 */
2578 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2579 {
2580 int ret;
2581
2582 sp->root = RB_ROOT; /* empty tree == default mempolicy */
2583 spin_lock_init(&sp->lock);
2584
2585 if (mpol) {
2586 struct vm_area_struct pvma;
2587 struct mempolicy *new;
2588 NODEMASK_SCRATCH(scratch);
2589
2590 if (!scratch)
2591 goto put_mpol;
2592 /* contextualize the tmpfs mount point mempolicy */
2593 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2594 if (IS_ERR(new))
2595 goto free_scratch; /* no valid nodemask intersection */
2596
2597 task_lock(current);
2598 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2599 task_unlock(current);
2600 if (ret)
2601 goto put_new;
2602
2603 /* Create pseudo-vma that contains just the policy */
2604 memset(&pvma, 0, sizeof(struct vm_area_struct));
2605 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2606 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2607
2608 put_new:
2609 mpol_put(new); /* drop initial ref */
2610 free_scratch:
2611 NODEMASK_SCRATCH_FREE(scratch);
2612 put_mpol:
2613 mpol_put(mpol); /* drop our incoming ref on sb mpol */
2614 }
2615 }
2616
2617 int mpol_set_shared_policy(struct shared_policy *info,
2618 struct vm_area_struct *vma, struct mempolicy *npol)
2619 {
2620 int err;
2621 struct sp_node *new = NULL;
2622 unsigned long sz = vma_pages(vma);
2623
2624 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2625 vma->vm_pgoff,
2626 sz, npol ? npol->mode : -1,
2627 npol ? npol->flags : -1,
2628 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
2629
2630 if (npol) {
2631 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2632 if (!new)
2633 return -ENOMEM;
2634 }
2635 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2636 if (err && new)
2637 sp_free(new);
2638 return err;
2639 }
2640
2641 /* Free a backing policy store on inode delete. */
2642 void mpol_free_shared_policy(struct shared_policy *p)
2643 {
2644 struct sp_node *n;
2645 struct rb_node *next;
2646
2647 if (!p->root.rb_node)
2648 return;
2649 spin_lock(&p->lock);
2650 next = rb_first(&p->root);
2651 while (next) {
2652 n = rb_entry(next, struct sp_node, nd);
2653 next = rb_next(&n->nd);
2654 sp_delete(p, n);
2655 }
2656 spin_unlock(&p->lock);
2657 }
2658
2659 #ifdef CONFIG_NUMA_BALANCING
2660 static bool __initdata numabalancing_override;
2661
2662 static void __init check_numabalancing_enable(void)
2663 {
2664 bool numabalancing_default = false;
2665
2666 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2667 numabalancing_default = true;
2668
2669 if (nr_node_ids > 1 && !numabalancing_override) {
2670 printk(KERN_INFO "Enabling automatic NUMA balancing. "
2671 "Configure with numa_balancing= or sysctl");
2672 set_numabalancing_state(numabalancing_default);
2673 }
2674 }
2675
2676 static int __init setup_numabalancing(char *str)
2677 {
2678 int ret = 0;
2679 if (!str)
2680 goto out;
2681 numabalancing_override = true;
2682
2683 if (!strcmp(str, "enable")) {
2684 set_numabalancing_state(true);
2685 ret = 1;
2686 } else if (!strcmp(str, "disable")) {
2687 set_numabalancing_state(false);
2688 ret = 1;
2689 }
2690 out:
2691 if (!ret)
2692 printk(KERN_WARNING "Unable to parse numa_balancing=\n");
2693
2694 return ret;
2695 }
2696 __setup("numa_balancing=", setup_numabalancing);
2697 #else
2698 static inline void __init check_numabalancing_enable(void)
2699 {
2700 }
2701 #endif /* CONFIG_NUMA_BALANCING */
2702
2703 /* assumes fs == KERNEL_DS */
2704 void __init numa_policy_init(void)
2705 {
2706 nodemask_t interleave_nodes;
2707 unsigned long largest = 0;
2708 int nid, prefer = 0;
2709
2710 policy_cache = kmem_cache_create("numa_policy",
2711 sizeof(struct mempolicy),
2712 0, SLAB_PANIC, NULL);
2713
2714 sn_cache = kmem_cache_create("shared_policy_node",
2715 sizeof(struct sp_node),
2716 0, SLAB_PANIC, NULL);
2717
2718 for_each_node(nid) {
2719 preferred_node_policy[nid] = (struct mempolicy) {
2720 .refcnt = ATOMIC_INIT(1),
2721 .mode = MPOL_PREFERRED,
2722 .flags = MPOL_F_MOF | MPOL_F_MORON,
2723 .v = { .preferred_node = nid, },
2724 };
2725 }
2726
2727 /*
2728 * Set interleaving policy for system init. Interleaving is only
2729 * enabled across suitably sized nodes (default is >= 16MB), or
2730 * fall back to the largest node if they're all smaller.
2731 */
2732 nodes_clear(interleave_nodes);
2733 for_each_node_state(nid, N_MEMORY) {
2734 unsigned long total_pages = node_present_pages(nid);
2735
2736 /* Preserve the largest node */
2737 if (largest < total_pages) {
2738 largest = total_pages;
2739 prefer = nid;
2740 }
2741
2742 /* Interleave this node? */
2743 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2744 node_set(nid, interleave_nodes);
2745 }
2746
2747 /* All too small, use the largest */
2748 if (unlikely(nodes_empty(interleave_nodes)))
2749 node_set(prefer, interleave_nodes);
2750
2751 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2752 printk("numa_policy_init: interleaving failed\n");
2753
2754 check_numabalancing_enable();
2755 }
2756
2757 /* Reset policy of current process to default */
2758 void numa_default_policy(void)
2759 {
2760 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2761 }
2762
2763 /*
2764 * Parse and format mempolicy from/to strings
2765 */
2766
2767 /*
2768 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
2769 */
2770 static const char * const policy_modes[] =
2771 {
2772 [MPOL_DEFAULT] = "default",
2773 [MPOL_PREFERRED] = "prefer",
2774 [MPOL_BIND] = "bind",
2775 [MPOL_INTERLEAVE] = "interleave",
2776 [MPOL_LOCAL] = "local",
2777 };
2778
2779
2780 #ifdef CONFIG_TMPFS
2781 /**
2782 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2783 * @str: string containing mempolicy to parse
2784 * @mpol: pointer to struct mempolicy pointer, returned on success.
2785 *
2786 * Format of input:
2787 * <mode>[=<flags>][:<nodelist>]
2788 *
2789 * On success, returns 0, else 1
2790 */
2791 int mpol_parse_str(char *str, struct mempolicy **mpol)
2792 {
2793 struct mempolicy *new = NULL;
2794 unsigned short mode;
2795 unsigned short mode_flags;
2796 nodemask_t nodes;
2797 char *nodelist = strchr(str, ':');
2798 char *flags = strchr(str, '=');
2799 int err = 1;
2800
2801 if (nodelist) {
2802 /* NUL-terminate mode or flags string */
2803 *nodelist++ = '\0';
2804 if (nodelist_parse(nodelist, nodes))
2805 goto out;
2806 if (!nodes_subset(nodes, node_states[N_MEMORY]))
2807 goto out;
2808 } else
2809 nodes_clear(nodes);
2810
2811 if (flags)
2812 *flags++ = '\0'; /* terminate mode string */
2813
2814 for (mode = 0; mode < MPOL_MAX; mode++) {
2815 if (!strcmp(str, policy_modes[mode])) {
2816 break;
2817 }
2818 }
2819 if (mode >= MPOL_MAX)
2820 goto out;
2821
2822 switch (mode) {
2823 case MPOL_PREFERRED:
2824 /*
2825 * Insist on a nodelist of one node only
2826 */
2827 if (nodelist) {
2828 char *rest = nodelist;
2829 while (isdigit(*rest))
2830 rest++;
2831 if (*rest)
2832 goto out;
2833 }
2834 break;
2835 case MPOL_INTERLEAVE:
2836 /*
2837 * Default to online nodes with memory if no nodelist
2838 */
2839 if (!nodelist)
2840 nodes = node_states[N_MEMORY];
2841 break;
2842 case MPOL_LOCAL:
2843 /*
2844 * Don't allow a nodelist; mpol_new() checks flags
2845 */
2846 if (nodelist)
2847 goto out;
2848 mode = MPOL_PREFERRED;
2849 break;
2850 case MPOL_DEFAULT:
2851 /*
2852 * Insist on an empty nodelist
2853 */
2854 if (!nodelist)
2855 err = 0;
2856 goto out;
2857 case MPOL_BIND:
2858 /*
2859 * Insist on a nodelist
2860 */
2861 if (!nodelist)
2862 goto out;
2863 }
2864
2865 mode_flags = 0;
2866 if (flags) {
2867 /*
2868 * Currently, we only support two mutually exclusive
2869 * mode flags.
2870 */
2871 if (!strcmp(flags, "static"))
2872 mode_flags |= MPOL_F_STATIC_NODES;
2873 else if (!strcmp(flags, "relative"))
2874 mode_flags |= MPOL_F_RELATIVE_NODES;
2875 else
2876 goto out;
2877 }
2878
2879 new = mpol_new(mode, mode_flags, &nodes);
2880 if (IS_ERR(new))
2881 goto out;
2882
2883 /*
2884 * Save nodes for mpol_to_str() to show the tmpfs mount options
2885 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2886 */
2887 if (mode != MPOL_PREFERRED)
2888 new->v.nodes = nodes;
2889 else if (nodelist)
2890 new->v.preferred_node = first_node(nodes);
2891 else
2892 new->flags |= MPOL_F_LOCAL;
2893
2894 /*
2895 * Save nodes for contextualization: this will be used to "clone"
2896 * the mempolicy in a specific context [cpuset] at a later time.
2897 */
2898 new->w.user_nodemask = nodes;
2899
2900 err = 0;
2901
2902 out:
2903 /* Restore string for error message */
2904 if (nodelist)
2905 *--nodelist = ':';
2906 if (flags)
2907 *--flags = '=';
2908 if (!err)
2909 *mpol = new;
2910 return err;
2911 }
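/*
 * Illustrative sketch, not part of the original file: strings accepted by
 * mpol_parse_str() as they would appear in a tmpfs "mpol=" mount option,
 * for example:
 *
 *	"default"
 *	"prefer=static:2"
 *	"bind:0-3"
 *	"interleave=relative:0,2"
 *	"local"
 *
 * (Node numbers must name nodes that actually have memory.)  The buffer is
 * modified in place during parsing and restored before returning.
 */
#if 0
static struct mempolicy *example_parse(char *opt)
{
	struct mempolicy *mpol;

	if (mpol_parse_str(opt, &mpol))
		return NULL;			/* non-zero means parse failure */
	return mpol;
}
#endif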
2912 #endif /* CONFIG_TMPFS */
2913
2914 /**
2915 * mpol_to_str - format a mempolicy structure for printing
2916 * @buffer: to contain formatted mempolicy string
2917 * @maxlen: length of @buffer
2918 * @pol: pointer to mempolicy to be formatted
2919 *
2920 * Convert @pol into a string. If @buffer is too short, truncate the string.
2921 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2922 * longest flag, "relative", and to display at least a few node ids.
2923 */
2924 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2925 {
2926 char *p = buffer;
2927 nodemask_t nodes = NODE_MASK_NONE;
2928 unsigned short mode = MPOL_DEFAULT;
2929 unsigned short flags = 0;
2930
2931 if (pol && pol != &default_policy) {
2932 mode = pol->mode;
2933 flags = pol->flags;
2934 }
2935
2936 switch (mode) {
2937 case MPOL_DEFAULT:
2938 break;
2939 case MPOL_PREFERRED:
2940 if (flags & MPOL_F_LOCAL)
2941 mode = MPOL_LOCAL;
2942 else
2943 node_set(pol->v.preferred_node, nodes);
2944 break;
2945 case MPOL_BIND:
2946 case MPOL_INTERLEAVE:
2947 nodes = pol->v.nodes;
2948 break;
2949 default:
2950 WARN_ON_ONCE(1);
2951 snprintf(p, maxlen, "unknown");
2952 return;
2953 }
2954
2955 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
2956
2957 if (flags & MPOL_MODE_FLAGS) {
2958 p += snprintf(p, buffer + maxlen - p, "=");
2959
2960 /*
2961 * Currently, the only defined flags are mutually exclusive
2962 */
2963 if (flags & MPOL_F_STATIC_NODES)
2964 p += snprintf(p, buffer + maxlen - p, "static");
2965 else if (flags & MPOL_F_RELATIVE_NODES)
2966 p += snprintf(p, buffer + maxlen - p, "relative");
2967 }
2968
2969 if (!nodes_empty(nodes)) {
2970 p += snprintf(p, buffer + maxlen - p, ":");
2971 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2972 }
2973 }
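/*
 * Illustrative sketch, not part of the original file: typical strings
 * produced by mpol_to_str() into a caller-supplied buffer, matching the
 * parse format above:
 *
 *	MPOL_DEFAULT				-> "default"
 *	MPOL_PREFERRED + MPOL_F_LOCAL		-> "local"
 *	MPOL_PREFERRED, node 2			-> "prefer:2"
 *	MPOL_BIND, nodes 0-3			-> "bind:0-3"
 *	MPOL_INTERLEAVE + relative, nodes 0,2	-> "interleave=relative:0,2"
 */
#if 0
static void example_show_policy(struct mempolicy *pol)
{
	char buf[64];

	mpol_to_str(buf, sizeof(buf), pol);	/* e.g. "interleave:0-3" */
	printk(KERN_INFO "mempolicy: %s\n", buf);
}
#endif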