// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
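
/*
 * Illustration only (not part of the kernel build): a userspace sketch of
 * how these policies are typically requested through the raw syscall
 * wrappers from libnuma's <numaif.h>. Node numbers are hypothetical and
 * error handling is elided.
 *
 *        unsigned long mask = (1UL << 0) | (1UL << 1);  // nodes 0 and 1
 *
 *        // Process policy: interleave future allocations across nodes 0,1.
 *        set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 *
 *        // VMA policy: bind an existing mapping to the same nodes. On a
 *        // page fault in [addr, addr+len) this takes priority over the
 *        // process policy, as described above.
 *        mbind(addr, len, MPOL_BIND, &mask, sizeof(mask) * 8, 0);
 */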

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)  /* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)        /* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
        .refcnt = ATOMIC_INIT(1), /* never free it */
        .mode = MPOL_PREFERRED,
        .flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_map_to_online_node - Find closest online node
 * @node: Node id to start the search
 *
 * Lookup the next closest node by distance if @node is not online.
 */
int numa_map_to_online_node(int node)
{
        int min_dist = INT_MAX, dist, n, min_node;

        if (node == NUMA_NO_NODE || node_online(node))
                return node;

        min_node = node;
        for_each_online_node(n) {
                dist = node_distance(node, n);
                if (dist < min_dist) {
                        min_dist = dist;
                        min_node = n;
                }
        }

        return min_node;
}
EXPORT_SYMBOL_GPL(numa_map_to_online_node);
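
/*
 * Usage sketch (hypothetical caller): a subsystem holding a node id that
 * may be offline or memoryless can remap it before allocating from it:
 *
 *        int nid = numa_map_to_online_node(dev_to_node(dev));
 *        struct page *page = alloc_pages_node(nid, GFP_KERNEL, 0);
 *
 * NUMA_NO_NODE and already-online ids are returned unchanged.
 */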

struct mempolicy *get_task_policy(struct task_struct *p)
{
        struct mempolicy *pol = p->mempolicy;
        int node;

        if (pol)
                return pol;

        node = numa_node_id();
        if (node != NUMA_NO_NODE) {
                pol = &preferred_node_policy[node];
                /* preferred_node_policy is not initialised early in boot */
                if (pol->mode)
                        return pol;
        }

        return &default_policy;
}

static const struct mempolicy_operations {
        int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
        void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
        return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
                                   const nodemask_t *rel)
{
        nodemask_t tmp;
        nodes_fold(tmp, *orig, nodes_weight(*rel));
        nodes_onto(*ret, tmp, *rel);
}
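
/*
 * Worked example (assumed node numbering): with a user-relative mask
 * *orig = {0,2} and an allowed set *rel = {4,5,6}, nodes_fold() first
 * wraps *orig modulo nodes_weight(*rel) = 3, leaving tmp = {0,2}, and
 * nodes_onto() then maps bit n of tmp onto the n'th set bit of *rel,
 * yielding *ret = {4,6} -- the 1st and 3rd nodes of the allowed set.
 */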

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (nodes_empty(*nodes))
                return -EINVAL;
        pol->v.nodes = *nodes;
        return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (!nodes)
                pol->flags |= MPOL_F_LOCAL;     /* local allocation */
        else if (nodes_empty(*nodes))
                return -EINVAL;                 /* no allowed nodes */
        else
                pol->v.preferred_node = first_node(*nodes);
        return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (nodes_empty(*nodes))
                return -EINVAL;
        pol->v.nodes = *nodes;
        return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy. mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags. But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy. May also be called holding the mmap_lock for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
                     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
        int ret;

        /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
        if (pol == NULL)
                return 0;
        /* Check N_MEMORY */
        nodes_and(nsc->mask1,
                  cpuset_current_mems_allowed, node_states[N_MEMORY]);

        VM_BUG_ON(!nodes);
        if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
                nodes = NULL;   /* explicit local allocation */
        else {
                if (pol->flags & MPOL_F_RELATIVE_NODES)
                        mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
                else
                        nodes_and(nsc->mask2, *nodes, nsc->mask1);

                if (mpol_store_user_nodemask(pol))
                        pol->w.user_nodemask = *nodes;
                else
                        pol->w.cpuset_mems_allowed =
                                                cpuset_current_mems_allowed;
        }

        if (nodes)
                ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
        else
                ret = mpol_ops[pol->mode].create(pol, NULL);
        return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
                                  nodemask_t *nodes)
{
        struct mempolicy *policy;

        pr_debug("setting mode %d flags %d nodes[0] %lx\n",
                 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

        if (mode == MPOL_DEFAULT) {
                if (nodes && !nodes_empty(*nodes))
                        return ERR_PTR(-EINVAL);
                return NULL;
        }
        VM_BUG_ON(!nodes);

        /*
         * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
         * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
         * All other modes require a valid pointer to a non-empty nodemask.
         */
        if (mode == MPOL_PREFERRED) {
                if (nodes_empty(*nodes)) {
                        if (((flags & MPOL_F_STATIC_NODES) ||
                             (flags & MPOL_F_RELATIVE_NODES)))
                                return ERR_PTR(-EINVAL);
                }
        } else if (mode == MPOL_LOCAL) {
                if (!nodes_empty(*nodes) ||
                    (flags & MPOL_F_STATIC_NODES) ||
                    (flags & MPOL_F_RELATIVE_NODES))
                        return ERR_PTR(-EINVAL);
                mode = MPOL_PREFERRED;
        } else if (nodes_empty(*nodes))
                return ERR_PTR(-EINVAL);
        policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
        if (!policy)
                return ERR_PTR(-ENOMEM);
        atomic_set(&policy->refcnt, 1);
        policy->mode = mode;
        policy->flags = flags;

        return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
        if (!atomic_dec_and_test(&p->refcnt))
                return;
        kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
        nodemask_t tmp;

        if (pol->flags & MPOL_F_STATIC_NODES)
                nodes_and(tmp, pol->w.user_nodemask, *nodes);
        else if (pol->flags & MPOL_F_RELATIVE_NODES)
                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
        else {
                nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
                                                                *nodes);
                pol->w.cpuset_mems_allowed = *nodes;
        }

        if (nodes_empty(tmp))
                tmp = *nodes;

        pol->v.nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
                                                const nodemask_t *nodes)
{
        nodemask_t tmp;

        if (pol->flags & MPOL_F_STATIC_NODES) {
                int node = first_node(pol->w.user_nodemask);

                if (node_isset(node, *nodes)) {
                        pol->v.preferred_node = node;
                        pol->flags &= ~MPOL_F_LOCAL;
                } else
                        pol->flags |= MPOL_F_LOCAL;
        } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
                pol->v.preferred_node = first_node(tmp);
        } else if (!(pol->flags & MPOL_F_LOCAL)) {
                pol->v.preferred_node = node_remap(pol->v.preferred_node,
                                                   pol->w.cpuset_mems_allowed,
                                                   *nodes);
                pol->w.cpuset_mems_allowed = *nodes;
        }
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
        if (!pol)
                return;
        if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
            nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
                return;

        mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
        mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm. Takes mm->mmap_lock during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
        struct vm_area_struct *vma;

        mmap_write_lock(mm);
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                mpol_rebind_policy(vma->vm_policy, new);
        mmap_write_unlock(mm);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
        [MPOL_DEFAULT] = {
                .rebind = mpol_rebind_default,
        },
        [MPOL_INTERLEAVE] = {
                .create = mpol_new_interleave,
                .rebind = mpol_rebind_nodemask,
        },
        [MPOL_PREFERRED] = {
                .create = mpol_new_preferred,
                .rebind = mpol_rebind_preferred,
        },
        [MPOL_BIND] = {
                .create = mpol_new_bind,
                .rebind = mpol_rebind_nodemask,
        },
};

static int migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags);

struct queue_pages {
        struct list_head *pagelist;
        unsigned long flags;
        nodemask_t *nmask;
        unsigned long start;
        unsigned long end;
        struct vm_area_struct *first;
};

/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the invert of qp->nmask.
 */
static inline bool queue_pages_required(struct page *page,
                                        struct queue_pages *qp)
{
        int nid = page_to_nid(page);
        unsigned long flags = qp->flags;

        return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}
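
/*
 * For illustration, the resulting decisions ("true" meaning the caller
 * goes on to collect the page):
 *
 *        nid in nmask?   MPOL_MF_INVERT?   queue_pages_required()
 *            yes              no                  true
 *            yes              yes                 false
 *            no               no                  false
 *            no               yes                 true
 *
 * do_mbind() passes MPOL_MF_INVERT so that pages *outside* the requested
 * nodemask are the ones collected for migration.
 */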

/*
 * queue_pages_pmd() has four possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 2 - THP was split.
 * -EIO - the PMD is a migration entry, or only MPOL_MF_STRICT was specified
 *        and an existing page was already on a node that does not follow
 *        the policy.
 */
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
        __releases(ptl)
{
        int ret = 0;
        struct page *page;
        struct queue_pages *qp = walk->private;
        unsigned long flags;

        if (unlikely(is_pmd_migration_entry(*pmd))) {
                ret = -EIO;
                goto unlock;
        }
        page = pmd_page(*pmd);
        if (is_huge_zero_page(page)) {
                spin_unlock(ptl);
                __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
                ret = 2;
                goto out;
        }
        if (!queue_pages_required(page, qp))
                goto unlock;

        flags = qp->flags;
        /* go to thp migration */
        if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
                if (!vma_migratable(walk->vma) ||
                    migrate_page_add(page, qp->pagelist, flags)) {
                        ret = 1;
                        goto unlock;
                }
        } else
                ret = -EIO;
unlock:
        spin_unlock(ptl);
out:
        return ret;
}

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 *
 * queue_pages_pte_range() has three possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 *        on a node that does not follow the policy.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
                        unsigned long end, struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        struct page *page;
        struct queue_pages *qp = walk->private;
        unsigned long flags = qp->flags;
        int ret;
        bool has_unmovable = false;
        pte_t *pte, *mapped_pte;
        spinlock_t *ptl;

        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
                if (ret != 2)
                        return ret;
        }
        /* THP was split, fall through to pte walk */

        if (pmd_trans_unstable(pmd))
                return 0;

        mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                if (!pte_present(*pte))
                        continue;
                page = vm_normal_page(vma, addr, *pte);
                if (!page)
                        continue;
                /*
                 * vm_normal_page() filters out zero pages, but there might
                 * still be PageReserved pages to skip, perhaps in a VDSO.
                 */
                if (PageReserved(page))
                        continue;
                if (!queue_pages_required(page, qp))
                        continue;
                if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
                        /* MPOL_MF_STRICT must be specified if we get here */
                        if (!vma_migratable(vma)) {
                                has_unmovable = true;
                                break;
                        }

                        /*
                         * Do not abort immediately since there may be
                         * temporarily off-LRU pages in the range. We still
                         * need to migrate the other LRU pages.
                         */
                        if (migrate_page_add(page, qp->pagelist, flags))
                                has_unmovable = true;
                } else
                        break;
        }
        pte_unmap_unlock(mapped_pte, ptl);
        cond_resched();

        if (has_unmovable)
                return 1;

        return addr != end ? -EIO : 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
                               unsigned long addr, unsigned long end,
                               struct mm_walk *walk)
{
        int ret = 0;
#ifdef CONFIG_HUGETLB_PAGE
        struct queue_pages *qp = walk->private;
        unsigned long flags = (qp->flags & MPOL_MF_VALID);
        struct page *page;
        spinlock_t *ptl;
        pte_t entry;

        ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
        entry = huge_ptep_get(pte);
        if (!pte_present(entry))
                goto unlock;
        page = pte_page(entry);
        if (!queue_pages_required(page, qp))
                goto unlock;

        if (flags == MPOL_MF_STRICT) {
                /*
                 * STRICT alone means only detecting misplaced pages; no
                 * need to check other vmas any further.
                 */
                ret = -EIO;
                goto unlock;
        }

        if (!vma_migratable(walk->vma)) {
                /*
                 * Must be STRICT with MOVE*, otherwise .test_walk() would
                 * have stopped walking the current vma.
                 * Detect the misplaced page but still allow migrating pages
                 * which have been queued.
                 */
                ret = 1;
                goto unlock;
        }

        /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
        if (flags & (MPOL_MF_MOVE_ALL) ||
            (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
                if (!isolate_huge_page(page, qp->pagelist) &&
                        (flags & MPOL_MF_STRICT))
                        /*
                         * Failed to isolate page but allow migrating pages
                         * which have been queued.
                         */
                        ret = 1;
        }
unlock:
        spin_unlock(ptl);
#else
        BUG();
#endif
        return ret;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
                        unsigned long addr, unsigned long end)
{
        int nr_updated;

        nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
        if (nr_updated)
                count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

        return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
                        unsigned long addr, unsigned long end)
{
        return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
                                struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        struct queue_pages *qp = walk->private;
        unsigned long endvma = vma->vm_end;
        unsigned long flags = qp->flags;

        /* range check first */
        VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);

        if (!qp->first) {
                qp->first = vma;
                if (!(flags & MPOL_MF_DISCONTIG_OK) &&
                        (qp->start < vma->vm_start))
                        /* hole at head side of range */
                        return -EFAULT;
        }
        if (!(flags & MPOL_MF_DISCONTIG_OK) &&
                ((vma->vm_end < qp->end) &&
                (!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
                /* hole at middle or tail of range */
                return -EFAULT;

        /*
         * Need to check MPOL_MF_STRICT to return -EIO if possible
         * regardless of vma_migratable
         */
        if (!vma_migratable(vma) &&
            !(flags & MPOL_MF_STRICT))
                return 1;

        if (endvma > end)
                endvma = end;

        if (flags & MPOL_MF_LAZY) {
                /* Similar to task_numa_work, skip inaccessible VMAs */
                if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
                        !(vma->vm_flags & VM_MIXEDMAP))
                        change_prot_numa(vma, start, endvma);
                return 1;
        }

        /* queue pages from current vma */
        if (flags & MPOL_MF_VALID)
                return 0;
        return 1;
}

static const struct mm_walk_ops queue_pages_walk_ops = {
        .hugetlb_entry          = queue_pages_hugetlb,
        .pmd_entry              = queue_pages_pte_range,
        .test_walk              = queue_pages_test_walk,
};

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they're isolated and queued to the pagelist which is
 * passed via @private.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 0 - queue pages successfully or no misplaced page.
 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
 *         memory range specified by nodemask and maxnode points outside
 *         your accessible address space (-EFAULT)
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
                nodemask_t *nodes, unsigned long flags,
                struct list_head *pagelist)
{
        int err;
        struct queue_pages qp = {
                .pagelist = pagelist,
                .flags = flags,
                .nmask = nodes,
                .start = start,
                .end = end,
                .first = NULL,
        };

        err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);

        if (!qp.first)
                /* whole range in hole */
                err = -EFAULT;

        return err;
}
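
/*
 * Typical call pattern (a condensed sketch of what migrate_to_node() and
 * do_mbind() below actually do): collect misplaced pages, hand the list
 * to migrate_pages(), and put back anything that could not be moved:
 *
 *        LIST_HEAD(pagelist);
 *
 *        if (queue_pages_range(mm, start, end, &nmask,
 *                              flags | MPOL_MF_INVERT, &pagelist) >= 0 &&
 *            !list_empty(&pagelist))
 *                if (migrate_pages(&pagelist, ...))
 *                        putback_movable_pages(&pagelist);
 */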

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_lock held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
                                                struct mempolicy *pol)
{
        int err;
        struct mempolicy *old;
        struct mempolicy *new;

        pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
                 vma->vm_start, vma->vm_end, vma->vm_pgoff,
                 vma->vm_ops, vma->vm_file,
                 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

        new = mpol_dup(pol);
        if (IS_ERR(new))
                return PTR_ERR(new);

        if (vma->vm_ops && vma->vm_ops->set_policy) {
                err = vma->vm_ops->set_policy(vma, new);
                if (err)
                        goto err_out;
        }

        old = vma->vm_policy;
        vma->vm_policy = new; /* protected by mmap_lock */
        mpol_put(old);

        return 0;
 err_out:
        mpol_put(new);
        return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
                       unsigned long end, struct mempolicy *new_pol)
{
        struct vm_area_struct *next;
        struct vm_area_struct *prev;
        struct vm_area_struct *vma;
        int err = 0;
        pgoff_t pgoff;
        unsigned long vmstart;
        unsigned long vmend;

        vma = find_vma(mm, start);
        VM_BUG_ON(!vma);

        prev = vma->vm_prev;
        if (start > vma->vm_start)
                prev = vma;

        for (; vma && vma->vm_start < end; prev = vma, vma = next) {
                next = vma->vm_next;
                vmstart = max(start, vma->vm_start);
                vmend = min(end, vma->vm_end);

                if (mpol_equal(vma_policy(vma), new_pol))
                        continue;

                pgoff = vma->vm_pgoff +
                        ((vmstart - vma->vm_start) >> PAGE_SHIFT);
                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
                                 vma->anon_vma, vma->vm_file, pgoff,
                                 new_pol, vma->vm_userfaultfd_ctx);
                if (prev) {
                        vma = prev;
                        next = vma->vm_next;
                        if (mpol_equal(vma_policy(vma), new_pol))
                                continue;
                        /* vma_merge() joined vma && vma->next, case 8 */
                        goto replace;
                }
                if (vma->vm_start != vmstart) {
                        err = split_vma(vma->vm_mm, vma, vmstart, 1);
                        if (err)
                                goto out;
                }
                if (vma->vm_end != vmend) {
                        err = split_vma(vma->vm_mm, vma, vmend, 0);
                        if (err)
                                goto out;
                }
 replace:
                err = vma_replace_policy(vma, new_pol);
                if (err)
                        goto out;
        }

 out:
        return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
                             nodemask_t *nodes)
{
        struct mempolicy *new, *old;
        NODEMASK_SCRATCH(scratch);
        int ret;

        if (!scratch)
                return -ENOMEM;

        new = mpol_new(mode, flags, nodes);
        if (IS_ERR(new)) {
                ret = PTR_ERR(new);
                goto out;
        }

        if (flags & MPOL_F_NUMA_BALANCING) {
                if (new && new->mode == MPOL_BIND) {
                        new->flags |= (MPOL_F_MOF | MPOL_F_MORON);
                } else {
                        ret = -EINVAL;
                        mpol_put(new);
                        goto out;
                }
        }

        ret = mpol_set_nodemask(new, nodes, scratch);
        if (ret) {
                mpol_put(new);
                goto out;
        }
        task_lock(current);
        old = current->mempolicy;
        current->mempolicy = new;
        if (new && new->mode == MPOL_INTERLEAVE)
                current->il_prev = MAX_NUMNODES-1;
        task_unlock(current);
        mpol_put(old);
        ret = 0;
out:
        NODEMASK_SCRATCH_FREE(scratch);
        return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
        nodes_clear(*nodes);
        if (p == &default_policy)
                return;

        switch (p->mode) {
        case MPOL_BIND:
        case MPOL_INTERLEAVE:
                *nodes = p->v.nodes;
                break;
        case MPOL_PREFERRED:
                if (!(p->flags & MPOL_F_LOCAL))
                        node_set(p->v.preferred_node, *nodes);
                /* else return empty node mask for local allocation */
                break;
        default:
                BUG();
        }
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
        struct page *p = NULL;
        int err;

        int locked = 1;
        err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
        if (err > 0) {
                err = page_to_nid(p);
                put_page(p);
        }
        if (locked)
                mmap_read_unlock(mm);
        return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
                             unsigned long addr, unsigned long flags)
{
        int err;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = NULL;
        struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;

        if (flags &
                ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
                return -EINVAL;

        if (flags & MPOL_F_MEMS_ALLOWED) {
                if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
                        return -EINVAL;
                *policy = 0;    /* just so it's initialized */
                task_lock(current);
                *nmask = cpuset_current_mems_allowed;
                task_unlock(current);
                return 0;
        }

        if (flags & MPOL_F_ADDR) {
                /*
                 * Do NOT fall back to task policy if the
                 * vma/shared policy at addr is NULL. We
                 * want to return MPOL_DEFAULT in this case.
                 */
                mmap_read_lock(mm);
                vma = find_vma_intersection(mm, addr, addr+1);
                if (!vma) {
                        mmap_read_unlock(mm);
                        return -EFAULT;
                }
                if (vma->vm_ops && vma->vm_ops->get_policy)
                        pol = vma->vm_ops->get_policy(vma, addr);
                else
                        pol = vma->vm_policy;
        } else if (addr)
                return -EINVAL;

        if (!pol)
                pol = &default_policy;  /* indicates default behavior */

        if (flags & MPOL_F_NODE) {
                if (flags & MPOL_F_ADDR) {
                        /*
                         * Take a refcount on the mpol, lookup_node()
                         * will drop the mmap_lock, so after calling
                         * lookup_node() only "pol" remains valid, "vma"
                         * is stale.
                         */
                        pol_refcount = pol;
                        vma = NULL;
                        mpol_get(pol);
                        err = lookup_node(mm, addr);
                        if (err < 0)
                                goto out;
                        *policy = err;
                } else if (pol == current->mempolicy &&
                                pol->mode == MPOL_INTERLEAVE) {
                        *policy = next_node_in(current->il_prev, pol->v.nodes);
                } else {
                        err = -EINVAL;
                        goto out;
                }
        } else {
                *policy = pol == &default_policy ? MPOL_DEFAULT :
                                                pol->mode;
                /*
                 * Internal mempolicy flags must be masked off before exposing
                 * the policy to userspace.
                 */
                *policy |= (pol->flags & MPOL_MODE_FLAGS);
        }

        err = 0;
        if (nmask) {
                if (mpol_store_user_nodemask(pol)) {
                        *nmask = pol->w.user_nodemask;
                } else {
                        task_lock(current);
                        get_policy_nodemask(pol, nmask);
                        task_unlock(current);
                }
        }

 out:
        mpol_cond_put(pol);
        if (vma)
                mmap_read_unlock(mm);
        if (pol_refcount)
                mpol_put(pol_refcount);
        return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration, thp tail pages can be passed.
 */
static int migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags)
{
        struct page *head = compound_head(page);
        /*
         * Avoid migrating a page that is shared with others.
         */
        if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
                if (!isolate_lru_page(head)) {
                        list_add_tail(&head->lru, pagelist);
                        mod_node_page_state(page_pgdat(head),
                                NR_ISOLATED_ANON + page_is_file_lru(head),
                                thp_nr_pages(head));
                } else if (flags & MPOL_MF_STRICT) {
                        /*
                         * Non-movable page may reach here. And, there may be
                         * temporary off LRU pages or non-LRU movable pages.
                         * Treat them as unmovable pages since they can't be
                         * isolated, so they can't be moved at the moment. It
                         * should return -EIO for this case too.
                         */
                        return -EIO;
                }
        }

        return 0;
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
                           int flags)
{
        nodemask_t nmask;
        LIST_HEAD(pagelist);
        int err = 0;
        struct migration_target_control mtc = {
                .nid = dest,
                .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
        };

        nodes_clear(nmask);
        node_set(source, nmask);

        /*
         * This does not "check" the range but isolates all pages that
         * need migration. Between passing in the full user address
         * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
         */
        VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
        queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
                        flags | MPOL_MF_DISCONTIG_OK, &pagelist);

        if (!list_empty(&pagelist)) {
                err = migrate_pages(&pagelist, alloc_migration_target, NULL,
                                (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
                if (err)
                        putback_movable_pages(&pagelist);
        }

        return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                     const nodemask_t *to, int flags)
{
        int busy = 0;
        int err = 0;
        nodemask_t tmp;

        migrate_prep();

        mmap_read_lock(mm);

        /*
         * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
         * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
         * bit in 'tmp', and return that <source, dest> pair for migration.
         * The pair of nodemasks 'to' and 'from' define the map.
         *
         * If no pair of bits is found that way, fallback to picking some
         * pair of 'source' and 'dest' bits that are not the same. If the
         * 'source' and 'dest' bits are the same, this represents a node
         * that will be migrating to itself, so no pages need move.
         *
         * If no bits are left in 'tmp', or if all remaining bits left
         * in 'tmp' correspond to the same bit in 'to', return false
         * (nothing left to migrate).
         *
         * This lets us pick a pair of nodes to migrate between, such that
         * if possible the dest node is not already occupied by some other
         * source node, minimizing the risk of overloading the memory on a
         * node that would happen if we migrated incoming memory to a node
         * before migrating outgoing memory sourced from that same node.
         *
         * A single scan of tmp is sufficient. As we go, we remember the
         * most recent <s, d> pair that moved (s != d). If we find a pair
         * that not only moved, but what's better, moved to an empty slot
         * (d is not set in tmp), then we break out then, with that pair.
         * Otherwise when we finish scanning tmp, we at least have the
         * most recent <s, d> pair that moved. If we get all the way through
         * the scan of tmp without finding any node that moved, much less
         * moved to an empty node, then there is nothing left worth migrating.
         */

        tmp = *from;
        while (!nodes_empty(tmp)) {
                int s, d;
                int source = NUMA_NO_NODE;
                int dest = 0;

                for_each_node_mask(s, tmp) {

                        /*
                         * do_migrate_pages() tries to maintain the relative
                         * node relationship of the pages established between
                         * threads and memory areas.
                         *
                         * However if the number of source nodes is not equal to
                         * the number of destination nodes we can not preserve
                         * this node relative relationship. In that case, skip
                         * copying memory from a node that is in the destination
                         * mask.
                         *
                         * Example: [2,3,4] -> [3,4,5] moves everything.
                         *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
                         */

                        if ((nodes_weight(*from) != nodes_weight(*to)) &&
                                                (node_isset(s, *to)))
                                continue;

                        d = node_remap(s, *from, *to);
                        if (s == d)
                                continue;

                        source = s;     /* Node moved. Memorize */
                        dest = d;

                        /* dest not in remaining from nodes? */
                        if (!node_isset(dest, tmp))
                                break;
                }
                if (source == NUMA_NO_NODE)
                        break;

                node_clear(source, tmp);
                err = migrate_to_node(mm, source, dest, flags);
                if (err > 0)
                        busy += err;
                if (err < 0)
                        break;
        }
        mmap_read_unlock(mm);
        if (err < 0)
                return err;
        return busy;

}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not. N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_page(struct page *page, unsigned long start)
{
        struct vm_area_struct *vma;
        unsigned long address;

        vma = find_vma(current->mm, start);
        while (vma) {
                address = page_address_in_vma(page, vma);
                if (address != -EFAULT)
                        break;
                vma = vma->vm_next;
        }

        if (PageHuge(page)) {
                return alloc_huge_page_vma(page_hstate(compound_head(page)),
                                vma, address);
        } else if (PageTransHuge(page)) {
                struct page *thp;

                thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
                                         HPAGE_PMD_ORDER);
                if (!thp)
                        return NULL;
                prep_transhuge_page(thp);
                return thp;
        }
        /*
         * if !vma, alloc_page_vma() will use task or system default policy
         */
        return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
                        vma, address);
}
#else

static int migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags)
{
        return -EIO;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                     const nodemask_t *to, int flags)
{
        return -ENOSYS;
}

static struct page *new_page(struct page *page, unsigned long start)
{
        return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
                     unsigned short mode, unsigned short mode_flags,
                     nodemask_t *nmask, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct mempolicy *new;
        unsigned long end;
        int err;
        int ret;
        LIST_HEAD(pagelist);

        if (flags & ~(unsigned long)MPOL_MF_VALID)
                return -EINVAL;
        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
                return -EPERM;

        if (start & ~PAGE_MASK)
                return -EINVAL;

        if (mode == MPOL_DEFAULT)
                flags &= ~MPOL_MF_STRICT;

        len = (len + PAGE_SIZE - 1) & PAGE_MASK;
        end = start + len;

        if (end < start)
                return -EINVAL;
        if (end == start)
                return 0;

        new = mpol_new(mode, mode_flags, nmask);
        if (IS_ERR(new))
                return PTR_ERR(new);

        if (flags & MPOL_MF_LAZY)
                new->flags |= MPOL_F_MOF;

        /*
         * If we are using the default policy then operation
         * on discontinuous address spaces is okay after all
         */
        if (!new)
                flags |= MPOL_MF_DISCONTIG_OK;

        pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
                 start, start + len, mode, mode_flags,
                 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);

        if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

                migrate_prep();
        }
        {
                NODEMASK_SCRATCH(scratch);
                if (scratch) {
                        mmap_write_lock(mm);
                        err = mpol_set_nodemask(new, nmask, scratch);
                        if (err)
                                mmap_write_unlock(mm);
                } else
                        err = -ENOMEM;
                NODEMASK_SCRATCH_FREE(scratch);
        }
        if (err)
                goto mpol_out;

        ret = queue_pages_range(mm, start, end, nmask,
                          flags | MPOL_MF_INVERT, &pagelist);

        if (ret < 0) {
                err = ret;
                goto up_out;
        }

        err = mbind_range(mm, start, end, new);

        if (!err) {
                int nr_failed = 0;

                if (!list_empty(&pagelist)) {
                        WARN_ON_ONCE(flags & MPOL_MF_LAZY);
                        nr_failed = migrate_pages(&pagelist, new_page, NULL,
                                start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
                        if (nr_failed)
                                putback_movable_pages(&pagelist);
                }

                if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
                        err = -EIO;
        } else {
up_out:
                if (!list_empty(&pagelist))
                        putback_movable_pages(&pagelist);
        }

        mmap_write_unlock(mm);
mpol_out:
        mpol_put(new);
        return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
                     unsigned long maxnode)
{
        unsigned long k;
        unsigned long t;
        unsigned long nlongs;
        unsigned long endmask;

        --maxnode;
        nodes_clear(*nodes);
        if (maxnode == 0 || !nmask)
                return 0;
        if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
                return -EINVAL;

        nlongs = BITS_TO_LONGS(maxnode);
        if ((maxnode % BITS_PER_LONG) == 0)
                endmask = ~0UL;
        else
                endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

        /*
         * When the user specified more nodes than supported, just check
         * if the non-supported part is all zero.
         *
         * If maxnode has more longs than MAX_NUMNODES, check the bits in
         * that area first. And then go through to check the remaining bits
         * which are equal to or bigger than MAX_NUMNODES. Otherwise, just
         * check bits [MAX_NUMNODES, maxnode).
         */
        if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
                for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
                        if (get_user(t, nmask + k))
                                return -EFAULT;
                        if (k == nlongs - 1) {
                                if (t & endmask)
                                        return -EINVAL;
                        } else if (t)
                                return -EINVAL;
                }
                nlongs = BITS_TO_LONGS(MAX_NUMNODES);
                endmask = ~0UL;
        }

        if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
                unsigned long valid_mask = endmask;

                valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
                if (get_user(t, nmask + nlongs - 1))
                        return -EFAULT;
                if (t & valid_mask)
                        return -EINVAL;
        }

        if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
                return -EFAULT;
        nodes_addr(*nodes)[nlongs-1] &= endmask;
        return 0;
}
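
/*
 * Worked example (assuming 64-bit longs and MAX_NUMNODES = 64): a caller
 * passing maxnode = 17 is describing node bits 0..15, so after the
 * --maxnode above we have maxnode = 16, nlongs = 1 and
 * endmask = (1UL << 16) - 1 = 0xffff. Only the low 16 bits of the single
 * user long survive into *nodes; higher bits are silently cleared by the
 * endmask. -EINVAL is only returned for set bits beyond MAX_NUMNODES,
 * which cannot happen in this small-maxnode case.
 */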

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
                              nodemask_t *nodes)
{
        unsigned long copy = ALIGN(maxnode-1, 64) / 8;
        unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);

        if (copy > nbytes) {
                if (copy > PAGE_SIZE)
                        return -EINVAL;
                if (clear_user((char __user *)mask + nbytes, copy - nbytes))
                        return -EFAULT;
                copy = nbytes;
        }
        return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

static long kernel_mbind(unsigned long start, unsigned long len,
                         unsigned long mode, const unsigned long __user *nmask,
                         unsigned long maxnode, unsigned int flags)
{
        nodemask_t nodes;
        int err;
        unsigned short mode_flags;

        start = untagged_addr(start);
        mode_flags = mode & MPOL_MODE_FLAGS;
        mode &= ~MPOL_MODE_FLAGS;
        if (mode >= MPOL_MAX)
                return -EINVAL;
        if ((mode_flags & MPOL_F_STATIC_NODES) &&
            (mode_flags & MPOL_F_RELATIVE_NODES))
                return -EINVAL;
        err = get_nodes(&nodes, nmask, maxnode);
        if (err)
                return err;
        return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}

SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
                unsigned long, mode, const unsigned long __user *, nmask,
                unsigned long, maxnode, unsigned int, flags)
{
        return kernel_mbind(start, len, mode, nmask, maxnode, flags);
}

/* Set the process memory policy */
static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
                                 unsigned long maxnode)
{
        int err;
        nodemask_t nodes;
        unsigned short flags;

        flags = mode & MPOL_MODE_FLAGS;
        mode &= ~MPOL_MODE_FLAGS;
        if ((unsigned int)mode >= MPOL_MAX)
                return -EINVAL;
        if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
                return -EINVAL;
        err = get_nodes(&nodes, nmask, maxnode);
        if (err)
                return err;
        return do_set_mempolicy(mode, flags, &nodes);
}

SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
                unsigned long, maxnode)
{
        return kernel_set_mempolicy(mode, nmask, maxnode);
}

static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
                                const unsigned long __user *old_nodes,
                                const unsigned long __user *new_nodes)
{
        struct mm_struct *mm = NULL;
        struct task_struct *task;
        nodemask_t task_nodes;
        int err;
        nodemask_t *old;
        nodemask_t *new;
        NODEMASK_SCRATCH(scratch);

        if (!scratch)
                return -ENOMEM;

        old = &scratch->mask1;
        new = &scratch->mask2;

        err = get_nodes(old, old_nodes, maxnode);
        if (err)
                goto out;

        err = get_nodes(new, new_nodes, maxnode);
        if (err)
                goto out;

        /* Find the mm_struct */
        rcu_read_lock();
        task = pid ? find_task_by_vpid(pid) : current;
        if (!task) {
                rcu_read_unlock();
                err = -ESRCH;
                goto out;
        }
        get_task_struct(task);

        err = -EINVAL;

        /*
         * Check if this process has the right to modify the specified process.
         * Use the regular "ptrace_may_access()" checks.
         */
        if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
                rcu_read_unlock();
                err = -EPERM;
                goto out_put;
        }
        rcu_read_unlock();

        task_nodes = cpuset_mems_allowed(task);
        /* Is the user allowed to access the target nodes? */
        if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
                err = -EPERM;
                goto out_put;
        }

        task_nodes = cpuset_mems_allowed(current);
        nodes_and(*new, *new, task_nodes);
        if (nodes_empty(*new))
                goto out_put;

        err = security_task_movememory(task);
        if (err)
                goto out_put;

        mm = get_task_mm(task);
        put_task_struct(task);

        if (!mm) {
                err = -EINVAL;
                goto out;
        }

        err = do_migrate_pages(mm, old, new,
                capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);

        mmput(mm);
out:
        NODEMASK_SCRATCH_FREE(scratch);

        return err;

out_put:
        put_task_struct(task);
        goto out;

}

SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
                const unsigned long __user *, old_nodes,
                const unsigned long __user *, new_nodes)
{
        return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
}


/* Retrieve NUMA policy */
static int kernel_get_mempolicy(int __user *policy,
                                unsigned long __user *nmask,
                                unsigned long maxnode,
                                unsigned long addr,
                                unsigned long flags)
{
        int err;
        int pval;
        nodemask_t nodes;

        if (nmask != NULL && maxnode < nr_node_ids)
                return -EINVAL;

        addr = untagged_addr(addr);

        err = do_get_mempolicy(&pval, &nodes, addr, flags);

        if (err)
                return err;

        if (policy && put_user(pval, policy))
                return -EFAULT;

        if (nmask)
                err = copy_nodes_to_user(nmask, maxnode, &nodes);

        return err;
}

SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
                unsigned long __user *, nmask, unsigned long, maxnode,
                unsigned long, addr, unsigned long, flags)
{
        return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
}

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
                       compat_ulong_t __user *, nmask,
                       compat_ulong_t, maxnode,
                       compat_ulong_t, addr, compat_ulong_t, flags)
{
        long err;
        unsigned long __user *nm = NULL;
        unsigned long nr_bits, alloc_size;
        DECLARE_BITMAP(bm, MAX_NUMNODES);

        nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

        if (nmask)
                nm = compat_alloc_user_space(alloc_size);

        err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

        if (!err && nmask) {
                unsigned long copy_size;
                copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
                err = copy_from_user(bm, nm, copy_size);
                /* ensure entire bitmap is zeroed */
                err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
                err |= compat_put_bitmap(nmask, bm, nr_bits);
        }

        return err;
}

COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
                       compat_ulong_t, maxnode)
{
        unsigned long __user *nm = NULL;
        unsigned long nr_bits, alloc_size;
        DECLARE_BITMAP(bm, MAX_NUMNODES);

        nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

        if (nmask) {
                if (compat_get_bitmap(bm, nmask, nr_bits))
                        return -EFAULT;
                nm = compat_alloc_user_space(alloc_size);
                if (copy_to_user(nm, bm, alloc_size))
                        return -EFAULT;
        }

        return kernel_set_mempolicy(mode, nm, nr_bits+1);
}

COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
                       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
                       compat_ulong_t, maxnode, compat_ulong_t, flags)
{
        unsigned long __user *nm = NULL;
        unsigned long nr_bits, alloc_size;
        nodemask_t bm;

        nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

        if (nmask) {
                if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
                        return -EFAULT;
                nm = compat_alloc_user_space(alloc_size);
                if (copy_to_user(nm, nodes_addr(bm), alloc_size))
                        return -EFAULT;
        }

        return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
}

COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
                       compat_ulong_t, maxnode,
                       const compat_ulong_t __user *, old_nodes,
                       const compat_ulong_t __user *, new_nodes)
{
        unsigned long __user *old = NULL;
        unsigned long __user *new = NULL;
        nodemask_t tmp_mask;
        unsigned long nr_bits;
        unsigned long size;

        nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
        size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
        if (old_nodes) {
                if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
                        return -EFAULT;
                old = compat_alloc_user_space(new_nodes ? size * 2 : size);
                if (new_nodes)
                        new = old + size / sizeof(unsigned long);
                if (copy_to_user(old, nodes_addr(tmp_mask), size))
                        return -EFAULT;
        }
        if (new_nodes) {
                if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
                        return -EFAULT;
                if (new == NULL)
                        new = compat_alloc_user_space(size);
                if (copy_to_user(new, nodes_addr(tmp_mask), size))
                        return -EFAULT;
        }
        return kernel_migrate_pages(pid, nr_bits + 1, old, new);
}

#endif /* CONFIG_COMPAT */

bool vma_migratable(struct vm_area_struct *vma)
{
        if (vma->vm_flags & (VM_IO | VM_PFNMAP))
                return false;

        /*
         * DAX device mappings require predictable access latency, so avoid
         * incurring periodic faults.
         */
        if (vma_is_dax(vma))
                return false;

        if (is_vm_hugetlb_page(vma) &&
                !hugepage_migration_supported(hstate_vma(vma)))
                return false;

        /*
         * Migration allocates pages in the highest zone. If we cannot
         * do so then migration (at least from node to node) is not
         * possible.
         */
        if (vma->vm_file &&
                gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
                        < policy_zone)
                return false;
        return true;
}

struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
                                                unsigned long addr)
{
        struct mempolicy *pol = NULL;

        if (vma) {
                if (vma->vm_ops && vma->vm_ops->get_policy) {
                        pol = vma->vm_ops->get_policy(vma, addr);
                } else if (vma->vm_policy) {
                        pol = vma->vm_policy;

                        /*
                         * shmem_alloc_page() passes MPOL_F_SHARED policy with
                         * a pseudo vma whose vma->vm_ops=NULL. Take a reference
                         * count on these policies which will be dropped by
                         * mpol_cond_put() later
                         */
                        if (mpol_needs_cond_ref(pol))
                                mpol_get(pol);
                }
        }

        return pol;
}

/*
 * get_vma_policy(@vma, @addr)
 * @vma: virtual memory area whose policy is sought
 * @addr: address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to current->mempolicy or system default policy, as necessary.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task. It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
                                                unsigned long addr)
{
        struct mempolicy *pol = __get_vma_policy(vma, addr);

        if (!pol)
                pol = get_task_policy(current);

        return pol;
}

bool vma_policy_mof(struct vm_area_struct *vma)
{
        struct mempolicy *pol;

        if (vma->vm_ops && vma->vm_ops->get_policy) {
                bool ret = false;

                pol = vma->vm_ops->get_policy(vma, vma->vm_start);
                if (pol && (pol->flags & MPOL_F_MOF))
                        ret = true;
                mpol_cond_put(pol);

                return ret;
        }

        pol = vma->vm_policy;
        if (!pol)
                pol = get_task_policy(current);

        return pol->flags & MPOL_F_MOF;
}

static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
{
        enum zone_type dynamic_policy_zone = policy_zone;

        BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);

        /*
         * if policy->v.nodes has movable memory only,
         * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
         *
         * policy->v.nodes is intersected with node_states[N_MEMORY],
         * so if the following test fails, it implies
         * policy->v.nodes has movable memory only.
         */
        if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
                dynamic_policy_zone = ZONE_MOVABLE;

        return zone >= dynamic_policy_zone;
}

/*
 * Return a nodemask representing a mempolicy for filtering nodes for
 * page allocation
 */
nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
        /* Lower zones don't get a nodemask applied for MPOL_BIND */
        if (unlikely(policy->mode == MPOL_BIND) &&
                        apply_policy_zone(policy, gfp_zone(gfp)) &&
                        cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
                return &policy->v.nodes;

        return NULL;
}

/* Return the node id preferred by the given mempolicy, or the given id */
static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
{
        if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
                nd = policy->v.preferred_node;
        else {
                /*
                 * __GFP_THISNODE shouldn't even be used with the bind policy
                 * because we might easily break the expectation to stay on the
                 * requested node and not break the policy.
                 */
                WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
        }

        return nd;
}

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
        unsigned next;
        struct task_struct *me = current;

        next = next_node_in(me->il_prev, policy->v.nodes);
        if (next < MAX_NUMNODES)
                me->il_prev = next;
        return next;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 */
unsigned int mempolicy_slab_node(void)
{
        struct mempolicy *policy;
        int node = numa_mem_id();

        if (in_interrupt())
                return node;

        policy = current->mempolicy;
        if (!policy || policy->flags & MPOL_F_LOCAL)
                return node;

        switch (policy->mode) {
        case MPOL_PREFERRED:
                /*
                 * handled MPOL_F_LOCAL above
                 */
                return policy->v.preferred_node;

        case MPOL_INTERLEAVE:
                return interleave_nodes(policy);

        case MPOL_BIND: {
                struct zoneref *z;

                /*
                 * Follow bind policy behavior and start allocation at the
                 * first node.
                 */
                struct zonelist *zonelist;
                enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
                zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
                z = first_zones_zonelist(zonelist, highest_zoneidx,
                                                        &policy->v.nodes);
                return z->zone ? zone_to_nid(z->zone) : node;
        }

        default:
                BUG();
        }
}

/*
 * Do static interleaving for a VMA with known offset @n. Returns the n'th
 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
 * number of present nodes.
 */
static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
{
        unsigned nnodes = nodes_weight(pol->v.nodes);
        unsigned target;
        int i;
        int nid;

        if (!nnodes)
                return numa_node_id();
        target = (unsigned int)n % nnodes;
        nid = first_node(pol->v.nodes);
        for (i = 0; i < target; i++)
                nid = next_node(nid, pol->v.nodes);
        return nid;
}
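
/*
 * Worked example (assumed topology): with pol->v.nodes = {1,3,6}, the
 * mapping is n=0 -> node 1, n=1 -> node 3, n=2 -> node 6, and n=3 wraps
 * back to node 1 (3 % 3 == 0). Because @n is derived from the page's
 * offset in interleave_nid() below, a given offset always maps to the
 * same node, independent of fault order.
 */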

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
                 struct vm_area_struct *vma, unsigned long addr, int shift)
{
        if (vma) {
                unsigned long off;

                /*
                 * for small pages, there is no difference between
                 * shift and PAGE_SHIFT, so the bit-shift is safe.
                 * for huge pages, since vm_pgoff is in units of small
                 * pages, we need to shift off the always 0 bits to get
                 * a useful offset.
                 */
                BUG_ON(shift < PAGE_SHIFT);
                off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
                off += (addr - vma->vm_start) >> shift;
                return offset_il_node(pol, off);
        } else
                return interleave_nodes(pol);
}
2007
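/*
 * Worked example for interleave_nid() (illustration only, assuming
 * PAGE_SHIFT == 12): for a 2MB huge page, shift == 21. A VMA with
 * vm_pgoff == 0x400 (4KB units) contributes 0x400 >> 9 == 2 huge-page
 * offsets, and an address 6MB past vm_start adds (6MB >> 21) == 3
 * more, so off == 5, which offset_il_node() then maps into the
 * nodemask modulo the number of nodes.
 */
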
2008 #ifdef CONFIG_HUGETLBFS
2009 /*
2010 * huge_node(@vma, @addr, @gfp_flags, @mpol)
2011 * @vma: virtual memory area whose policy is sought
2012 * @addr: address in @vma for shared policy lookup and interleave policy
2013 * @gfp_flags: for requested zone
2014 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2015 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
2016 *
2017 * Returns a nid suitable for a huge page allocation and a pointer
2018 * to the struct mempolicy for conditional unref after allocation.
2019 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
2020 * @nodemask for filtering the zonelist.
2021 *
2022 * Must be protected by read_mems_allowed_begin()
2023 */
2024 int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
2025 struct mempolicy **mpol, nodemask_t **nodemask)
2026 {
2027 int nid;
2028
2029 *mpol = get_vma_policy(vma, addr);
2030 *nodemask = NULL; /* assume !MPOL_BIND */
2031
2032 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
2033 nid = interleave_nid(*mpol, vma, addr,
2034 huge_page_shift(hstate_vma(vma)));
2035 } else {
2036 nid = policy_node(gfp_flags, *mpol, numa_node_id());
2037 if ((*mpol)->mode == MPOL_BIND)
2038 *nodemask = &(*mpol)->v.nodes;
2039 }
2040 return nid;
2041 }
2042
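/*
 * Sketch of the expected calling pattern (cf. dequeue_huge_page_vma()
 * in mm/hugetlb.c; details abridged for illustration):
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	gfp_t gfp_mask = htlb_alloc_mask(h);
 *	int nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
 *
 *	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
 *	mpol_cond_put(mpol);
 *
 * The final mpol_cond_put() drops the reference that get_vma_policy()
 * took on a shared policy.
 */
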
2043 /*
2044 * init_nodemask_of_mempolicy
2045 *
2046 * If the current task's mempolicy is "default" [NULL], return 'false'
2047 * to indicate default policy. Otherwise, extract the policy nodemask
2048 * for 'bind' or 'interleave' policy into the argument nodemask, or
2049 * initialize the argument nodemask to contain the single node for
2050 * 'preferred' or 'local' policy and return 'true' to indicate presence
2051 * of non-default mempolicy.
2052 *
2053 * We don't bother with reference counting the mempolicy [mpol_get/put]
2054 * because the current task is examining its own mempolicy and a task's
2055 * mempolicy is only ever changed by the task itself.
2056 *
2057 * N.B., it is the caller's responsibility to free a returned nodemask.
2058 */
2059 bool init_nodemask_of_mempolicy(nodemask_t *mask)
2060 {
2061 struct mempolicy *mempolicy;
2062 int nid;
2063
2064 if (!(mask && current->mempolicy))
2065 return false;
2066
2067 task_lock(current);
2068 mempolicy = current->mempolicy;
2069 switch (mempolicy->mode) {
2070 case MPOL_PREFERRED:
2071 if (mempolicy->flags & MPOL_F_LOCAL)
2072 nid = numa_node_id();
2073 else
2074 nid = mempolicy->v.preferred_node;
2075 init_nodemask_of_node(mask, nid);
2076 break;
2077
2078 case MPOL_BIND:
2079 case MPOL_INTERLEAVE:
2080 *mask = mempolicy->v.nodes;
2081 break;
2082
2083 default:
2084 BUG();
2085 }
2086 task_unlock(current);
2087
2088 return true;
2089 }
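
/*
 * Hedged usage sketch: a caller allocates the nodemask itself and only
 * consults it when a non-default policy is present; do_per_node() is a
 * hypothetical helper used only for illustration:
 *
 *	NODEMASK_ALLOC(nodemask_t, mask, GFP_KERNEL);
 *	int nid;
 *
 *	if (mask && init_nodemask_of_mempolicy(mask))
 *		for_each_node_mask(nid, *mask)
 *			do_per_node(nid);
 *	NODEMASK_FREE(mask);
 */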
2090 #endif
2091
2092 /*
2093 * mempolicy_nodemask_intersects
2094 *
2095 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
2096 * policy. Otherwise, check for intersection between mask and the policy
2097 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
2098 * policy, always return true since it may allocate elsewhere on fallback.
2099 *
2100 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2101 */
2102 bool mempolicy_nodemask_intersects(struct task_struct *tsk,
2103 const nodemask_t *mask)
2104 {
2105 struct mempolicy *mempolicy;
2106 bool ret = true;
2107
2108 if (!mask)
2109 return ret;
2110 task_lock(tsk);
2111 mempolicy = tsk->mempolicy;
2112 if (!mempolicy)
2113 goto out;
2114
2115 switch (mempolicy->mode) {
2116 case MPOL_PREFERRED:
2117 /*
2118 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes to
2119 * allocate from; they may fall back to other nodes under OOM.
2120 * Thus, it's possible for tsk to have allocated memory from
2121 * nodes in mask.
2122 */
2123 break;
2124 case MPOL_BIND:
2125 case MPOL_INTERLEAVE:
2126 ret = nodes_intersects(mempolicy->v.nodes, *mask);
2127 break;
2128 default:
2129 BUG();
2130 }
2131 out:
2132 task_unlock(tsk);
2133 return ret;
2134 }
2135
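/*
 * Illustrative use (hedged): the OOM killer performs this kind of test
 * to skip tasks whose mempolicy cannot have allocated from the nodes
 * under pressure, roughly:
 *
 *	if (!mempolicy_nodemask_intersects(tsk, nodemask))
 *		continue;
 *
 * i.e. tsk cannot hold memory on any node in the mask, so it is not a
 * useful victim for this constrained OOM.
 */
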
2136 /* Allocate a page in interleaved policy.
2137 Own path because it needs to do special accounting. */
2138 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2139 unsigned nid)
2140 {
2141 struct page *page;
2142
2143 page = __alloc_pages(gfp, order, nid);
2144 /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2145 if (!static_branch_likely(&vm_numa_stat_key))
2146 return page;
2147 if (page && page_to_nid(page) == nid) {
2148 preempt_disable();
2149 __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
2150 preempt_enable();
2151 }
2152 return page;
2153 }
2154
2155 /**
2156 * alloc_pages_vma - Allocate a page for a VMA.
2157 *
2158 * @gfp:
2159 * %GFP_USER user allocation,
2160 * %GFP_KERNEL kernel allocation,
2161 * %GFP_HIGHMEM highmem/user allocation,
2162 * %GFP_FS allocation should not call back into a file system,
2163 * %GFP_ATOMIC don't sleep.
2164 *
2165 * @order: Order of the GFP allocation.
2166 * @vma: Pointer to VMA or NULL if not available.
2167 * @addr: Virtual Address of the allocation. Must be inside the VMA.
2168 * @node: Which node to prefer for allocation (modulo policy).
2169 * @hugepage: For hugepages, try only the preferred node if possible.
2170 *
2171 * This function allocates a page from the kernel page pool and applies
2172 * a NUMA policy associated with the VMA or the current process.
2173 * When @vma is not NULL, the caller must read-lock the mmap_lock of the
2174 * mm_struct of the VMA to prevent it from going away. Should be used for
2175 * all allocations for pages that will be mapped into user space. Returns
2176 * NULL when no page can be allocated.
2177 */
2178 struct page *
2179 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2180 unsigned long addr, int node, bool hugepage)
2181 {
2182 struct mempolicy *pol;
2183 struct page *page;
2184 int preferred_nid;
2185 nodemask_t *nmask;
2186
2187 pol = get_vma_policy(vma, addr);
2188
2189 if (pol->mode == MPOL_INTERLEAVE) {
2190 unsigned nid;
2191
2192 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2193 mpol_cond_put(pol);
2194 page = alloc_page_interleave(gfp, order, nid);
2195 goto out;
2196 }
2197
2198 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2199 int hpage_node = node;
2200
2201 /*
2202 * For hugepage allocation and a non-interleave policy that
2203 * allows the current node (or another explicitly preferred
2204 * node), we only try to allocate from the current/preferred
2205 * node and don't fall back to other nodes, as the cost of
2206 * remote accesses would likely offset THP benefits.
2207 *
2208 * If the policy is interleave, or does not allow the current
2209 * node in its nodemask, we allocate the standard way.
2210 */
2211 if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
2212 hpage_node = pol->v.preferred_node;
2213
2214 nmask = policy_nodemask(gfp, pol);
2215 if (!nmask || node_isset(hpage_node, *nmask)) {
2216 mpol_cond_put(pol);
2217 /*
2218 * First, try to allocate THP only on local node, but
2219 * don't reclaim unnecessarily, just compact.
2220 */
2221 page = __alloc_pages_node(hpage_node,
2222 gfp | __GFP_THISNODE | __GFP_NORETRY, order);
2223
2224 /*
2225 * If hugepage allocations are configured to always use
2226 * synchronous compaction, or the vma has been madvised
2227 * to prefer hugepage backing, retry allowing remote
2228 * memory with both reclaim and compact as well.
2229 */
2230 if (!page && (gfp & __GFP_DIRECT_RECLAIM))
2231 page = __alloc_pages_node(hpage_node,
2232 gfp, order);
2233
2234 goto out;
2235 }
2236 }
2237
2238 nmask = policy_nodemask(gfp, pol);
2239 preferred_nid = policy_node(gfp, pol, node);
2240 page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
2241 mpol_cond_put(pol);
2242 out:
2243 return page;
2244 }
2245 EXPORT_SYMBOL(alloc_pages_vma);
2246
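/*
 * Typical fault-path usage (hedged sketch): anonymous faults allocate
 * with something like
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, address,
 *			       numa_node_id(), false);
 *
 * while THP faults pass the huge order and hugepage == true, so the
 * current/preferred node is tried first, as described above.
 */
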
2247 /**
2248 * alloc_pages_current - Allocate pages.
2249 *
2250 * @gfp:
2251 * %GFP_USER user allocation,
2252 * %GFP_KERNEL kernel allocation,
2253 * %GFP_HIGHMEM highmem allocation,
2254 * %GFP_FS don't call back into a file system.
2255 * %GFP_ATOMIC don't sleep.
2256 * @order: Order of the allocation (2^order pages). 0 is a single page.
2257 *
2258 * Allocate a page from the kernel page pool, applying the current
2259 * process' NUMA policy when not in interrupt context.
2260 * Returns NULL when no page can be allocated.
2261 */
2262 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
2263 {
2264 struct mempolicy *pol = &default_policy;
2265 struct page *page;
2266
2267 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2268 pol = get_task_policy(current);
2269
2270 /*
2271 * No reference counting needed for current->mempolicy
2272 * nor system default_policy
2273 */
2274 if (pol->mode == MPOL_INTERLEAVE)
2275 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2276 else
2277 page = __alloc_pages_nodemask(gfp, order,
2278 policy_node(gfp, pol, numa_node_id()),
2279 policy_nodemask(gfp, pol));
2280
2281 return page;
2282 }
2283 EXPORT_SYMBOL(alloc_pages_current);
2284
2285 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2286 {
2287 struct mempolicy *pol = mpol_dup(vma_policy(src));
2288
2289 if (IS_ERR(pol))
2290 return PTR_ERR(pol);
2291 dst->vm_policy = pol;
2292 return 0;
2293 }
2294
2295 /*
2296 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2297 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2298 * with the mems_allowed returned by cpuset_mems_allowed(). This
2299 * keeps mempolicies cpuset relative after its cpuset moves. See
2300 * further kernel/cpuset.c update_nodemask().
2301 *
2302 * current's mempolicy may be rebound by another task (the one changing
2303 * the cpuset's mems), so we needn't do rebind work for the current task.
2304 */
2305
2306 /* Slow path of a mempolicy duplicate */
2307 struct mempolicy *__mpol_dup(struct mempolicy *old)
2308 {
2309 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2310
2311 if (!new)
2312 return ERR_PTR(-ENOMEM);
2313
2314 /* task's mempolicy is protected by alloc_lock */
2315 if (old == current->mempolicy) {
2316 task_lock(current);
2317 *new = *old;
2318 task_unlock(current);
2319 } else
2320 *new = *old;
2321
2322 if (current_cpuset_is_being_rebound()) {
2323 nodemask_t mems = cpuset_mems_allowed(current);
2324 mpol_rebind_policy(new, &mems);
2325 }
2326 atomic_set(&new->refcnt, 1);
2327 return new;
2328 }
2329
2330 /* Slow path of a mempolicy comparison */
2331 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2332 {
2333 if (!a || !b)
2334 return false;
2335 if (a->mode != b->mode)
2336 return false;
2337 if (a->flags != b->flags)
2338 return false;
2339 if (mpol_store_user_nodemask(a))
2340 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2341 return false;
2342
2343 switch (a->mode) {
2344 case MPOL_BIND:
2345 case MPOL_INTERLEAVE:
2346 return !!nodes_equal(a->v.nodes, b->v.nodes);
2347 case MPOL_PREFERRED:
2348 /* a's ->flags is the same as b's */
2349 if (a->flags & MPOL_F_LOCAL)
2350 return true;
2351 return a->v.preferred_node == b->v.preferred_node;
2352 default:
2353 BUG();
2354 return false;
2355 }
2356 }
2357
2358 /*
2359 * Shared memory backing store policy support.
2360 *
2361 * Remember policies even when nobody has shared memory mapped.
2362 * The policies are kept in Red-Black tree linked from the inode.
2363 * They are protected by the sp->lock rwlock, which should be held
2364 * for any accesses to the tree.
2365 */
2366
2367 /*
2368 * Lookup the first element intersecting [start, end). Caller holds
2369 * sp->lock for reading or for writing.
2370 */
2371 static struct sp_node *
2372 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2373 {
2374 struct rb_node *n = sp->root.rb_node;
2375
2376 while (n) {
2377 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2378
2379 if (start >= p->end)
2380 n = n->rb_right;
2381 else if (end <= p->start)
2382 n = n->rb_left;
2383 else
2384 break;
2385 }
2386 if (!n)
2387 return NULL;
2388 for (;;) {
2389 struct sp_node *w = NULL;
2390 struct rb_node *prev = rb_prev(n);
2391 if (!prev)
2392 break;
2393 w = rb_entry(prev, struct sp_node, nd);
2394 if (w->end <= start)
2395 break;
2396 n = prev;
2397 }
2398 return rb_entry(n, struct sp_node, nd);
2399 }
2400
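/*
 * Worked example (illustration only): with [1,3), [3,6) and [6,10) in
 * the tree, sp_lookup(sp, 4, 9) may first stop at [6,10) (depending on
 * tree shape), which intersects [4,9); the rb_prev() walk then moves
 * to [3,6), whose end (6) is still above start (4), and stops before
 * [1,3) because 3 <= 4. The leftmost intersecting node, [3,6), is
 * returned.
 */
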
2401 /*
2402 * Insert a new shared policy into the list. Caller holds sp->lock for
2403 * writing.
2404 */
2405 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2406 {
2407 struct rb_node **p = &sp->root.rb_node;
2408 struct rb_node *parent = NULL;
2409 struct sp_node *nd;
2410
2411 while (*p) {
2412 parent = *p;
2413 nd = rb_entry(parent, struct sp_node, nd);
2414 if (new->start < nd->start)
2415 p = &(*p)->rb_left;
2416 else if (new->end > nd->end)
2417 p = &(*p)->rb_right;
2418 else
2419 BUG();
2420 }
2421 rb_link_node(&new->nd, parent, p);
2422 rb_insert_color(&new->nd, &sp->root);
2423 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2424 new->policy ? new->policy->mode : 0);
2425 }
2426
2427 /* Find shared policy intersecting idx */
2428 struct mempolicy *
2429 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2430 {
2431 struct mempolicy *pol = NULL;
2432 struct sp_node *sn;
2433
2434 if (!sp->root.rb_node)
2435 return NULL;
2436 read_lock(&sp->lock);
2437 sn = sp_lookup(sp, idx, idx+1);
2438 if (sn) {
2439 mpol_get(sn->policy);
2440 pol = sn->policy;
2441 }
2442 read_unlock(&sp->lock);
2443 return pol;
2444 }
2445
2446 static void sp_free(struct sp_node *n)
2447 {
2448 mpol_put(n->policy);
2449 kmem_cache_free(sn_cache, n);
2450 }
2451
2452 /**
2453 * mpol_misplaced - check whether current page node is valid in policy
2454 *
2455 * @page: page to be checked
2456 * @vma: vm area where page mapped
2457 * @addr: virtual address where page mapped
2458 *
2459 * Lookup current policy node id for vma,addr and "compare to" page's
2460 * node id.
2461 *
2462 * Returns:
2463 * -1 - not misplaced, page is in the right node
2464 * node - node id where the page should be
2465 *
2466 * Policy determination "mimics" alloc_page_vma().
2467 * Called from fault path where we know the vma and faulting address.
2468 */
2469 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2470 {
2471 struct mempolicy *pol;
2472 struct zoneref *z;
2473 int curnid = page_to_nid(page);
2474 unsigned long pgoff;
2475 int thiscpu = raw_smp_processor_id();
2476 int thisnid = cpu_to_node(thiscpu);
2477 int polnid = NUMA_NO_NODE;
2478 int ret = -1;
2479
2480 pol = get_vma_policy(vma, addr);
2481 if (!(pol->flags & MPOL_F_MOF))
2482 goto out;
2483
2484 switch (pol->mode) {
2485 case MPOL_INTERLEAVE:
2486 pgoff = vma->vm_pgoff;
2487 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2488 polnid = offset_il_node(pol, pgoff);
2489 break;
2490
2491 case MPOL_PREFERRED:
2492 if (pol->flags & MPOL_F_LOCAL)
2493 polnid = numa_node_id();
2494 else
2495 polnid = pol->v.preferred_node;
2496 break;
2497
2498 case MPOL_BIND:
2499 /* Optimize placement among multiple nodes via NUMA balancing */
2500 if (pol->flags & MPOL_F_MORON) {
2501 if (node_isset(thisnid, pol->v.nodes))
2502 break;
2503 goto out;
2504 }
2505
2506 /*
2507 * Allows binding to multiple nodes. Use the current page's node
2508 * if it is in the policy nodemask, else select the nearest
2509 * allowed node, if any. If there are no allowed nodes, use the
2510 * current node [!misplaced].
2511 */
2512 if (node_isset(curnid, pol->v.nodes))
2513 goto out;
2514 z = first_zones_zonelist(
2515 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2516 gfp_zone(GFP_HIGHUSER),
2517 &pol->v.nodes);
2518 polnid = zone_to_nid(z->zone);
2519 break;
2520
2521 default:
2522 BUG();
2523 }
2524
2525 /* Migrate the page towards the node whose CPU is referencing it */
2526 if (pol->flags & MPOL_F_MORON) {
2527 polnid = thisnid;
2528
2529 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2530 goto out;
2531 }
2532
2533 if (curnid != polnid)
2534 ret = polnid;
2535 out:
2536 mpol_cond_put(pol);
2537
2538 return ret;
2539 }
2540
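/*
 * Hedged sketch of the NUMA-hinting-fault caller (cf. do_numa_page()
 * in mm/memory.c): the fault handler asks whether the faulting page is
 * misplaced and migrates it when a better node is reported:
 *
 *	target_nid = mpol_misplaced(page, vma, addr);
 *	if (target_nid != NUMA_NO_NODE &&
 *	    migrate_misplaced_page(page, vma, target_nid))
 *		page_nid = target_nid;
 */
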
2541 /*
2542 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2543 * dropped after task->mempolicy is set to NULL so that any allocation done as
2544 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2545 * policy.
2546 */
2547 void mpol_put_task_policy(struct task_struct *task)
2548 {
2549 struct mempolicy *pol;
2550
2551 task_lock(task);
2552 pol = task->mempolicy;
2553 task->mempolicy = NULL;
2554 task_unlock(task);
2555 mpol_put(pol);
2556 }
2557
2558 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2559 {
2560 pr_debug("deleting %lx-%lx\n", n->start, n->end);
2561 rb_erase(&n->nd, &sp->root);
2562 sp_free(n);
2563 }
2564
2565 static void sp_node_init(struct sp_node *node, unsigned long start,
2566 unsigned long end, struct mempolicy *pol)
2567 {
2568 node->start = start;
2569 node->end = end;
2570 node->policy = pol;
2571 }
2572
2573 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2574 struct mempolicy *pol)
2575 {
2576 struct sp_node *n;
2577 struct mempolicy *newpol;
2578
2579 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2580 if (!n)
2581 return NULL;
2582
2583 newpol = mpol_dup(pol);
2584 if (IS_ERR(newpol)) {
2585 kmem_cache_free(sn_cache, n);
2586 return NULL;
2587 }
2588 newpol->flags |= MPOL_F_SHARED;
2589 sp_node_init(n, start, end, newpol);
2590
2591 return n;
2592 }
2593
2594 /* Replace a policy range. */
2595 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2596 unsigned long end, struct sp_node *new)
2597 {
2598 struct sp_node *n;
2599 struct sp_node *n_new = NULL;
2600 struct mempolicy *mpol_new = NULL;
2601 int ret = 0;
2602
2603 restart:
2604 write_lock(&sp->lock);
2605 n = sp_lookup(sp, start, end);
2606 /* Take care of old policies in the same range. */
2607 while (n && n->start < end) {
2608 struct rb_node *next = rb_next(&n->nd);
2609 if (n->start >= start) {
2610 if (n->end <= end)
2611 sp_delete(sp, n);
2612 else
2613 n->start = end;
2614 } else {
2615 /* Old policy spanning whole new range. */
2616 if (n->end > end) {
2617 if (!n_new)
2618 goto alloc_new;
2619
2620 *mpol_new = *n->policy;
2621 atomic_set(&mpol_new->refcnt, 1);
2622 sp_node_init(n_new, end, n->end, mpol_new);
2623 n->end = start;
2624 sp_insert(sp, n_new);
2625 n_new = NULL;
2626 mpol_new = NULL;
2627 break;
2628 } else
2629 n->end = start;
2630 }
2631 if (!next)
2632 break;
2633 n = rb_entry(next, struct sp_node, nd);
2634 }
2635 if (new)
2636 sp_insert(sp, new);
2637 write_unlock(&sp->lock);
2638 ret = 0;
2639
2640 err_out:
2641 if (mpol_new)
2642 mpol_put(mpol_new);
2643 if (n_new)
2644 kmem_cache_free(sn_cache, n_new);
2645
2646 return ret;
2647
2648 alloc_new:
2649 write_unlock(&sp->lock);
2650 ret = -ENOMEM;
2651 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2652 if (!n_new)
2653 goto err_out;
2654 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2655 if (!mpol_new)
2656 goto err_out;
2657 goto restart;
2658 }
2659
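/*
 * Worked example (illustration only): suppose the tree holds a single
 * range [0,10) and shared_policy_replace() is called for [3,6). The
 * old node spans the whole new range, so we jump to alloc_new to
 * preallocate n_new/mpol_new, restart, clone the old policy into a new
 * [6,10) node, trim the old node to [0,3), insert the clone and
 * finally insert the new [3,6) node. Fully covered old ranges are
 * instead sp_delete()d; partial overlaps are trimmed at whichever end
 * intersects.
 */
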
2660 /**
2661 * mpol_shared_policy_init - initialize shared policy for inode
2662 * @sp: pointer to inode shared policy
2663 * @mpol: struct mempolicy to install
2664 *
2665 * Install non-NULL @mpol in inode's shared policy rb-tree.
2666 * On entry, the current task has a reference on a non-NULL @mpol.
2667 * This must be released on exit.
2668 * This is called during get_inode(), so we can use GFP_KERNEL.
2669 */
2670 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2671 {
2672 int ret;
2673
2674 sp->root = RB_ROOT; /* empty tree == default mempolicy */
2675 rwlock_init(&sp->lock);
2676
2677 if (mpol) {
2678 struct vm_area_struct pvma;
2679 struct mempolicy *new;
2680 NODEMASK_SCRATCH(scratch);
2681
2682 if (!scratch)
2683 goto put_mpol;
2684 /* contextualize the tmpfs mount point mempolicy */
2685 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2686 if (IS_ERR(new))
2687 goto free_scratch; /* no valid nodemask intersection */
2688
2689 task_lock(current);
2690 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2691 task_unlock(current);
2692 if (ret)
2693 goto put_new;
2694
2695 /* Create pseudo-vma that contains just the policy */
2696 vma_init(&pvma, NULL);
2697 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2698 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2699
2700 put_new:
2701 mpol_put(new); /* drop initial ref */
2702 free_scratch:
2703 NODEMASK_SCRATCH_FREE(scratch);
2704 put_mpol:
2705 mpol_put(mpol); /* drop our incoming ref on sb mpol */
2706 }
2707 }
2708
2709 int mpol_set_shared_policy(struct shared_policy *info,
2710 struct vm_area_struct *vma, struct mempolicy *npol)
2711 {
2712 int err;
2713 struct sp_node *new = NULL;
2714 unsigned long sz = vma_pages(vma);
2715
2716 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2717 vma->vm_pgoff,
2718 sz, npol ? npol->mode : -1,
2719 npol ? npol->flags : -1,
2720 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
2721
2722 if (npol) {
2723 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2724 if (!new)
2725 return -ENOMEM;
2726 }
2727 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2728 if (err && new)
2729 sp_free(new);
2730 return err;
2731 }
2732
2733 /* Free a backing policy store on inode delete. */
2734 void mpol_free_shared_policy(struct shared_policy *p)
2735 {
2736 struct sp_node *n;
2737 struct rb_node *next;
2738
2739 if (!p->root.rb_node)
2740 return;
2741 write_lock(&p->lock);
2742 next = rb_first(&p->root);
2743 while (next) {
2744 n = rb_entry(next, struct sp_node, nd);
2745 next = rb_next(&n->nd);
2746 sp_delete(p, n);
2747 }
2748 write_unlock(&p->lock);
2749 }
2750
2751 #ifdef CONFIG_NUMA_BALANCING
2752 static int __initdata numabalancing_override;
2753
2754 static void __init check_numabalancing_enable(void)
2755 {
2756 bool numabalancing_default = false;
2757
2758 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2759 numabalancing_default = true;
2760
2761 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2762 if (numabalancing_override)
2763 set_numabalancing_state(numabalancing_override == 1);
2764
2765 if (num_online_nodes() > 1 && !numabalancing_override) {
2766 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2767 numabalancing_default ? "Enabling" : "Disabling");
2768 set_numabalancing_state(numabalancing_default);
2769 }
2770 }
2771
2772 static int __init setup_numabalancing(char *str)
2773 {
2774 int ret = 0;
2775 if (!str)
2776 goto out;
2777
2778 if (!strcmp(str, "enable")) {
2779 numabalancing_override = 1;
2780 ret = 1;
2781 } else if (!strcmp(str, "disable")) {
2782 numabalancing_override = -1;
2783 ret = 1;
2784 }
2785 out:
2786 if (!ret)
2787 pr_warn("Unable to parse numa_balancing=\n");
2788
2789 return ret;
2790 }
2791 __setup("numa_balancing=", setup_numabalancing);
2792 #else
2793 static inline void __init check_numabalancing_enable(void)
2794 {
2795 }
2796 #endif /* CONFIG_NUMA_BALANCING */
2797
2798 /* assumes fs == KERNEL_DS */
2799 void __init numa_policy_init(void)
2800 {
2801 nodemask_t interleave_nodes;
2802 unsigned long largest = 0;
2803 int nid, prefer = 0;
2804
2805 policy_cache = kmem_cache_create("numa_policy",
2806 sizeof(struct mempolicy),
2807 0, SLAB_PANIC, NULL);
2808
2809 sn_cache = kmem_cache_create("shared_policy_node",
2810 sizeof(struct sp_node),
2811 0, SLAB_PANIC, NULL);
2812
2813 for_each_node(nid) {
2814 preferred_node_policy[nid] = (struct mempolicy) {
2815 .refcnt = ATOMIC_INIT(1),
2816 .mode = MPOL_PREFERRED,
2817 .flags = MPOL_F_MOF | MPOL_F_MORON,
2818 .v = { .preferred_node = nid, },
2819 };
2820 }
2821
2822 /*
2823 * Set interleaving policy for system init. Interleaving is only
2824 * enabled across suitably sized nodes (default is >= 16MB), or
2825 * fall back to the largest node if they're all smaller.
2826 */
2827 nodes_clear(interleave_nodes);
2828 for_each_node_state(nid, N_MEMORY) {
2829 unsigned long total_pages = node_present_pages(nid);
2830
2831 /* Preserve the largest node */
2832 if (largest < total_pages) {
2833 largest = total_pages;
2834 prefer = nid;
2835 }
2836
2837 /* Interleave this node? */
2838 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2839 node_set(nid, interleave_nodes);
2840 }
2841
2842 /* All too small, use the largest */
2843 if (unlikely(nodes_empty(interleave_nodes)))
2844 node_set(prefer, interleave_nodes);
2845
2846 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2847 pr_err("%s: interleaving failed\n", __func__);
2848
2849 check_numabalancing_enable();
2850 }
2851
2852 /* Reset policy of current process to default */
2853 void numa_default_policy(void)
2854 {
2855 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2856 }
2857
2858 /*
2859 * Parse and format mempolicy from/to strings
2860 */
2861
2862 /*
2863 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
2864 */
2865 static const char * const policy_modes[] =
2866 {
2867 [MPOL_DEFAULT] = "default",
2868 [MPOL_PREFERRED] = "prefer",
2869 [MPOL_BIND] = "bind",
2870 [MPOL_INTERLEAVE] = "interleave",
2871 [MPOL_LOCAL] = "local",
2872 };
2873
2874
2875 #ifdef CONFIG_TMPFS
2876 /**
2877 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2878 * @str: string containing mempolicy to parse
2879 * @mpol: pointer to struct mempolicy pointer, returned on success.
2880 *
2881 * Format of input:
2882 * <mode>[=<flags>][:<nodelist>]
2883 *
2884 * On success, returns 0, else 1
2885 */
2886 int mpol_parse_str(char *str, struct mempolicy **mpol)
2887 {
2888 struct mempolicy *new = NULL;
2889 unsigned short mode_flags;
2890 nodemask_t nodes;
2891 char *nodelist = strchr(str, ':');
2892 char *flags = strchr(str, '=');
2893 int err = 1, mode;
2894
2895 if (flags)
2896 *flags++ = '\0'; /* terminate mode string */
2897
2898 if (nodelist) {
2899 /* NUL-terminate mode or flags string */
2900 *nodelist++ = '\0';
2901 if (nodelist_parse(nodelist, nodes))
2902 goto out;
2903 if (!nodes_subset(nodes, node_states[N_MEMORY]))
2904 goto out;
2905 } else
2906 nodes_clear(nodes);
2907
2908 mode = match_string(policy_modes, MPOL_MAX, str);
2909 if (mode < 0)
2910 goto out;
2911
2912 switch (mode) {
2913 case MPOL_PREFERRED:
2914 /*
2915 * Insist on a nodelist of one node only: later we use
2916 * first_node(nodes) to grab the single node, so here the
2917 * nodelist (or nodes) cannot be empty.
2918 */
2919 if (nodelist) {
2920 char *rest = nodelist;
2921 while (isdigit(*rest))
2922 rest++;
2923 if (*rest)
2924 goto out;
2925 if (nodes_empty(nodes))
2926 goto out;
2927 }
2928 break;
2929 case MPOL_INTERLEAVE:
2930 /*
2931 * Default to online nodes with memory if no nodelist
2932 */
2933 if (!nodelist)
2934 nodes = node_states[N_MEMORY];
2935 break;
2936 case MPOL_LOCAL:
2937 /*
2938 * Don't allow a nodelist; mpol_new() checks flags
2939 */
2940 if (nodelist)
2941 goto out;
2942 mode = MPOL_PREFERRED;
2943 break;
2944 case MPOL_DEFAULT:
2945 /*
2946 * Insist on an empty nodelist
2947 */
2948 if (!nodelist)
2949 err = 0;
2950 goto out;
2951 case MPOL_BIND:
2952 /*
2953 * Insist on a nodelist
2954 */
2955 if (!nodelist)
2956 goto out;
2957 }
2958
2959 mode_flags = 0;
2960 if (flags) {
2961 /*
2962 * Currently, we only support two mutually exclusive
2963 * mode flags.
2964 */
2965 if (!strcmp(flags, "static"))
2966 mode_flags |= MPOL_F_STATIC_NODES;
2967 else if (!strcmp(flags, "relative"))
2968 mode_flags |= MPOL_F_RELATIVE_NODES;
2969 else
2970 goto out;
2971 }
2972
2973 new = mpol_new(mode, mode_flags, &nodes);
2974 if (IS_ERR(new))
2975 goto out;
2976
2977 /*
2978 * Save nodes for mpol_to_str() to show the tmpfs mount options
2979 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2980 */
2981 if (mode != MPOL_PREFERRED)
2982 new->v.nodes = nodes;
2983 else if (nodelist)
2984 new->v.preferred_node = first_node(nodes);
2985 else
2986 new->flags |= MPOL_F_LOCAL;
2987
2988 /*
2989 * Save nodes for contextualization: this will be used to "clone"
2990 * the mempolicy in a specific context [cpuset] at a later time.
2991 */
2992 new->w.user_nodemask = nodes;
2993
2994 err = 0;
2995
2996 out:
2997 /* Restore string for error message */
2998 if (nodelist)
2999 *--nodelist = ':';
3000 if (flags)
3001 *--flags = '=';
3002 if (!err)
3003 *mpol = new;
3004 return err;
3005 }
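
/*
 * Example inputs (hedged; see Documentation/filesystems/tmpfs.rst for
 * the authoritative syntax), as passed via the tmpfs "mpol=" option:
 *
 *	mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt
 *	mount -t tmpfs -o mpol=bind=static:0,2 tmpfs /mnt
 *	mount -t tmpfs -o mpol=prefer:1 tmpfs /mnt
 *	mount -t tmpfs -o mpol=local tmpfs /mnt
 *
 * i.e. <mode>[=<flags>][:<nodelist>], where the only flags accepted
 * are "static" and "relative", and they are mutually exclusive.
 */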
3006 #endif /* CONFIG_TMPFS */
3007
3008 /**
3009 * mpol_to_str - format a mempolicy structure for printing
3010 * @buffer: to contain formatted mempolicy string
3011 * @maxlen: length of @buffer
3012 * @pol: pointer to mempolicy to be formatted
3013 *
3014 * Convert @pol into a string. If @buffer is too short, truncate the string.
3015 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3016 * longest flag, "relative", and to display at least a few node ids.
3017 */
3018 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
3019 {
3020 char *p = buffer;
3021 nodemask_t nodes = NODE_MASK_NONE;
3022 unsigned short mode = MPOL_DEFAULT;
3023 unsigned short flags = 0;
3024
3025 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
3026 mode = pol->mode;
3027 flags = pol->flags;
3028 }
3029
3030 switch (mode) {
3031 case MPOL_DEFAULT:
3032 break;
3033 case MPOL_PREFERRED:
3034 if (flags & MPOL_F_LOCAL)
3035 mode = MPOL_LOCAL;
3036 else
3037 node_set(pol->v.preferred_node, nodes);
3038 break;
3039 case MPOL_BIND:
3040 case MPOL_INTERLEAVE:
3041 nodes = pol->v.nodes;
3042 break;
3043 default:
3044 WARN_ON_ONCE(1);
3045 snprintf(p, maxlen, "unknown");
3046 return;
3047 }
3048
3049 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
3050
3051 if (flags & MPOL_MODE_FLAGS) {
3052 p += snprintf(p, buffer + maxlen - p, "=");
3053
3054 /*
3055 * Currently, the only defined flags are mutually exclusive
3056 */
3057 if (flags & MPOL_F_STATIC_NODES)
3058 p += snprintf(p, buffer + maxlen - p, "static");
3059 else if (flags & MPOL_F_RELATIVE_NODES)
3060 p += snprintf(p, buffer + maxlen - p, "relative");
3061 }
3062
3063 if (!nodes_empty(nodes))
3064 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3065 nodemask_pr_args(&nodes));
3066 }
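
/*
 * Hedged usage sketch: callers such as show_numa_map() format a policy
 * into a small on-stack buffer, 32 bytes being the recommended
 * minimum:
 *
 *	char buffer[64];
 *
 *	mpol_to_str(buffer, sizeof(buffer), pol);
 *
 * after which buffer holds e.g. "default", "local", "prefer=static:1",
 * "bind:1,3" or "interleave:0-3".
 */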