/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/
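
/*
 * Illustrative userspace sketch (not part of this file): the policies
 * above are normally installed via the set_mempolicy(2) and mbind(2)
 * syscalls, wrapped by libnuma's <numaif.h>.  E.g., interleaving a
 * process's future allocations across nodes 0 and 1:
 *
 *	#include <numaif.h>
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *	if (set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask)))
 *		perror("set_mempolicy");
 *
 * The maxnode argument counts bits in the mask; get_nodes() below parses
 * that bitmap on the kernel side.
 */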

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
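
/*
 * Worked example (illustrative): with *orig = {0,2} and *rel = {1,3},
 * nodes_weight(*rel) is 2, so nodes_fold() wraps *orig modulo 2 bits,
 * giving tmp = {0}; nodes_onto() then maps bit 0 onto the first node
 * set in *rel, so *ret = {1}.
 */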

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some check and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = tmp;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->v.nodes = tmp;
}
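
/*
 * Worked example (illustrative): an MPOL_F_STATIC_NODES interleave policy
 * created with user_nodemask = {0,1}.  If the cpuset's mems change to
 * {1,2}, the nodes_and() above leaves v.nodes = {1}; if they change to
 * {2,3}, the intersection is empty and the policy falls back to the full
 * new mask, v.nodes = {2,3}.
 */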

static void mpol_rebind_preferred(struct mempolicy *pol,
						const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_sem. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	struct vm_area_struct *prev;
};

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int nid, ret;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (pmd_trans_huge(*pmd)) {
			page = pmd_page(*pmd);
			if (is_huge_zero_page(page)) {
				spin_unlock(ptl);
				__split_huge_pmd(vma, pmd, addr, false, NULL);
			} else {
				get_page(page);
				spin_unlock(ptl);
				lock_page(page);
				ret = split_huge_page(page);
				unlock_page(page);
				put_page(page);
				if (ret)
					return 0;
			}
		} else {
			spin_unlock(ptl);
		}
	}

	if (pmd_trans_unstable(pmd))
		return 0;
retry:
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
			continue;
		if (PageTransCompound(page)) {
			get_page(page);
			pte_unmap_unlock(pte, ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			/* Failed to split -- skip. */
			if (ret) {
				pte = pte_offset_map_lock(walk->mm, pmd,
						addr, &ptl);
				continue;
			}
			goto retry;
		}

		migrate_page_add(page, qp->pagelist, flags);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int nid;
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	nid = page_to_nid(page);
	if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, qp->pagelist);
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return 0;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	if (!vma_migratable(vma))
		return 1;

	if (endvma > end)
		endvma = end;
	if (vma->vm_start > start)
		start = vma->vm_start;

	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
		if (!vma->vm_next && vma->vm_end < end)
			return -EFAULT;
		if (qp->prev && qp->prev->vm_end < vma->vm_start)
			return -EFAULT;
	}

	qp->prev = vma;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) &&
			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return 0;
	return 1;
}

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which
 * is passed via @private.
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.prev = NULL,
	};
	struct mm_walk queue_pages_walk = {
		.hugetlb_entry = queue_pages_hugetlb,
		.pmd_entry = queue_pages_pte_range,
		.test_walk = queue_pages_test_walk,
		.mm = mm,
		.private = &qp,
	};

	return walk_page_range(start, end, &queue_pages_walk);
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE)
		current->il_prev = MAX_NUMNODES-1;
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->v.nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_node_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else
		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
						    __GFP_THISNODE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship.  In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;
}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_page(struct page *page, unsigned long start, int **x)
{
	struct vm_area_struct *vma;
	unsigned long uninitialized_var(address);

	vma = find_vma(current->mm, start);
	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	if (PageHuge(page)) {
		BUG_ON(!vma);
		return alloc_huge_page_noerr(vma, address, 1);
	}
	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	return -ENOSYS;
}

static struct page *new_page(struct page *page, unsigned long start, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)MPOL_MF_VALID)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (flags & MPOL_MF_LAZY)
		new->flags |= MPOL_F_MOF;

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			goto mpol_out;
	}
	{
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			down_write(&mm->mmap_sem);
			task_lock(current);
			err = mpol_set_nodemask(new, nmask, scratch);
			task_unlock(current);
			if (err)
				up_write(&mm->mmap_sem);
		} else
			err = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
	}
	if (err)
		goto mpol_out;

	err = queue_pages_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);
	if (!err)
		err = mbind_range(mm, start, end, new);

	if (!err) {
		int nr_failed = 0;

		if (!list_empty(&pagelist)) {
			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
			nr_failed = migrate_pages(&pagelist, new_page, NULL,
				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
			if (nr_failed)
				putback_movable_pages(&pagelist);
		}

		if (nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	} else
		putback_movable_pages(&pagelist);

	up_write(&mm->mmap_sem);
 mpol_out:
	mpol_put(new);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported just check
	   if the non supported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}
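
/*
 * Worked example (illustrative, 64-bit kernel): a userspace maxnode of 33
 * becomes 32 after the decrement above, so nlongs = 1 and
 * endmask = (1UL << 32) - 1; only the low 32 bits of the first user word
 * survive the final masking.
 */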

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}
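
/*
 * Worked example (illustrative): with MAX_NUMNODES = 1024 and a user
 * maxnode of 2048, copy is 256 bytes but nbytes only 128, so the upper
 * 128 user bytes are cleared before the 128 valid bytes are copied out.
 */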

SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
		unsigned long, mode, const unsigned long __user *, nmask,
		unsigned long, maxnode, unsigned, flags)
{
	nodemask_t nodes;
	int err;
	unsigned short mode_flags;

	mode_flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if (mode >= MPOL_MAX)
		return -EINVAL;
	if ((mode_flags & MPOL_F_STATIC_NODES) &&
	    (mode_flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}

/* Set the process memory policy */
SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
		unsigned long, maxnode)
{
	int err;
	nodemask_t nodes;
	unsigned short flags;

	flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if ((unsigned int)mode >= MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, flags, &nodes);
}

SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
		const unsigned long __user *, old_nodes,
		const unsigned long __user *, new_nodes)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	nodemask_t task_nodes;
	int err;
	nodemask_t *old;
	nodemask_t *new;
	NODEMASK_SCRATCH(scratch);

	if (!scratch)
		return -ENOMEM;

	old = &scratch->mask1;
	new = &scratch->mask2;

	err = get_nodes(old, old_nodes, maxnode);
	if (err)
		goto out;

	err = get_nodes(new, new_nodes, maxnode);
	if (err)
		goto out;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		err = -ESRCH;
		goto out;
	}
	get_task_struct(task);

	err = -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	tcred = __task_cred(task);
	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out_put;
	}
	rcu_read_unlock();

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out_put;
	}

	if (!nodes_subset(*new, node_states[N_MEMORY])) {
		err = -EINVAL;
		goto out_put;
	}

	err = security_task_movememory(task);
	if (err)
		goto out_put;

	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm) {
		err = -EINVAL;
		goto out;
	}

	err = do_migrate_pages(mm, old, new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);

	mmput(mm);
out:
	NODEMASK_SCRATCH_FREE(scratch);

	return err;

out_put:
	put_task_struct(task);
	goto out;
}


/* Retrieve NUMA policy */
SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
		unsigned long __user *, nmask, unsigned long, maxnode,
		unsigned long, addr, unsigned long, flags)
{
	int err;
	int uninitialized_var(pval);
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
		       compat_ulong_t __user *, nmask,
		       compat_ulong_t, maxnode,
		       compat_ulong_t, addr, compat_ulong_t, flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		unsigned long copy_size;
		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
		err = copy_from_user(bm, nm, copy_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
		       compat_ulong_t, maxnode)
{
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		if (compat_get_bitmap(bm, nmask, nr_bits))
			return -EFAULT;
		nm = compat_alloc_user_space(alloc_size);
		if (copy_to_user(nm, bm, alloc_size))
			return -EFAULT;
	}

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
		       compat_ulong_t, maxnode, compat_ulong_t, flags)
{
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
			return -EFAULT;
		nm = compat_alloc_user_space(alloc_size);
		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
			return -EFAULT;
	}

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif

struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct mempolicy *pol = NULL;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy) {
			pol = vma->vm_ops->get_policy(vma, addr);
		} else if (vma->vm_policy) {
			pol = vma->vm_policy;

			/*
			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
			 * count on these policies which will be dropped by
			 * mpol_cond_put() later
			 */
			if (mpol_needs_cond_ref(pol))
				mpol_get(pol);
		}
	}

	return pol;
}

/*
 * get_vma_policy(@vma, @addr)
 * @vma: virtual memory area whose policy is sought
 * @addr: address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to current->mempolicy or system default policy, as necessary.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task.  It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct mempolicy *pol = __get_vma_policy(vma, addr);

	if (!pol)
		pol = get_task_policy(current);

	return pol;
}

bool vma_policy_mof(struct vm_area_struct *vma)
{
	struct mempolicy *pol;

	if (vma->vm_ops && vma->vm_ops->get_policy) {
		bool ret = false;

		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
		if (pol && (pol->flags & MPOL_F_MOF))
			ret = true;
		mpol_cond_put(pol);

		return ret;
	}

	pol = vma->vm_policy;
	if (!pol)
		pol = get_task_policy(current);

	return pol->flags & MPOL_F_MOF;
}

static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
{
	enum zone_type dynamic_policy_zone = policy_zone;

	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);

	/*
	 * if policy->v.nodes has movable memory only,
	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
	 *
	 * policy->v.nodes is intersected with node_states[N_MEMORY],
	 * so if the following test fails, it implies
	 * policy->v.nodes has movable memory only.
	 */
	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
		dynamic_policy_zone = ZONE_MOVABLE;

	return zone >= dynamic_policy_zone;
}

/*
 * Return a nodemask representing a mempolicy for filtering nodes for
 * page allocation
 */
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
	/* Lower zones don't get a nodemask applied for MPOL_BIND */
	if (unlikely(policy->mode == MPOL_BIND) &&
			apply_policy_zone(policy, gfp_zone(gfp)) &&
			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
		return &policy->v.nodes;

	return NULL;
}

/* Return the node id preferred by the given mempolicy, or the given id */
static int policy_node(gfp_t gfp, struct mempolicy *policy,
		int nd)
{
	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
		nd = policy->v.preferred_node;
	else {
		/*
		 * __GFP_THISNODE shouldn't even be used with the bind policy
		 * because we might easily break the expectation to stay on the
		 * requested node and not break the policy.
		 */
		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
	}

	return nd;
}

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned next;
	struct task_struct *me = current;

	next = next_node_in(me->il_prev, policy->v.nodes);
	if (next < MAX_NUMNODES)
		me->il_prev = next;
	return next;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 */
unsigned int mempolicy_slab_node(void)
{
	struct mempolicy *policy;
	int node = numa_mem_id();

	if (in_interrupt())
		return node;

	policy = current->mempolicy;
	if (!policy || policy->flags & MPOL_F_LOCAL)
		return node;

	switch (policy->mode) {
	case MPOL_PREFERRED:
		/*
		 * handled MPOL_F_LOCAL above
		 */
		return policy->v.preferred_node;

	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND: {
		struct zoneref *z;

		/*
		 * Follow bind policy behavior and start allocation at the
		 * first node.
		 */
		struct zonelist *zonelist;
		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
		z = first_zones_zonelist(zonelist, highest_zoneidx,
							&policy->v.nodes);
		return z->zone ? z->zone->node : node;
	}

	default:
		BUG();
	}
}

/*
 * Do static interleaving for a VMA with known offset @n.  Returns the n'th
 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
 * number of present nodes.
 */
static unsigned offset_il_node(struct mempolicy *pol,
			       struct vm_area_struct *vma, unsigned long n)
{
	unsigned nnodes = nodes_weight(pol->v.nodes);
	unsigned target;
	int i;
	int nid;

	if (!nnodes)
		return numa_node_id();
	target = (unsigned int)n % nnodes;
	nid = first_node(pol->v.nodes);
	for (i = 0; i < target; i++)
		nid = next_node(nid, pol->v.nodes);
	return nid;
}
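
/*
 * Worked example (illustrative): with pol->v.nodes = {0,2,5} and n = 4,
 * nnodes = 3 and target = 4 % 3 = 1, so the loop advances once from the
 * first node and offset_il_node() returns node 2.
 */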

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
		 struct vm_area_struct *vma, unsigned long addr, int shift)
{
	if (vma) {
		unsigned long off;

		/*
		 * for small pages, there is no difference between
		 * shift and PAGE_SHIFT, so the bit-shift is safe.
		 * for huge pages, since vm_pgoff is in units of small
		 * pages, we need to shift off the always 0 bits to get
		 * a useful offset.
		 */
		BUG_ON(shift < PAGE_SHIFT);
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, vma, off);
	} else
		return interleave_nodes(pol);
}

00ac59ad 1729#ifdef CONFIG_HUGETLBFS
480eccf9 1730/*
04ec6264 1731 * huge_node(@vma, @addr, @gfp_flags, @mpol)
b46e14ac
FF
1732 * @vma: virtual memory area whose policy is sought
1733 * @addr: address in @vma for shared policy lookup and interleave policy
1734 * @gfp_flags: for requested zone
1735 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1736 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
480eccf9 1737 *
04ec6264 1738 * Returns a nid suitable for a huge page allocation and a pointer
52cd3b07
LS
1739 * to the struct mempolicy for conditional unref after allocation.
1740 * If the effective policy is 'BIND, returns a pointer to the mempolicy's
1741 * @nodemask for filtering the zonelist.
c0ff7453 1742 *
d26914d1 1743 * Must be protected by read_mems_allowed_begin()
480eccf9 1744 */
04ec6264
VB
1745int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
1746 struct mempolicy **mpol, nodemask_t **nodemask)
5da7ca86 1747{
04ec6264 1748 int nid;
5da7ca86 1749
dd6eecb9 1750 *mpol = get_vma_policy(vma, addr);
19770b32 1751 *nodemask = NULL; /* assume !MPOL_BIND */
5da7ca86 1752
52cd3b07 1753 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
04ec6264
VB
1754 nid = interleave_nid(*mpol, vma, addr,
1755 huge_page_shift(hstate_vma(vma)));
52cd3b07 1756 } else {
04ec6264 1757 nid = policy_node(gfp_flags, *mpol, numa_node_id());
52cd3b07
LS
1758 if ((*mpol)->mode == MPOL_BIND)
1759 *nodemask = &(*mpol)->v.nodes;
480eccf9 1760 }
04ec6264 1761 return nid;
5da7ca86 1762}
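/*
 * Hypothetical caller sketch (modelled on the hugetlb fault path;
 * simplified, not a verbatim copy of mm/hugetlb.c): huge_node() hands
 * back a reference that must be dropped with mpol_cond_put(), and the
 * whole lookup is retried if the cpuset changed underneath us.
 *
 *	do {
 *		cookie = read_mems_allowed_begin();
 *		nid = huge_node(vma, addr, gfp, &mpol, &nodemask);
 *		page = dequeue_huge_page_nodemask(h, gfp, nid, nodemask);
 *		mpol_cond_put(mpol);
 *	} while (!page && read_mems_allowed_retry(cookie));
 */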
06808b08
LS
1763
1764/*
1765 * init_nodemask_of_mempolicy
1766 *
1767 * If the current task's mempolicy is "default" [NULL], return 'false'
1768 * to indicate default policy. Otherwise, extract the policy nodemask
1769 * for 'bind' or 'interleave' policy into the argument nodemask, or
1770 * initialize the argument nodemask to contain the single node for
1771 * 'preferred' or 'local' policy and return 'true' to indicate presence
1772 * of non-default mempolicy.
1773 *
1774 * We don't bother with reference counting the mempolicy [mpol_get/put]
1775 * because the current task is examining its own mempolicy and a task's
1776 * mempolicy is only ever changed by the task itself.
1777 *
1778 * N.B., it is the caller's responsibility to free a returned nodemask.
1779 */
1780bool init_nodemask_of_mempolicy(nodemask_t *mask)
1781{
1782 struct mempolicy *mempolicy;
1783 int nid;
1784
1785 if (!(mask && current->mempolicy))
1786 return false;
1787
c0ff7453 1788 task_lock(current);
06808b08
LS
1789 mempolicy = current->mempolicy;
1790 switch (mempolicy->mode) {
1791 case MPOL_PREFERRED:
1792 if (mempolicy->flags & MPOL_F_LOCAL)
1793 nid = numa_node_id();
1794 else
1795 nid = mempolicy->v.preferred_node;
1796 init_nodemask_of_node(mask, nid);
1797 break;
1798
1799 case MPOL_BIND:
1800 /* Fall through */
1801 case MPOL_INTERLEAVE:
1802 *mask = mempolicy->v.nodes;
1803 break;
1804
1805 default:
1806 BUG();
1807 }
c0ff7453 1808 task_unlock(current);
06808b08
LS
1809
1810 return true;
1811}
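/*
 * Usage sketch (loosely based on the hugetlb pool-resizing code;
 * simplified and hypothetical in its details): honour the task's
 * mempolicy when choosing nodes, falling back to all memory nodes
 * for the default policy.
 *
 *	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL);
 *	if (!nodes_allowed || !init_nodemask_of_mempolicy(nodes_allowed)) {
 *		NODEMASK_FREE(nodes_allowed);
 *		nodes_allowed = &node_states[N_MEMORY];
 *	}
 */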
00ac59ad 1812#endif
5da7ca86 1813
6f48d0eb
DR
1814/*
1815 * mempolicy_nodemask_intersects
1816 *
1817 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1818 * policy. Otherwise, check for intersection between mask and the policy
1819 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1820 * policy, always return true since it may allocate elsewhere on fallback.
1821 *
1822 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1823 */
1824bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1825 const nodemask_t *mask)
1826{
1827 struct mempolicy *mempolicy;
1828 bool ret = true;
1829
1830 if (!mask)
1831 return ret;
1832 task_lock(tsk);
1833 mempolicy = tsk->mempolicy;
1834 if (!mempolicy)
1835 goto out;
1836
1837 switch (mempolicy->mode) {
1838 case MPOL_PREFERRED:
1839 /*
1840 * MPOL_PREFERRED and MPOL_F_LOCAL only indicate preferred nodes to
1841 * allocate from; the task may fall back to other nodes when OOM.
1842 * Thus, it's possible for tsk to have allocated memory from
1843 * nodes in mask.
1844 */
1845 break;
1846 case MPOL_BIND:
1847 case MPOL_INTERLEAVE:
1848 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1849 break;
1850 default:
1851 BUG();
1852 }
1853out:
1854 task_unlock(tsk);
1855 return ret;
1856}
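/*
 * Illustrative caller (patterned after the OOM killer's eligibility
 * test; names are simplified): tasks that cannot hold memory on the
 * constrained nodes are skipped as kill candidates.
 *
 *	if (!mempolicy_nodemask_intersects(tsk, constrained_nodes))
 *		continue;
 */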
1857
1da177e4
LT
1858/* Allocate a page in interleaved policy.
1859 Own path because it needs to do special accounting. */
662f3a0b
AK
1860static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1861 unsigned nid)
1da177e4 1862{
1da177e4
LT
1863 struct page *page;
1864
04ec6264
VB
1865 page = __alloc_pages(gfp, order, nid);
1866 if (page && page_to_nid(page) == nid)
ca889e6c 1867 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1da177e4
LT
1868 return page;
1869}
1870
1871/**
0bbbc0b3 1872 * alloc_pages_vma - Allocate a page for a VMA.
1da177e4
LT
1873 *
1874 * @gfp:
1875 * %GFP_USER user allocation,
1876 * %GFP_KERNEL kernel allocations,
1877 * %GFP_HIGHMEM highmem/user allocations,
1878 * %GFP_FS allocation should not call back into a file system,
1879 * %GFP_ATOMIC don't sleep.
1880 *
0bbbc0b3 1881 * @order: Order of the GFP allocation.
1da177e4
LT
1882 * @vma: Pointer to VMA or NULL if not available.
1883 * @addr: Virtual Address of the allocation. Must be inside the VMA.
be97a41b
VB
1884 * @node: Which node to prefer for allocation (modulo policy).
1885 * @hugepage: for hugepages try only the preferred node if possible
1da177e4
LT
1886 *
1887 * This function allocates a page from the kernel page pool and applies
1888 * a NUMA policy associated with the VMA or the current process.
1889 * When VMA is not NULL, caller must hold down_read on the mmap_sem of the
1890 * mm_struct of the VMA to prevent it from going away. Should be used for
be97a41b
VB
1891 * all allocations for pages that will be mapped into user space. Returns
1892 * NULL when no page can be allocated.
1da177e4
LT
1893 */
1894struct page *
0bbbc0b3 1895alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
be97a41b 1896 unsigned long addr, int node, bool hugepage)
1da177e4 1897{
cc9a6c87 1898 struct mempolicy *pol;
c0ff7453 1899 struct page *page;
04ec6264 1900 int preferred_nid;
be97a41b 1901 nodemask_t *nmask;
cc9a6c87 1902
dd6eecb9 1903 pol = get_vma_policy(vma, addr);
1da177e4 1904
0867a57c
VB
1905 if (pol->mode == MPOL_INTERLEAVE) {
1906 unsigned nid;
1907
1908 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
1909 mpol_cond_put(pol);
1910 page = alloc_page_interleave(gfp, order, nid);
1911 goto out;
1912 }
1913
1914 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
1915 int hpage_node = node;
1916
be97a41b
VB
1917 /*
1918 * For hugepage allocation and non-interleave policy which
0867a57c
VB
1919 * allows the current node (or other explicitly preferred
1920 * node), we only try to allocate from the current/preferred
1921 * node and don't fall back to other nodes, as the cost of
1922 * remote accesses would likely offset THP benefits.
be97a41b
VB
1923 *
1924 * If the policy is interleave, or does not allow the current
1925 * node in its nodemask, we allocate the standard way.
1926 */
0867a57c
VB
1927 if (pol->mode == MPOL_PREFERRED &&
1928 !(pol->flags & MPOL_F_LOCAL))
1929 hpage_node = pol->v.preferred_node;
1930
be97a41b 1931 nmask = policy_nodemask(gfp, pol);
0867a57c 1932 if (!nmask || node_isset(hpage_node, *nmask)) {
be97a41b 1933 mpol_cond_put(pol);
96db800f 1934 page = __alloc_pages_node(hpage_node,
5265047a 1935 gfp | __GFP_THISNODE, order);
be97a41b
VB
1936 goto out;
1937 }
1938 }
1939
be97a41b 1940 nmask = policy_nodemask(gfp, pol);
04ec6264
VB
1941 preferred_nid = policy_node(gfp, pol, node);
1942 page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
d51e9894 1943 mpol_cond_put(pol);
be97a41b 1944out:
c0ff7453 1945 return page;
1da177e4
LT
1946}
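/*
 * Most callers go through the alloc_page_vma() wrapper, e.g. from the
 * anonymous fault path (illustrative, simplified):
 *
 *	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */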
1947
1948/**
1949 * alloc_pages_current - Allocate pages.
1950 *
1951 * @gfp:
1952 * %GFP_USER user allocation,
1953 * %GFP_KERNEL kernel allocation,
1954 * %GFP_HIGHMEM highmem allocation,
1955 * %GFP_FS don't call back into a file system.
1956 * %GFP_ATOMIC don't sleep.
1957 * @order: Power of two of allocation size in pages. 0 is a single page.
1958 *
1959 * Allocate a page from the kernel page pool. When not in
1960 * interrupt context, apply the current process' NUMA policy.
1961 * Returns NULL when no page can be allocated.
1da177e4 1962 */
dd0fc66f 1963struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1da177e4 1964{
8d90274b 1965 struct mempolicy *pol = &default_policy;
c0ff7453 1966 struct page *page;
1da177e4 1967
8d90274b
ON
1968 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
1969 pol = get_task_policy(current);
52cd3b07
LS
1970
1971 /*
1972 * No reference counting needed for current->mempolicy
1973 * nor system default_policy
1974 */
45c4745a 1975 if (pol->mode == MPOL_INTERLEAVE)
c0ff7453
MX
1976 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
1977 else
1978 page = __alloc_pages_nodemask(gfp, order,
04ec6264 1979 policy_node(gfp, pol, numa_node_id()),
5c4b4be3 1980 policy_nodemask(gfp, pol));
cc9a6c87 1981
c0ff7453 1982 return page;
1da177e4
LT
1983}
1984EXPORT_SYMBOL(alloc_pages_current);
1985
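/*
 * Userspace view (illustrative): the policy consulted above is the one
 * installed with set_mempolicy(2), declared in libnuma's <numaif.h>:
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *
 *	if (set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask)))
 *		perror("set_mempolicy");
 *
 * After this, allocations that reach alloc_pages_current() interleave
 * across nodes 0 and 1.
 */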
ef0855d3
ON
1986int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
1987{
1988 struct mempolicy *pol = mpol_dup(vma_policy(src));
1989
1990 if (IS_ERR(pol))
1991 return PTR_ERR(pol);
1992 dst->vm_policy = pol;
1993 return 0;
1994}
1995
4225399a 1996/*
846a16bf 1997 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
4225399a
PJ
1998 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
1999 * with the mems_allowed returned by cpuset_mems_allowed(). This
2000 * keeps mempolicies cpuset relative after its cpuset moves. See
2001 * further kernel/cpuset.c update_nodemask().
708c1bbc
MX
2002 *
2003 * current's mempolicy may be rebound by another task (the task that
2004 * changes the cpuset's mems), so we need not rebind it for the current task.
4225399a 2005 */
4225399a 2006
846a16bf
LS
2007/* Slow path of a mempolicy duplicate */
2008struct mempolicy *__mpol_dup(struct mempolicy *old)
1da177e4
LT
2009{
2010 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2011
2012 if (!new)
2013 return ERR_PTR(-ENOMEM);
708c1bbc
MX
2014
2015 /* task's mempolicy is protected by alloc_lock */
2016 if (old == current->mempolicy) {
2017 task_lock(current);
2018 *new = *old;
2019 task_unlock(current);
2020 } else
2021 *new = *old;
2022
4225399a
PJ
2023 if (current_cpuset_is_being_rebound()) {
2024 nodemask_t mems = cpuset_mems_allowed(current);
213980c0 2025 mpol_rebind_policy(new, &mems);
4225399a 2026 }
1da177e4 2027 atomic_set(&new->refcnt, 1);
1da177e4
LT
2028 return new;
2029}
2030
2031/* Slow path of a mempolicy comparison */
fcfb4dcc 2032bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1da177e4
LT
2033{
2034 if (!a || !b)
fcfb4dcc 2035 return false;
45c4745a 2036 if (a->mode != b->mode)
fcfb4dcc 2037 return false;
19800502 2038 if (a->flags != b->flags)
fcfb4dcc 2039 return false;
19800502
BL
2040 if (mpol_store_user_nodemask(a))
2041 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
fcfb4dcc 2042 return false;
19800502 2043
45c4745a 2044 switch (a->mode) {
19770b32
MG
2045 case MPOL_BIND:
2046 /* Fall through */
1da177e4 2047 case MPOL_INTERLEAVE:
fcfb4dcc 2048 return !!nodes_equal(a->v.nodes, b->v.nodes);
1da177e4 2049 case MPOL_PREFERRED:
75719661 2050 return a->v.preferred_node == b->v.preferred_node;
1da177e4
LT
2051 default:
2052 BUG();
fcfb4dcc 2053 return false;
1da177e4
LT
2054 }
2055}
2056
1da177e4
LT
2057/*
2058 * Shared memory backing store policy support.
2059 *
2060 * Remember policies even when nobody has shared memory mapped.
2061 * The policies are kept in a red-black tree linked from the inode.
4a8c7bb5 2062 * They are protected by the sp->lock rwlock, which should be held
1da177e4
LT
2063 * for any accesses to the tree.
2064 */
2065
4a8c7bb5
NZ
2066/*
2067 * Look up the first element intersecting start-end. Caller holds sp->lock for
2068 * reading or for writing
2069 */
1da177e4
LT
2070static struct sp_node *
2071sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2072{
2073 struct rb_node *n = sp->root.rb_node;
2074
2075 while (n) {
2076 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2077
2078 if (start >= p->end)
2079 n = n->rb_right;
2080 else if (end <= p->start)
2081 n = n->rb_left;
2082 else
2083 break;
2084 }
2085 if (!n)
2086 return NULL;
2087 for (;;) {
2088 struct sp_node *w = NULL;
2089 struct rb_node *prev = rb_prev(n);
2090 if (!prev)
2091 break;
2092 w = rb_entry(prev, struct sp_node, nd);
2093 if (w->end <= start)
2094 break;
2095 n = prev;
2096 }
2097 return rb_entry(n, struct sp_node, nd);
2098}
2099
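/*
 * Worked example (illustrative): with ranges [2,5) and [6,9) in the
 * tree, sp_lookup(sp, 4, 8) may first land on [6,9); the rb_prev()
 * loop then walks back while the previous range still overlaps the
 * start, so [2,5), the first intersecting range, is returned.
 */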
4a8c7bb5
NZ
2100/*
2101 * Insert a new shared policy into the list. Caller holds sp->lock for
2102 * writing.
2103 */
1da177e4
LT
2104static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2105{
2106 struct rb_node **p = &sp->root.rb_node;
2107 struct rb_node *parent = NULL;
2108 struct sp_node *nd;
2109
2110 while (*p) {
2111 parent = *p;
2112 nd = rb_entry(parent, struct sp_node, nd);
2113 if (new->start < nd->start)
2114 p = &(*p)->rb_left;
2115 else if (new->end > nd->end)
2116 p = &(*p)->rb_right;
2117 else
2118 BUG();
2119 }
2120 rb_link_node(&new->nd, parent, p);
2121 rb_insert_color(&new->nd, &sp->root);
140d5a49 2122 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
45c4745a 2123 new->policy ? new->policy->mode : 0);
1da177e4
LT
2124}
2125
2126/* Find shared policy intersecting idx */
2127struct mempolicy *
2128mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2129{
2130 struct mempolicy *pol = NULL;
2131 struct sp_node *sn;
2132
2133 if (!sp->root.rb_node)
2134 return NULL;
4a8c7bb5 2135 read_lock(&sp->lock);
1da177e4
LT
2136 sn = sp_lookup(sp, idx, idx+1);
2137 if (sn) {
2138 mpol_get(sn->policy);
2139 pol = sn->policy;
2140 }
4a8c7bb5 2141 read_unlock(&sp->lock);
1da177e4
LT
2142 return pol;
2143}
2144
63f74ca2
KM
2145static void sp_free(struct sp_node *n)
2146{
2147 mpol_put(n->policy);
2148 kmem_cache_free(sn_cache, n);
2149}
2150
771fb4d8
LS
2151/**
2152 * mpol_misplaced - check whether current page node is valid in policy
2153 *
b46e14ac
FF
2154 * @page: page to be checked
2155 * @vma: vm area where page mapped
2156 * @addr: virtual address where page mapped
771fb4d8
LS
2157 *
2158 * Look up the current policy node id for vma,addr and compare it to the page's
2159 * node id.
2160 *
2161 * Returns:
2162 * -1 - not misplaced, page is in the right node
2163 * node - node id where the page should be
2164 *
2165 * Policy determination "mimics" alloc_page_vma().
2166 * Called from fault path where we know the vma and faulting address.
2167 */
2168int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2169{
2170 struct mempolicy *pol;
c33d6c06 2171 struct zoneref *z;
771fb4d8
LS
2172 int curnid = page_to_nid(page);
2173 unsigned long pgoff;
90572890
PZ
2174 int thiscpu = raw_smp_processor_id();
2175 int thisnid = cpu_to_node(thiscpu);
771fb4d8
LS
2176 int polnid = -1;
2177 int ret = -1;
2178
2179 BUG_ON(!vma);
2180
dd6eecb9 2181 pol = get_vma_policy(vma, addr);
771fb4d8
LS
2182 if (!(pol->flags & MPOL_F_MOF))
2183 goto out;
2184
2185 switch (pol->mode) {
2186 case MPOL_INTERLEAVE:
2187 BUG_ON(addr >= vma->vm_end);
2188 BUG_ON(addr < vma->vm_start);
2189
2190 pgoff = vma->vm_pgoff;
2191 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2192 polnid = offset_il_node(pol, vma, pgoff);
2193 break;
2194
2195 case MPOL_PREFERRED:
2196 if (pol->flags & MPOL_F_LOCAL)
2197 polnid = numa_node_id();
2198 else
2199 polnid = pol->v.preferred_node;
2200 break;
2201
2202 case MPOL_BIND:
c33d6c06 2203
771fb4d8
LS
2204 /*
2205 * MPOL_BIND allows binding to multiple nodes. Use the current
2206 * page's node if it is in the policy nodemask; else select the
2207 * nearest allowed node, if any. If there are no allowed nodes,
2208 * use the current node [!misplaced].
2209 */
2210 if (node_isset(curnid, pol->v.nodes))
2211 goto out;
c33d6c06 2212 z = first_zones_zonelist(
771fb4d8
LS
2213 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2214 gfp_zone(GFP_HIGHUSER),
c33d6c06
MG
2215 &pol->v.nodes);
2216 polnid = z->zone->node;
771fb4d8
LS
2217 break;
2218
2219 default:
2220 BUG();
2221 }
5606e387
MG
2222
2223 /* Migrate the page towards the node whose CPU is referencing it */
e42c8ff2 2224 if (pol->flags & MPOL_F_MORON) {
90572890 2225 polnid = thisnid;
5606e387 2226
10f39042 2227 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
de1c9ce6 2228 goto out;
e42c8ff2
MG
2229 }
2230
771fb4d8
LS
2231 if (curnid != polnid)
2232 ret = polnid;
2233out:
2234 mpol_cond_put(pol);
2235
2236 return ret;
2237}
2238
c11600e4
DR
2239/*
2240 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2241 * dropped after task->mempolicy is set to NULL so that any allocation done as
2242 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2243 * policy.
2244 */
2245void mpol_put_task_policy(struct task_struct *task)
2246{
2247 struct mempolicy *pol;
2248
2249 task_lock(task);
2250 pol = task->mempolicy;
2251 task->mempolicy = NULL;
2252 task_unlock(task);
2253 mpol_put(pol);
2254}
2255
1da177e4
LT
2256static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2257{
140d5a49 2258 pr_debug("deleting %lx-%lx\n", n->start, n->end);
1da177e4 2259 rb_erase(&n->nd, &sp->root);
63f74ca2 2260 sp_free(n);
1da177e4
LT
2261}
2262
42288fe3
MG
2263static void sp_node_init(struct sp_node *node, unsigned long start,
2264 unsigned long end, struct mempolicy *pol)
2265{
2266 node->start = start;
2267 node->end = end;
2268 node->policy = pol;
2269}
2270
dbcb0f19
AB
2271static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2272 struct mempolicy *pol)
1da177e4 2273{
869833f2
KM
2274 struct sp_node *n;
2275 struct mempolicy *newpol;
1da177e4 2276
869833f2 2277 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1da177e4
LT
2278 if (!n)
2279 return NULL;
869833f2
KM
2280
2281 newpol = mpol_dup(pol);
2282 if (IS_ERR(newpol)) {
2283 kmem_cache_free(sn_cache, n);
2284 return NULL;
2285 }
2286 newpol->flags |= MPOL_F_SHARED;
42288fe3 2287 sp_node_init(n, start, end, newpol);
869833f2 2288
1da177e4
LT
2289 return n;
2290}
2291
2292/* Replace a policy range. */
2293static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2294 unsigned long end, struct sp_node *new)
2295{
b22d127a 2296 struct sp_node *n;
42288fe3
MG
2297 struct sp_node *n_new = NULL;
2298 struct mempolicy *mpol_new = NULL;
b22d127a 2299 int ret = 0;
1da177e4 2300
42288fe3 2301restart:
4a8c7bb5 2302 write_lock(&sp->lock);
1da177e4
LT
2303 n = sp_lookup(sp, start, end);
2304 /* Take care of old policies in the same range. */
2305 while (n && n->start < end) {
2306 struct rb_node *next = rb_next(&n->nd);
2307 if (n->start >= start) {
2308 if (n->end <= end)
2309 sp_delete(sp, n);
2310 else
2311 n->start = end;
2312 } else {
2313 /* Old policy spanning whole new range. */
2314 if (n->end > end) {
42288fe3
MG
2315 if (!n_new)
2316 goto alloc_new;
2317
2318 *mpol_new = *n->policy;
2319 atomic_set(&mpol_new->refcnt, 1);
7880639c 2320 sp_node_init(n_new, end, n->end, mpol_new);
1da177e4 2321 n->end = start;
5ca39575 2322 sp_insert(sp, n_new);
42288fe3
MG
2323 n_new = NULL;
2324 mpol_new = NULL;
1da177e4
LT
2325 break;
2326 } else
2327 n->end = start;
2328 }
2329 if (!next)
2330 break;
2331 n = rb_entry(next, struct sp_node, nd);
2332 }
2333 if (new)
2334 sp_insert(sp, new);
4a8c7bb5 2335 write_unlock(&sp->lock);
42288fe3
MG
2336 ret = 0;
2337
2338err_out:
2339 if (mpol_new)
2340 mpol_put(mpol_new);
2341 if (n_new)
2342 kmem_cache_free(sn_cache, n_new);
2343
b22d127a 2344 return ret;
42288fe3
MG
2345
2346alloc_new:
4a8c7bb5 2347 write_unlock(&sp->lock);
42288fe3
MG
2348 ret = -ENOMEM;
2349 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2350 if (!n_new)
2351 goto err_out;
2352 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2353 if (!mpol_new)
2354 goto err_out;
2355 goto restart;
1da177e4
LT
2356}
2357
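/*
 * Worked example (illustrative): if the tree holds a single policy
 * over [0,10) and shared_policy_replace(sp, 3, 7, new) is called, the
 * old node is trimmed to [0,3), a duplicate of the old policy is
 * inserted for [7,10) using the pre-allocated n_new/mpol_new pair, and
 * @new ends up covering [3,7).
 */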
71fe804b
LS
2358/**
2359 * mpol_shared_policy_init - initialize shared policy for inode
2360 * @sp: pointer to inode shared policy
2361 * @mpol: struct mempolicy to install
2362 *
2363 * Install non-NULL @mpol in inode's shared policy rb-tree.
2364 * On entry, the current task has a reference on a non-NULL @mpol.
2365 * This must be released on exit.
4bfc4495 2366 * This is called during get_inode(), so we can use GFP_KERNEL.
71fe804b
LS
2367 */
2368void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2369{
58568d2a
MX
2370 int ret;
2371
71fe804b 2372 sp->root = RB_ROOT; /* empty tree == default mempolicy */
4a8c7bb5 2373 rwlock_init(&sp->lock);
71fe804b
LS
2374
2375 if (mpol) {
2376 struct vm_area_struct pvma;
2377 struct mempolicy *new;
4bfc4495 2378 NODEMASK_SCRATCH(scratch);
71fe804b 2379
4bfc4495 2380 if (!scratch)
5c0c1654 2381 goto put_mpol;
71fe804b
LS
2382 /* contextualize the tmpfs mount point mempolicy */
2383 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
15d77835 2384 if (IS_ERR(new))
0cae3457 2385 goto free_scratch; /* no valid nodemask intersection */
58568d2a
MX
2386
2387 task_lock(current);
4bfc4495 2388 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
58568d2a 2389 task_unlock(current);
15d77835 2390 if (ret)
5c0c1654 2391 goto put_new;
71fe804b
LS
2392
2393 /* Create pseudo-vma that contains just the policy */
2394 memset(&pvma, 0, sizeof(struct vm_area_struct));
2395 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2396 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
15d77835 2397
5c0c1654 2398put_new:
71fe804b 2399 mpol_put(new); /* drop initial ref */
0cae3457 2400free_scratch:
4bfc4495 2401 NODEMASK_SCRATCH_FREE(scratch);
5c0c1654
LS
2402put_mpol:
2403 mpol_put(mpol); /* drop our incoming ref on sb mpol */
7339ff83
RH
2404 }
2405}
2406
1da177e4
LT
2407int mpol_set_shared_policy(struct shared_policy *info,
2408 struct vm_area_struct *vma, struct mempolicy *npol)
2409{
2410 int err;
2411 struct sp_node *new = NULL;
2412 unsigned long sz = vma_pages(vma);
2413
028fec41 2414 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
1da177e4 2415 vma->vm_pgoff,
45c4745a 2416 sz, npol ? npol->mode : -1,
028fec41 2417 npol ? npol->flags : -1,
00ef2d2f 2418 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
1da177e4
LT
2419
2420 if (npol) {
2421 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2422 if (!new)
2423 return -ENOMEM;
2424 }
2425 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2426 if (err && new)
63f74ca2 2427 sp_free(new);
1da177e4
LT
2428 return err;
2429}
2430
2431/* Free a backing policy store on inode delete. */
2432void mpol_free_shared_policy(struct shared_policy *p)
2433{
2434 struct sp_node *n;
2435 struct rb_node *next;
2436
2437 if (!p->root.rb_node)
2438 return;
4a8c7bb5 2439 write_lock(&p->lock);
1da177e4
LT
2440 next = rb_first(&p->root);
2441 while (next) {
2442 n = rb_entry(next, struct sp_node, nd);
2443 next = rb_next(&n->nd);
63f74ca2 2444 sp_delete(p, n);
1da177e4 2445 }
4a8c7bb5 2446 write_unlock(&p->lock);
1da177e4
LT
2447}
2448
1a687c2e 2449#ifdef CONFIG_NUMA_BALANCING
c297663c 2450static int __initdata numabalancing_override;
1a687c2e
MG
2451
2452static void __init check_numabalancing_enable(void)
2453{
2454 bool numabalancing_default = false;
2455
2456 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2457 numabalancing_default = true;
2458
c297663c
MG
2459 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2460 if (numabalancing_override)
2461 set_numabalancing_state(numabalancing_override == 1);
2462
b0dc2b9b 2463 if (num_online_nodes() > 1 && !numabalancing_override) {
756a025f 2464 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
c297663c 2465 numabalancing_default ? "Enabling" : "Disabling");
1a687c2e
MG
2466 set_numabalancing_state(numabalancing_default);
2467 }
2468}
2469
2470static int __init setup_numabalancing(char *str)
2471{
2472 int ret = 0;
2473 if (!str)
2474 goto out;
1a687c2e
MG
2475
2476 if (!strcmp(str, "enable")) {
c297663c 2477 numabalancing_override = 1;
1a687c2e
MG
2478 ret = 1;
2479 } else if (!strcmp(str, "disable")) {
c297663c 2480 numabalancing_override = -1;
1a687c2e
MG
2481 ret = 1;
2482 }
2483out:
2484 if (!ret)
4a404bea 2485 pr_warn("Unable to parse numa_balancing=\n");
1a687c2e
MG
2486
2487 return ret;
2488}
2489__setup("numa_balancing=", setup_numabalancing);
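/*
 * Example (illustrative): booting with "numa_balancing=disable" on the
 * kernel command line sets numabalancing_override to -1, which
 * check_numabalancing_enable() applies regardless of the
 * CONFIG_NUMA_BALANCING_DEFAULT_ENABLED default.
 */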
2490#else
2491static inline void __init check_numabalancing_enable(void)
2492{
2493}
2494#endif /* CONFIG_NUMA_BALANCING */
2495
1da177e4
LT
2496/* assumes fs == KERNEL_DS */
2497void __init numa_policy_init(void)
2498{
b71636e2
PM
2499 nodemask_t interleave_nodes;
2500 unsigned long largest = 0;
2501 int nid, prefer = 0;
2502
1da177e4
LT
2503 policy_cache = kmem_cache_create("numa_policy",
2504 sizeof(struct mempolicy),
20c2df83 2505 0, SLAB_PANIC, NULL);
1da177e4
LT
2506
2507 sn_cache = kmem_cache_create("shared_policy_node",
2508 sizeof(struct sp_node),
20c2df83 2509 0, SLAB_PANIC, NULL);
1da177e4 2510
5606e387
MG
2511 for_each_node(nid) {
2512 preferred_node_policy[nid] = (struct mempolicy) {
2513 .refcnt = ATOMIC_INIT(1),
2514 .mode = MPOL_PREFERRED,
2515 .flags = MPOL_F_MOF | MPOL_F_MORON,
2516 .v = { .preferred_node = nid, },
2517 };
2518 }
2519
b71636e2
PM
2520 /*
2521 * Set interleaving policy for system init. Interleaving is only
2522 * enabled across suitably sized nodes (default is >= 16MB), or
2523 * fall back to the largest node if they're all smaller.
2524 */
2525 nodes_clear(interleave_nodes);
01f13bd6 2526 for_each_node_state(nid, N_MEMORY) {
b71636e2
PM
2527 unsigned long total_pages = node_present_pages(nid);
2528
2529 /* Preserve the largest node */
2530 if (largest < total_pages) {
2531 largest = total_pages;
2532 prefer = nid;
2533 }
2534
2535 /* Interleave this node? */
2536 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2537 node_set(nid, interleave_nodes);
2538 }
2539
2540 /* All too small, use the largest */
2541 if (unlikely(nodes_empty(interleave_nodes)))
2542 node_set(prefer, interleave_nodes);
1da177e4 2543
028fec41 2544 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
b1de0d13 2545 pr_err("%s: interleaving failed\n", __func__);
1a687c2e
MG
2546
2547 check_numabalancing_enable();
1da177e4
LT
2548}
2549
8bccd85f 2550/* Reset policy of current process to default */
1da177e4
LT
2551void numa_default_policy(void)
2552{
028fec41 2553 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
1da177e4 2554}
68860ec1 2555
095f1fc4
LS
2556/*
2557 * Parse and format mempolicy from/to strings
2558 */
2559
1a75a6c8 2560/*
f2a07f40 2561 * "local" is implemented internally by MPOL_PREFERRED with the MPOL_F_LOCAL flag.
1a75a6c8 2562 */
345ace9c
LS
2563static const char * const policy_modes[] =
2564{
2565 [MPOL_DEFAULT] = "default",
2566 [MPOL_PREFERRED] = "prefer",
2567 [MPOL_BIND] = "bind",
2568 [MPOL_INTERLEAVE] = "interleave",
d3a71033 2569 [MPOL_LOCAL] = "local",
345ace9c 2570};
1a75a6c8 2571
095f1fc4
LS
2572
2573#ifdef CONFIG_TMPFS
2574/**
f2a07f40 2575 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
095f1fc4 2576 * @str: string containing mempolicy to parse
71fe804b 2577 * @mpol: pointer to struct mempolicy pointer, returned on success.
095f1fc4
LS
2578 *
2579 * Format of input:
2580 * <mode>[=<flags>][:<nodelist>]
2581 *
71fe804b 2582 * On success, returns 0, else 1
095f1fc4 2583 */
a7a88b23 2584int mpol_parse_str(char *str, struct mempolicy **mpol)
095f1fc4 2585{
71fe804b 2586 struct mempolicy *new = NULL;
b4652e84 2587 unsigned short mode;
f2a07f40 2588 unsigned short mode_flags;
71fe804b 2589 nodemask_t nodes;
095f1fc4
LS
2590 char *nodelist = strchr(str, ':');
2591 char *flags = strchr(str, '=');
095f1fc4
LS
2592 int err = 1;
2593
2594 if (nodelist) {
2595 /* NUL-terminate mode or flags string */
2596 *nodelist++ = '\0';
71fe804b 2597 if (nodelist_parse(nodelist, nodes))
095f1fc4 2598 goto out;
01f13bd6 2599 if (!nodes_subset(nodes, node_states[N_MEMORY]))
095f1fc4 2600 goto out;
71fe804b
LS
2601 } else
2602 nodes_clear(nodes);
2603
095f1fc4
LS
2604 if (flags)
2605 *flags++ = '\0'; /* terminate mode string */
2606
479e2802 2607 for (mode = 0; mode < MPOL_MAX; mode++) {
345ace9c 2608 if (!strcmp(str, policy_modes[mode])) {
095f1fc4
LS
2609 break;
2610 }
2611 }
a720094d 2612 if (mode >= MPOL_MAX)
095f1fc4
LS
2613 goto out;
2614
71fe804b 2615 switch (mode) {
095f1fc4 2616 case MPOL_PREFERRED:
71fe804b
LS
2617 /*
2618 * Insist on a nodelist of one node only
2619 */
095f1fc4
LS
2620 if (nodelist) {
2621 char *rest = nodelist;
2622 while (isdigit(*rest))
2623 rest++;
926f2ae0
KM
2624 if (*rest)
2625 goto out;
095f1fc4
LS
2626 }
2627 break;
095f1fc4
LS
2628 case MPOL_INTERLEAVE:
2629 /*
2630 * Default to online nodes with memory if no nodelist
2631 */
2632 if (!nodelist)
01f13bd6 2633 nodes = node_states[N_MEMORY];
3f226aa1 2634 break;
71fe804b 2635 case MPOL_LOCAL:
3f226aa1 2636 /*
71fe804b 2637 * Don't allow a nodelist; mpol_new() checks flags
3f226aa1 2638 */
71fe804b 2639 if (nodelist)
3f226aa1 2640 goto out;
71fe804b 2641 mode = MPOL_PREFERRED;
3f226aa1 2642 break;
413b43de
RT
2643 case MPOL_DEFAULT:
2644 /*
2645 * Insist on an empty nodelist
2646 */
2647 if (!nodelist)
2648 err = 0;
2649 goto out;
d69b2e63
KM
2650 case MPOL_BIND:
2651 /*
2652 * Insist on a nodelist
2653 */
2654 if (!nodelist)
2655 goto out;
095f1fc4
LS
2656 }
2657
71fe804b 2658 mode_flags = 0;
095f1fc4
LS
2659 if (flags) {
2660 /*
2661 * Currently, we only support two mutually exclusive
2662 * mode flags.
2663 */
2664 if (!strcmp(flags, "static"))
71fe804b 2665 mode_flags |= MPOL_F_STATIC_NODES;
095f1fc4 2666 else if (!strcmp(flags, "relative"))
71fe804b 2667 mode_flags |= MPOL_F_RELATIVE_NODES;
095f1fc4 2668 else
926f2ae0 2669 goto out;
095f1fc4 2670 }
71fe804b
LS
2671
2672 new = mpol_new(mode, mode_flags, &nodes);
2673 if (IS_ERR(new))
926f2ae0
KM
2674 goto out;
2675
f2a07f40
HD
2676 /*
2677 * Save nodes for mpol_to_str() to show the tmpfs mount options
2678 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2679 */
2680 if (mode != MPOL_PREFERRED)
2681 new->v.nodes = nodes;
2682 else if (nodelist)
2683 new->v.preferred_node = first_node(nodes);
2684 else
2685 new->flags |= MPOL_F_LOCAL;
2686
2687 /*
2688 * Save nodes for contextualization: this will be used to "clone"
2689 * the mempolicy in a specific context [cpuset] at a later time.
2690 */
2691 new->w.user_nodemask = nodes;
2692
926f2ae0 2693 err = 0;
71fe804b 2694
095f1fc4
LS
2695out:
2696 /* Restore string for error message */
2697 if (nodelist)
2698 *--nodelist = ':';
2699 if (flags)
2700 *--flags = '=';
71fe804b
LS
2701 if (!err)
2702 *mpol = new;
095f1fc4
LS
2703 return err;
2704}
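/*
 * Example inputs (illustrative, matching the format described above):
 *
 *	"interleave:0-3"	MPOL_INTERLEAVE over nodes 0-3
 *	"prefer=static:1"	MPOL_PREFERRED, MPOL_F_STATIC_NODES, node 1
 *	"bind:0,2"		MPOL_BIND to nodes 0 and 2
 *	"local"			MPOL_PREFERRED with MPOL_F_LOCAL
 *
 * as passed by e.g. "mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt".
 */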
2705#endif /* CONFIG_TMPFS */
2706
71fe804b
LS
2707/**
2708 * mpol_to_str - format a mempolicy structure for printing
2709 * @buffer: to contain formatted mempolicy string
2710 * @maxlen: length of @buffer
2711 * @pol: pointer to mempolicy to be formatted
71fe804b 2712 *
948927ee
DR
2713 * Convert @pol into a string. If @buffer is too short, truncate the string.
2714 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2715 * longest flag, "relative", and to display at least a few node ids.
1a75a6c8 2716 */
948927ee 2717void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1a75a6c8
CL
2718{
2719 char *p = buffer;
948927ee
DR
2720 nodemask_t nodes = NODE_MASK_NONE;
2721 unsigned short mode = MPOL_DEFAULT;
2722 unsigned short flags = 0;
2291990a 2723
8790c71a 2724 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
bea904d5 2725 mode = pol->mode;
948927ee
DR
2726 flags = pol->flags;
2727 }
bea904d5 2728
1a75a6c8
CL
2729 switch (mode) {
2730 case MPOL_DEFAULT:
1a75a6c8 2731 break;
1a75a6c8 2732 case MPOL_PREFERRED:
fc36b8d3 2733 if (flags & MPOL_F_LOCAL)
f2a07f40 2734 mode = MPOL_LOCAL;
53f2556b 2735 else
fc36b8d3 2736 node_set(pol->v.preferred_node, nodes);
1a75a6c8 2737 break;
1a75a6c8 2738 case MPOL_BIND:
1a75a6c8 2739 case MPOL_INTERLEAVE:
f2a07f40 2740 nodes = pol->v.nodes;
1a75a6c8 2741 break;
1a75a6c8 2742 default:
948927ee
DR
2743 WARN_ON_ONCE(1);
2744 snprintf(p, maxlen, "unknown");
2745 return;
1a75a6c8
CL
2746 }
2747
b7a9f420 2748 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
1a75a6c8 2749
fc36b8d3 2750 if (flags & MPOL_MODE_FLAGS) {
948927ee 2751 p += snprintf(p, buffer + maxlen - p, "=");
f5b087b5 2752
2291990a
LS
2753 /*
2754 * Currently, the only defined flags are mutually exclusive
2755 */
f5b087b5 2756 if (flags & MPOL_F_STATIC_NODES)
2291990a
LS
2757 p += snprintf(p, buffer + maxlen - p, "static");
2758 else if (flags & MPOL_F_RELATIVE_NODES)
2759 p += snprintf(p, buffer + maxlen - p, "relative");
f5b087b5
DR
2760 }
2761
9e763e0f
TH
2762 if (!nodes_empty(nodes))
2763 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
2764 nodemask_pr_args(&nodes));
1a75a6c8 2765}
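/*
 * Example outputs (illustrative), mirroring the mpol_parse_str() input
 * format: "default", "prefer:1", "local", "bind=static:0,2",
 * "interleave:0-3".
 */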