mm/mempolicy.c
1/*
2 * Simple NUMA memory policy for the Linux kernel.
3 *
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6 * Subject to the GNU Public License, version 2.
7 *
8 * NUMA policy allows the user to give hints in which node(s) memory should
9 * be allocated.
10 *
11 * Support four policies per VMA and per process:
12 *
13 * The VMA policy has priority over the process policy for a page fault.
14 *
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a process counter
20 * is used.
21 *
22 * bind Only allocate memory on a specific set of nodes,
23 * no fallback.
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead.
27 *
28 * preferred Try a specific node first before normal fallback.
29 * As a special case NUMA_NO_NODE here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
32 * process policy.
33 *
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
37 *
38 * The process policy is applied for most non interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
42 *
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
46 *
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49 * on systems with highmem kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
51 *
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
54 */
55
56/* Notebook:
57 fix mmap readahead to honour policy and enable policy for any page cache
58 object
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
61 first item above.
62 handle mremap for shared memory (currently ignored for the policy)
63 grows down?
64 make bind policy root only? It can trigger oom much faster and the
65 kernel is not always grateful with that.
66*/
67
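/*
 * Illustrative userspace sketch (not part of this file): how the policies
 * described above are requested through the set_mempolicy(2) and mbind(2)
 * system calls. Assumes the <numaif.h> wrappers from libnuma (link with
 * -lnuma); the node numbers and mapping size are examples only.
 */
#include <numaif.h>
#include <sys/mman.h>

int mempolicy_example(void)
{
	unsigned long nodes = (1UL << 0) | (1UL << 1);	/* nodes 0 and 1 */
	void *buf = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return -1;

	/* process policy: interleave future allocations across nodes 0-1 */
	if (set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes) + 1))
		return -1;

	/* VMA policy: bind this mapping to node 0 only */
	nodes = 1UL << 0;
	if (mbind(buf, 1 << 20, MPOL_BIND, &nodes, 8 * sizeof(nodes) + 1, 0))
		return -1;

	return 0;
}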
68#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69
70#include <linux/mempolicy.h>
71#include <linux/mm.h>
72#include <linux/highmem.h>
73#include <linux/hugetlb.h>
74#include <linux/kernel.h>
75#include <linux/sched.h>
76#include <linux/sched/mm.h>
77#include <linux/sched/numa_balancing.h>
78#include <linux/sched/task.h>
79#include <linux/nodemask.h>
80#include <linux/cpuset.h>
81#include <linux/slab.h>
82#include <linux/string.h>
83#include <linux/export.h>
84#include <linux/nsproxy.h>
85#include <linux/interrupt.h>
86#include <linux/init.h>
87#include <linux/compat.h>
88#include <linux/ptrace.h>
89#include <linux/swap.h>
90#include <linux/seq_file.h>
91#include <linux/proc_fs.h>
92#include <linux/migrate.h>
93#include <linux/ksm.h>
94#include <linux/rmap.h>
95#include <linux/security.h>
96#include <linux/syscalls.h>
97#include <linux/ctype.h>
98#include <linux/mm_inline.h>
99#include <linux/mmu_notifier.h>
100#include <linux/printk.h>
101#include <linux/swapops.h>
102
103#include <asm/tlbflush.h>
104#include <linux/uaccess.h>
105
106#include "internal.h"
107
108/* Internal flags */
109#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
110#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
111
112static struct kmem_cache *policy_cache;
113static struct kmem_cache *sn_cache;
114
115/* Highest zone. A specific allocation for a zone below that is not
116 policied. */
117enum zone_type policy_zone = 0;
118
119/*
120 * run-time system-wide default policy => local allocation
121 */
122static struct mempolicy default_policy = {
123 .refcnt = ATOMIC_INIT(1), /* never free it */
124 .mode = MPOL_PREFERRED,
125 .flags = MPOL_F_LOCAL,
126};
127
128static struct mempolicy preferred_node_policy[MAX_NUMNODES];
129
130struct mempolicy *get_task_policy(struct task_struct *p)
131{
132 struct mempolicy *pol = p->mempolicy;
133 int node;
134
135 if (pol)
136 return pol;
137
138 node = numa_node_id();
139 if (node != NUMA_NO_NODE) {
140 pol = &preferred_node_policy[node];
141 /* preferred_node_policy is not initialised early in boot */
142 if (pol->mode)
143 return pol;
144 }
145
146 return &default_policy;
147}
148
149static const struct mempolicy_operations {
150 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
151 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
152} mpol_ops[MPOL_MAX];
153
154static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
155{
156 return pol->flags & MPOL_MODE_FLAGS;
157}
158
159static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
160 const nodemask_t *rel)
161{
162 nodemask_t tmp;
163 nodes_fold(tmp, *orig, nodes_weight(*rel));
164 nodes_onto(*ret, tmp, *rel);
165}
166
167static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
168{
169 if (nodes_empty(*nodes))
170 return -EINVAL;
171 pol->v.nodes = *nodes;
172 return 0;
173}
174
175static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
176{
177 if (!nodes)
178 pol->flags |= MPOL_F_LOCAL; /* local allocation */
179 else if (nodes_empty(*nodes))
180 return -EINVAL; /* no allowed nodes */
181 else
182 pol->v.preferred_node = first_node(*nodes);
183 return 0;
184}
185
186static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
187{
188 if (nodes_empty(*nodes))
189 return -EINVAL;
190 pol->v.nodes = *nodes;
191 return 0;
192}
193
194/*
195 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
196 * any, for the new policy. mpol_new() has already validated the nodes
197 * parameter with respect to the policy mode and flags. But, we need to
198 * handle an empty nodemask with MPOL_PREFERRED here.
199 *
200 * Must be called holding task's alloc_lock to protect task's mems_allowed
201 * and mempolicy. May also be called holding the mmap_semaphore for write.
202 */
203static int mpol_set_nodemask(struct mempolicy *pol,
204 const nodemask_t *nodes, struct nodemask_scratch *nsc)
205{
206 int ret;
207
208 /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
209 if (pol == NULL)
210 return 0;
211 /* Check N_MEMORY */
212 nodes_and(nsc->mask1,
213 cpuset_current_mems_allowed, node_states[N_MEMORY]);
214
215 VM_BUG_ON(!nodes);
216 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
217 nodes = NULL; /* explicit local allocation */
218 else {
219 if (pol->flags & MPOL_F_RELATIVE_NODES)
220 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
221 else
222 nodes_and(nsc->mask2, *nodes, nsc->mask1);
223
224 if (mpol_store_user_nodemask(pol))
225 pol->w.user_nodemask = *nodes;
226 else
227 pol->w.cpuset_mems_allowed =
228 cpuset_current_mems_allowed;
229 }
230
231 if (nodes)
232 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
233 else
234 ret = mpol_ops[pol->mode].create(pol, NULL);
235 return ret;
236}
237
238/*
239 * This function just creates a new policy, does some checks and simple
240 * initialization. You must invoke mpol_set_nodemask() to set nodes.
241 */
242static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
243 nodemask_t *nodes)
244{
245 struct mempolicy *policy;
246
247 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
248 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
249
250 if (mode == MPOL_DEFAULT) {
251 if (nodes && !nodes_empty(*nodes))
252 return ERR_PTR(-EINVAL);
253 return NULL;
254 }
255 VM_BUG_ON(!nodes);
256
257 /*
258 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
259 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
260 * All other modes require a valid pointer to a non-empty nodemask.
261 */
262 if (mode == MPOL_PREFERRED) {
263 if (nodes_empty(*nodes)) {
264 if (((flags & MPOL_F_STATIC_NODES) ||
265 (flags & MPOL_F_RELATIVE_NODES)))
266 return ERR_PTR(-EINVAL);
267 }
268 } else if (mode == MPOL_LOCAL) {
269 if (!nodes_empty(*nodes) ||
270 (flags & MPOL_F_STATIC_NODES) ||
271 (flags & MPOL_F_RELATIVE_NODES))
272 return ERR_PTR(-EINVAL);
273 mode = MPOL_PREFERRED;
274 } else if (nodes_empty(*nodes))
275 return ERR_PTR(-EINVAL);
276 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
277 if (!policy)
278 return ERR_PTR(-ENOMEM);
279 atomic_set(&policy->refcnt, 1);
280 policy->mode = mode;
281 policy->flags = flags;
282
283 return policy;
284}
285
52cd3b07
LS
286/* Slow path of a mpol destructor. */
287void __mpol_put(struct mempolicy *p)
288{
289 if (!atomic_dec_and_test(&p->refcnt))
290 return;
52cd3b07
LS
291 kmem_cache_free(policy_cache, p);
292}
293
213980c0 294static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
37012946
DR
295{
296}
297
213980c0 298static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
37012946
DR
299{
300 nodemask_t tmp;
301
302 if (pol->flags & MPOL_F_STATIC_NODES)
303 nodes_and(tmp, pol->w.user_nodemask, *nodes);
304 else if (pol->flags & MPOL_F_RELATIVE_NODES)
305 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
306 else {
213980c0
VB
307 nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed,
308 *nodes);
309 pol->w.cpuset_mems_allowed = tmp;
37012946 310 }
f5b087b5 311
708c1bbc
MX
312 if (nodes_empty(tmp))
313 tmp = *nodes;
314
213980c0 315 pol->v.nodes = tmp;
37012946
DR
316}
317
318static void mpol_rebind_preferred(struct mempolicy *pol,
213980c0 319 const nodemask_t *nodes)
37012946
DR
320{
321 nodemask_t tmp;
322
37012946
DR
323 if (pol->flags & MPOL_F_STATIC_NODES) {
324 int node = first_node(pol->w.user_nodemask);
325
fc36b8d3 326 if (node_isset(node, *nodes)) {
37012946 327 pol->v.preferred_node = node;
fc36b8d3
LS
328 pol->flags &= ~MPOL_F_LOCAL;
329 } else
330 pol->flags |= MPOL_F_LOCAL;
37012946
DR
331 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
332 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
333 pol->v.preferred_node = first_node(tmp);
fc36b8d3 334 } else if (!(pol->flags & MPOL_F_LOCAL)) {
37012946
DR
335 pol->v.preferred_node = node_remap(pol->v.preferred_node,
336 pol->w.cpuset_mems_allowed,
337 *nodes);
338 pol->w.cpuset_mems_allowed = *nodes;
339 }
1da177e4
LT
340}
341
708c1bbc
MX
342/*
343 * mpol_rebind_policy - Migrate a policy to a different set of nodes
344 *
213980c0
VB
345 * Per-vma policies are protected by mmap_sem. Allocations using per-task
346 * policies are protected by task->mems_allowed_seq to prevent a premature
347 * OOM/allocation failure due to parallel nodemask modification.
708c1bbc 348 */
213980c0 349static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
1d0d2680 350{
1d0d2680
DR
351 if (!pol)
352 return;
213980c0 353 if (!mpol_store_user_nodemask(pol) &&
1d0d2680
DR
354 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
355 return;
708c1bbc 356
213980c0 357 mpol_ops[pol->mode].rebind(pol, newmask);
1d0d2680
DR
358}
359
360/*
361 * Wrapper for mpol_rebind_policy() that just requires task
362 * pointer, and updates task mempolicy.
58568d2a
MX
363 *
364 * Called with task's alloc_lock held.
1d0d2680
DR
365 */
366
213980c0 367void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
1d0d2680 368{
213980c0 369 mpol_rebind_policy(tsk->mempolicy, new);
1d0d2680
DR
370}
371
372/*
373 * Rebind each vma in mm to new nodemask.
374 *
375 * Call holding a reference to mm. Takes mm->mmap_sem during call.
376 */
377
378void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
379{
380 struct vm_area_struct *vma;
381
382 down_write(&mm->mmap_sem);
383 for (vma = mm->mmap; vma; vma = vma->vm_next)
213980c0 384 mpol_rebind_policy(vma->vm_policy, new);
1d0d2680
DR
385 up_write(&mm->mmap_sem);
386}
387
37012946
DR
388static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
389 [MPOL_DEFAULT] = {
390 .rebind = mpol_rebind_default,
391 },
392 [MPOL_INTERLEAVE] = {
393 .create = mpol_new_interleave,
394 .rebind = mpol_rebind_nodemask,
395 },
396 [MPOL_PREFERRED] = {
397 .create = mpol_new_preferred,
398 .rebind = mpol_rebind_preferred,
399 },
400 [MPOL_BIND] = {
401 .create = mpol_new_bind,
402 .rebind = mpol_rebind_nodemask,
403 },
404};
405
fc301289
CL
406static void migrate_page_add(struct page *page, struct list_head *pagelist,
407 unsigned long flags);
1a75a6c8 408
6f4576e3
NH
409struct queue_pages {
410 struct list_head *pagelist;
411 unsigned long flags;
412 nodemask_t *nmask;
413 struct vm_area_struct *prev;
414};
415
88aaa2a1
NH
416/*
417 * Check if the page's nid is in qp->nmask.
418 *
419 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
420 * in the inverse of qp->nmask.
421 */
422static inline bool queue_pages_required(struct page *page,
423 struct queue_pages *qp)
424{
425 int nid = page_to_nid(page);
426 unsigned long flags = qp->flags;
427
428 return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
429}
430
c8633798
NH
431static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
432 unsigned long end, struct mm_walk *walk)
433{
434 int ret = 0;
435 struct page *page;
436 struct queue_pages *qp = walk->private;
437 unsigned long flags;
438
439 if (unlikely(is_pmd_migration_entry(*pmd))) {
440 ret = 1;
441 goto unlock;
442 }
443 page = pmd_page(*pmd);
444 if (is_huge_zero_page(page)) {
445 spin_unlock(ptl);
446 __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
447 goto out;
448 }
449 if (!thp_migration_supported()) {
450 get_page(page);
451 spin_unlock(ptl);
452 lock_page(page);
453 ret = split_huge_page(page);
454 unlock_page(page);
455 put_page(page);
456 goto out;
457 }
458 if (!queue_pages_required(page, qp)) {
459 ret = 1;
460 goto unlock;
461 }
462
463 ret = 1;
464 flags = qp->flags;
465 /* go to thp migration */
466 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
467 migrate_page_add(page, qp->pagelist, flags);
468unlock:
469 spin_unlock(ptl);
470out:
471 return ret;
472}
473
98094945
NH
474/*
475 * Scan through pages checking if pages follow certain conditions,
476 * and move them to the pagelist if they do.
477 */
6f4576e3
NH
478static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
479 unsigned long end, struct mm_walk *walk)
1da177e4 480{
6f4576e3
NH
481 struct vm_area_struct *vma = walk->vma;
482 struct page *page;
483 struct queue_pages *qp = walk->private;
484 unsigned long flags = qp->flags;
c8633798 485 int ret;
91612e0d 486 pte_t *pte;
705e87c0 487 spinlock_t *ptl;
941150a3 488
c8633798
NH
489 ptl = pmd_trans_huge_lock(pmd, vma);
490 if (ptl) {
491 ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
492 if (ret)
493 return 0;
248db92d 494 }
91612e0d 495
337d9abf
NH
496 if (pmd_trans_unstable(pmd))
497 return 0;
248db92d 498retry:
6f4576e3
NH
499 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
500 for (; addr != end; pte++, addr += PAGE_SIZE) {
91612e0d 501 if (!pte_present(*pte))
1da177e4 502 continue;
6aab341e
LT
503 page = vm_normal_page(vma, addr, *pte);
504 if (!page)
1da177e4 505 continue;
053837fc 506 /*
62b61f61
HD
507 * vm_normal_page() filters out zero pages, but there might
508 * still be PageReserved pages to skip, perhaps in a VDSO.
053837fc 509 */
b79bc0a0 510 if (PageReserved(page))
f4598c8b 511 continue;
88aaa2a1 512 if (!queue_pages_required(page, qp))
38e35860 513 continue;
c8633798 514 if (PageTransCompound(page) && !thp_migration_supported()) {
248db92d
KS
515 get_page(page);
516 pte_unmap_unlock(pte, ptl);
517 lock_page(page);
518 ret = split_huge_page(page);
519 unlock_page(page);
520 put_page(page);
521 /* Failed to split -- skip. */
522 if (ret) {
523 pte = pte_offset_map_lock(walk->mm, pmd,
524 addr, &ptl);
525 continue;
526 }
527 goto retry;
528 }
38e35860 529
77bf45e7 530 migrate_page_add(page, qp->pagelist, flags);
6f4576e3
NH
531 }
532 pte_unmap_unlock(pte - 1, ptl);
533 cond_resched();
534 return 0;
91612e0d
HD
535}
536
6f4576e3
NH
537static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
538 unsigned long addr, unsigned long end,
539 struct mm_walk *walk)
e2d8cf40
NH
540{
541#ifdef CONFIG_HUGETLB_PAGE
6f4576e3
NH
542 struct queue_pages *qp = walk->private;
543 unsigned long flags = qp->flags;
e2d8cf40 544 struct page *page;
cb900f41 545 spinlock_t *ptl;
d4c54919 546 pte_t entry;
e2d8cf40 547
6f4576e3
NH
548 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
549 entry = huge_ptep_get(pte);
d4c54919
NH
550 if (!pte_present(entry))
551 goto unlock;
552 page = pte_page(entry);
88aaa2a1 553 if (!queue_pages_required(page, qp))
e2d8cf40
NH
554 goto unlock;
555 /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
556 if (flags & (MPOL_MF_MOVE_ALL) ||
557 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
6f4576e3 558 isolate_huge_page(page, qp->pagelist);
e2d8cf40 559unlock:
cb900f41 560 spin_unlock(ptl);
e2d8cf40
NH
561#else
562 BUG();
563#endif
91612e0d 564 return 0;
1da177e4
LT
565}
566
5877231f 567#ifdef CONFIG_NUMA_BALANCING
b24f53a0 568/*
4b10e7d5
MG
569 * This is used to mark a range of virtual addresses to be inaccessible.
570 * These are later cleared by a NUMA hinting fault. Depending on these
571 * faults, pages may be migrated for better NUMA placement.
572 *
573 * This is assuming that NUMA faults are handled using PROT_NONE. If
574 * an architecture makes a different choice, it will need further
575 * changes to the core.
b24f53a0 576 */
4b10e7d5
MG
577unsigned long change_prot_numa(struct vm_area_struct *vma,
578 unsigned long addr, unsigned long end)
b24f53a0 579{
4b10e7d5 580 int nr_updated;
b24f53a0 581
4d942466 582 nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
03c5a6e1
MG
583 if (nr_updated)
584 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
b24f53a0 585
4b10e7d5 586 return nr_updated;
b24f53a0
LS
587}
588#else
589static unsigned long change_prot_numa(struct vm_area_struct *vma,
590 unsigned long addr, unsigned long end)
591{
592 return 0;
593}
5877231f 594#endif /* CONFIG_NUMA_BALANCING */
b24f53a0 595
6f4576e3
NH
596static int queue_pages_test_walk(unsigned long start, unsigned long end,
597 struct mm_walk *walk)
598{
599 struct vm_area_struct *vma = walk->vma;
600 struct queue_pages *qp = walk->private;
601 unsigned long endvma = vma->vm_end;
602 unsigned long flags = qp->flags;
603
77bf45e7 604 if (!vma_migratable(vma))
48684a65
NH
605 return 1;
606
6f4576e3
NH
607 if (endvma > end)
608 endvma = end;
609 if (vma->vm_start > start)
610 start = vma->vm_start;
611
612 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
613 if (!vma->vm_next && vma->vm_end < end)
614 return -EFAULT;
615 if (qp->prev && qp->prev->vm_end < vma->vm_start)
616 return -EFAULT;
617 }
618
619 qp->prev = vma;
620
6f4576e3
NH
621 if (flags & MPOL_MF_LAZY) {
622 /* Similar to task_numa_work, skip inaccessible VMAs */
4355c018
LC
623 if (!is_vm_hugetlb_page(vma) &&
624 (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
625 !(vma->vm_flags & VM_MIXEDMAP))
6f4576e3
NH
626 change_prot_numa(vma, start, endvma);
627 return 1;
628 }
629
77bf45e7
KS
630 /* queue pages from current vma */
631 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
6f4576e3
NH
632 return 0;
633 return 1;
634}
635
636/*
637 * Walk through page tables and collect pages to be migrated.
638 *
639 * If pages found in a given range are on a set of nodes (determined by
640 * @nodes and @flags), they are isolated and queued to the pagelist, which
641 * is passed via @private.
642 */
d05f0cdc 643static int
98094945 644queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
6f4576e3
NH
645 nodemask_t *nodes, unsigned long flags,
646 struct list_head *pagelist)
1da177e4 647{
6f4576e3
NH
648 struct queue_pages qp = {
649 .pagelist = pagelist,
650 .flags = flags,
651 .nmask = nodes,
652 .prev = NULL,
653 };
654 struct mm_walk queue_pages_walk = {
655 .hugetlb_entry = queue_pages_hugetlb,
656 .pmd_entry = queue_pages_pte_range,
657 .test_walk = queue_pages_test_walk,
658 .mm = mm,
659 .private = &qp,
660 };
661
662 return walk_page_range(start, end, &queue_pages_walk);
1da177e4
LT
663}
664
869833f2
KM
665/*
666 * Apply policy to a single VMA
667 * This must be called with the mmap_sem held for writing.
668 */
669static int vma_replace_policy(struct vm_area_struct *vma,
670 struct mempolicy *pol)
8d34694c 671{
869833f2
KM
672 int err;
673 struct mempolicy *old;
674 struct mempolicy *new;
8d34694c
KM
675
676 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
677 vma->vm_start, vma->vm_end, vma->vm_pgoff,
678 vma->vm_ops, vma->vm_file,
679 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
680
869833f2
KM
681 new = mpol_dup(pol);
682 if (IS_ERR(new))
683 return PTR_ERR(new);
684
685 if (vma->vm_ops && vma->vm_ops->set_policy) {
8d34694c 686 err = vma->vm_ops->set_policy(vma, new);
869833f2
KM
687 if (err)
688 goto err_out;
8d34694c 689 }
869833f2
KM
690
691 old = vma->vm_policy;
692 vma->vm_policy = new; /* protected by mmap_sem */
693 mpol_put(old);
694
695 return 0;
696 err_out:
697 mpol_put(new);
8d34694c
KM
698 return err;
699}
700
1da177e4 701/* Step 2: apply policy to a range and do splits. */
9d8cebd4
KM
702static int mbind_range(struct mm_struct *mm, unsigned long start,
703 unsigned long end, struct mempolicy *new_pol)
1da177e4
LT
704{
705 struct vm_area_struct *next;
9d8cebd4
KM
706 struct vm_area_struct *prev;
707 struct vm_area_struct *vma;
708 int err = 0;
e26a5114 709 pgoff_t pgoff;
9d8cebd4
KM
710 unsigned long vmstart;
711 unsigned long vmend;
1da177e4 712
097d5910 713 vma = find_vma(mm, start);
9d8cebd4
KM
714 if (!vma || vma->vm_start > start)
715 return -EFAULT;
716
097d5910 717 prev = vma->vm_prev;
e26a5114
KM
718 if (start > vma->vm_start)
719 prev = vma;
720
9d8cebd4 721 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
1da177e4 722 next = vma->vm_next;
9d8cebd4
KM
723 vmstart = max(start, vma->vm_start);
724 vmend = min(end, vma->vm_end);
725
e26a5114
KM
726 if (mpol_equal(vma_policy(vma), new_pol))
727 continue;
728
729 pgoff = vma->vm_pgoff +
730 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
9d8cebd4 731 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
19a809af
AA
732 vma->anon_vma, vma->vm_file, pgoff,
733 new_pol, vma->vm_userfaultfd_ctx);
9d8cebd4
KM
734 if (prev) {
735 vma = prev;
736 next = vma->vm_next;
3964acd0
ON
737 if (mpol_equal(vma_policy(vma), new_pol))
738 continue;
739 /* vma_merge() joined vma && vma->next, case 8 */
740 goto replace;
9d8cebd4
KM
741 }
742 if (vma->vm_start != vmstart) {
743 err = split_vma(vma->vm_mm, vma, vmstart, 1);
744 if (err)
745 goto out;
746 }
747 if (vma->vm_end != vmend) {
748 err = split_vma(vma->vm_mm, vma, vmend, 0);
749 if (err)
750 goto out;
751 }
3964acd0 752 replace:
869833f2 753 err = vma_replace_policy(vma, new_pol);
8d34694c
KM
754 if (err)
755 goto out;
1da177e4 756 }
9d8cebd4
KM
757
758 out:
1da177e4
LT
759 return err;
760}
761
1da177e4 762/* Set the process memory policy */
028fec41
DR
763static long do_set_mempolicy(unsigned short mode, unsigned short flags,
764 nodemask_t *nodes)
1da177e4 765{
58568d2a 766 struct mempolicy *new, *old;
4bfc4495 767 NODEMASK_SCRATCH(scratch);
58568d2a 768 int ret;
1da177e4 769
4bfc4495
KH
770 if (!scratch)
771 return -ENOMEM;
f4e53d91 772
4bfc4495
KH
773 new = mpol_new(mode, flags, nodes);
774 if (IS_ERR(new)) {
775 ret = PTR_ERR(new);
776 goto out;
777 }
2c7c3a7d 778
58568d2a 779 task_lock(current);
4bfc4495 780 ret = mpol_set_nodemask(new, nodes, scratch);
58568d2a
MX
781 if (ret) {
782 task_unlock(current);
58568d2a 783 mpol_put(new);
4bfc4495 784 goto out;
58568d2a
MX
785 }
786 old = current->mempolicy;
1da177e4 787 current->mempolicy = new;
45816682
VB
788 if (new && new->mode == MPOL_INTERLEAVE)
789 current->il_prev = MAX_NUMNODES-1;
58568d2a 790 task_unlock(current);
58568d2a 791 mpol_put(old);
4bfc4495
KH
792 ret = 0;
793out:
794 NODEMASK_SCRATCH_FREE(scratch);
795 return ret;
1da177e4
LT
796}
797
bea904d5
LS
798/*
799 * Return nodemask for policy for get_mempolicy() query
58568d2a
MX
800 *
801 * Called with task's alloc_lock held
bea904d5
LS
802 */
803static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
1da177e4 804{
dfcd3c0d 805 nodes_clear(*nodes);
bea904d5
LS
806 if (p == &default_policy)
807 return;
808
45c4745a 809 switch (p->mode) {
19770b32
MG
810 case MPOL_BIND:
811 /* Fall through */
1da177e4 812 case MPOL_INTERLEAVE:
dfcd3c0d 813 *nodes = p->v.nodes;
1da177e4
LT
814 break;
815 case MPOL_PREFERRED:
fc36b8d3 816 if (!(p->flags & MPOL_F_LOCAL))
dfcd3c0d 817 node_set(p->v.preferred_node, *nodes);
53f2556b 818 /* else return empty node mask for local allocation */
1da177e4
LT
819 break;
820 default:
821 BUG();
822 }
823}
824
d4edcf0d 825static int lookup_node(unsigned long addr)
1da177e4
LT
826{
827 struct page *p;
828 int err;
829
768ae309 830 err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
1da177e4
LT
831 if (err >= 0) {
832 err = page_to_nid(p);
833 put_page(p);
834 }
835 return err;
836}
837
1da177e4 838/* Retrieve NUMA policy */
dbcb0f19
AB
839static long do_get_mempolicy(int *policy, nodemask_t *nmask,
840 unsigned long addr, unsigned long flags)
1da177e4 841{
8bccd85f 842 int err;
1da177e4
LT
843 struct mm_struct *mm = current->mm;
844 struct vm_area_struct *vma = NULL;
845 struct mempolicy *pol = current->mempolicy;
846
754af6f5
LS
847 if (flags &
848 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
1da177e4 849 return -EINVAL;
754af6f5
LS
850
851 if (flags & MPOL_F_MEMS_ALLOWED) {
852 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
853 return -EINVAL;
854 *policy = 0; /* just so it's initialized */
58568d2a 855 task_lock(current);
754af6f5 856 *nmask = cpuset_current_mems_allowed;
58568d2a 857 task_unlock(current);
754af6f5
LS
858 return 0;
859 }
860
1da177e4 861 if (flags & MPOL_F_ADDR) {
bea904d5
LS
862 /*
863 * Do NOT fall back to task policy if the
864 * vma/shared policy at addr is NULL. We
865 * want to return MPOL_DEFAULT in this case.
866 */
1da177e4
LT
867 down_read(&mm->mmap_sem);
868 vma = find_vma_intersection(mm, addr, addr+1);
869 if (!vma) {
870 up_read(&mm->mmap_sem);
871 return -EFAULT;
872 }
873 if (vma->vm_ops && vma->vm_ops->get_policy)
874 pol = vma->vm_ops->get_policy(vma, addr);
875 else
876 pol = vma->vm_policy;
877 } else if (addr)
878 return -EINVAL;
879
880 if (!pol)
bea904d5 881 pol = &default_policy; /* indicates default behavior */
1da177e4
LT
882
883 if (flags & MPOL_F_NODE) {
884 if (flags & MPOL_F_ADDR) {
d4edcf0d 885 err = lookup_node(addr);
1da177e4
LT
886 if (err < 0)
887 goto out;
8bccd85f 888 *policy = err;
1da177e4 889 } else if (pol == current->mempolicy &&
45c4745a 890 pol->mode == MPOL_INTERLEAVE) {
45816682 891 *policy = next_node_in(current->il_prev, pol->v.nodes);
1da177e4
LT
892 } else {
893 err = -EINVAL;
894 goto out;
895 }
bea904d5
LS
896 } else {
897 *policy = pol == &default_policy ? MPOL_DEFAULT :
898 pol->mode;
d79df630
DR
899 /*
900 * Internal mempolicy flags must be masked off before exposing
901 * the policy to userspace.
902 */
903 *policy |= (pol->flags & MPOL_MODE_FLAGS);
bea904d5 904 }
1da177e4 905
1da177e4 906 err = 0;
58568d2a 907 if (nmask) {
c6b6ef8b
LS
908 if (mpol_store_user_nodemask(pol)) {
909 *nmask = pol->w.user_nodemask;
910 } else {
911 task_lock(current);
912 get_policy_nodemask(pol, nmask);
913 task_unlock(current);
914 }
58568d2a 915 }
1da177e4
LT
916
917 out:
52cd3b07 918 mpol_cond_put(pol);
1da177e4
LT
919 if (vma)
920 up_read(&current->mm->mmap_sem);
921 return err;
922}
923
b20a3503 924#ifdef CONFIG_MIGRATION
6ce3c4c0 925/*
c8633798 926 * page migration, thp tail pages can be passed.
6ce3c4c0 927 */
fc301289
CL
928static void migrate_page_add(struct page *page, struct list_head *pagelist,
929 unsigned long flags)
6ce3c4c0 930{
c8633798 931 struct page *head = compound_head(page);
6ce3c4c0 932 /*
fc301289 933 * Avoid migrating a page that is shared with others.
6ce3c4c0 934 */
c8633798
NH
935 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
936 if (!isolate_lru_page(head)) {
937 list_add_tail(&head->lru, pagelist);
938 mod_node_page_state(page_pgdat(head),
939 NR_ISOLATED_ANON + page_is_file_cache(head),
940 hpage_nr_pages(head));
62695a84
NP
941 }
942 }
7e2ab150 943}
6ce3c4c0 944
742755a1 945static struct page *new_node_page(struct page *page, unsigned long node, int **x)
95a402c3 946{
e2d8cf40
NH
947 if (PageHuge(page))
948 return alloc_huge_page_node(page_hstate(compound_head(page)),
949 node);
c8633798
NH
950 else if (thp_migration_supported() && PageTransHuge(page)) {
951 struct page *thp;
952
953 thp = alloc_pages_node(node,
954 (GFP_TRANSHUGE | __GFP_THISNODE),
955 HPAGE_PMD_ORDER);
956 if (!thp)
957 return NULL;
958 prep_transhuge_page(thp);
959 return thp;
960 } else
96db800f 961 return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
b360edb4 962 __GFP_THISNODE, 0);
95a402c3
CL
963}
964
7e2ab150
CL
965/*
966 * Migrate pages from one node to a target node.
967 * Returns error or the number of pages not migrated.
968 */
dbcb0f19
AB
969static int migrate_to_node(struct mm_struct *mm, int source, int dest,
970 int flags)
7e2ab150
CL
971{
972 nodemask_t nmask;
973 LIST_HEAD(pagelist);
974 int err = 0;
975
976 nodes_clear(nmask);
977 node_set(source, nmask);
6ce3c4c0 978
08270807
MK
979 /*
980 * This does not "check" the range but isolates all pages that
981 * need migration. Between passing in the full user address
982 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
983 */
984 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
98094945 985 queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
7e2ab150
CL
986 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
987
cf608ac1 988 if (!list_empty(&pagelist)) {
68711a74 989 err = migrate_pages(&pagelist, new_node_page, NULL, dest,
9c620e2b 990 MIGRATE_SYNC, MR_SYSCALL);
cf608ac1 991 if (err)
e2d8cf40 992 putback_movable_pages(&pagelist);
cf608ac1 993 }
95a402c3 994
7e2ab150 995 return err;
6ce3c4c0
CL
996}
997
998/*
999 * Move pages between the two nodesets so as to preserve the physical
1000 * layout as much as possible.
1001 *
1002 * Returns the number of pages that could not be moved.
1003 */
0ce72d4f
AM
1004int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1005 const nodemask_t *to, int flags)
39743889 1006{
7e2ab150 1007 int busy = 0;
0aedadf9 1008 int err;
7e2ab150 1009 nodemask_t tmp;
39743889 1010
0aedadf9
CL
1011 err = migrate_prep();
1012 if (err)
1013 return err;
1014
53f2556b 1015 down_read(&mm->mmap_sem);
39743889 1016
da0aa138
KM
1017 /*
1018 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1019 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1020 * bit in 'tmp', and return that <source, dest> pair for migration.
1021 * The pair of nodemasks 'to' and 'from' define the map.
1022 *
1023 * If no pair of bits is found that way, fall back to picking some
1024 * pair of 'source' and 'dest' bits that are not the same. If the
1025 * 'source' and 'dest' bits are the same, this represents a node
1026 * that will be migrating to itself, so no pages need move.
1027 *
1028 * If no bits are left in 'tmp', or if all remaining bits left
1029 * in 'tmp' correspond to the same bit in 'to', return false
1030 * (nothing left to migrate).
1031 *
1032 * This lets us pick a pair of nodes to migrate between, such that
1033 * if possible the dest node is not already occupied by some other
1034 * source node, minimizing the risk of overloading the memory on a
1035 * node that would happen if we migrated incoming memory to a node
1036 * before migrating outgoing memory source that same node.
1037 *
1038 * A single scan of tmp is sufficient. As we go, we remember the
1039 * most recent <s, d> pair that moved (s != d). If we find a pair
1040 * that not only moved, but what's better, moved to an empty slot
1041 * (d is not set in tmp), then we break out then, with that pair.
ae0e47f0 1042 * Otherwise when we finish scanning from_tmp, we at least have the
da0aa138
KM
1043 * most recent <s, d> pair that moved. If we get all the way through
1044 * the scan of tmp without finding any node that moved, much less
1045 * moved to an empty node, then there is nothing left worth migrating.
1046 */
d4984711 1047
0ce72d4f 1048 tmp = *from;
7e2ab150
CL
1049 while (!nodes_empty(tmp)) {
1050 int s,d;
b76ac7e7 1051 int source = NUMA_NO_NODE;
7e2ab150
CL
1052 int dest = 0;
1053
1054 for_each_node_mask(s, tmp) {
4a5b18cc
LW
1055
1056 /*
1057 * do_migrate_pages() tries to maintain the relative
1058 * node relationship of the pages established between
1059 * threads and memory areas.
1060 *
1061 * However if the number of source nodes is not equal to
1062 * the number of destination nodes we can not preserve
1063 * this node relative relationship. In that case, skip
1064 * copying memory from a node that is in the destination
1065 * mask.
1066 *
1067 * Example: [2,3,4] -> [3,4,5] moves everything.
1068 * [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1069 */
1070
0ce72d4f
AM
1071 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1072 (node_isset(s, *to)))
4a5b18cc
LW
1073 continue;
1074
0ce72d4f 1075 d = node_remap(s, *from, *to);
7e2ab150
CL
1076 if (s == d)
1077 continue;
1078
1079 source = s; /* Node moved. Memorize */
1080 dest = d;
1081
1082 /* dest not in remaining from nodes? */
1083 if (!node_isset(dest, tmp))
1084 break;
1085 }
b76ac7e7 1086 if (source == NUMA_NO_NODE)
7e2ab150
CL
1087 break;
1088
1089 node_clear(source, tmp);
1090 err = migrate_to_node(mm, source, dest, flags);
1091 if (err > 0)
1092 busy += err;
1093 if (err < 0)
1094 break;
39743889
CL
1095 }
1096 up_read(&mm->mmap_sem);
7e2ab150
CL
1097 if (err < 0)
1098 return err;
1099 return busy;
b20a3503
CL
1100
1101}
1102
3ad33b24
LS
1103/*
1104 * Allocate a new page for page migration based on vma policy.
d05f0cdc 1105 * Start by assuming the page is mapped by the same vma as contains @start.
3ad33b24
LS
1106 * Search forward from there, if not. N.B., this assumes that the
1107 * list of pages handed to migrate_pages()--which is how we get here--
1108 * is in virtual address order.
1109 */
d05f0cdc 1110static struct page *new_page(struct page *page, unsigned long start, int **x)
95a402c3 1111{
d05f0cdc 1112 struct vm_area_struct *vma;
3ad33b24 1113 unsigned long uninitialized_var(address);
95a402c3 1114
d05f0cdc 1115 vma = find_vma(current->mm, start);
3ad33b24
LS
1116 while (vma) {
1117 address = page_address_in_vma(page, vma);
1118 if (address != -EFAULT)
1119 break;
1120 vma = vma->vm_next;
1121 }
11c731e8
WL
1122
1123 if (PageHuge(page)) {
cc81717e
MH
1124 BUG_ON(!vma);
1125 return alloc_huge_page_noerr(vma, address, 1);
c8633798
NH
1126 } else if (thp_migration_supported() && PageTransHuge(page)) {
1127 struct page *thp;
1128
1129 thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1130 HPAGE_PMD_ORDER);
1131 if (!thp)
1132 return NULL;
1133 prep_transhuge_page(thp);
1134 return thp;
11c731e8 1135 }
0bf598d8 1136 /*
11c731e8 1137 * if !vma, alloc_page_vma() will use task or system default policy
0bf598d8 1138 */
0f556856
MH
1139 return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
1140 vma, address);
95a402c3 1141}
b20a3503
CL
1142#else
1143
1144static void migrate_page_add(struct page *page, struct list_head *pagelist,
1145 unsigned long flags)
1146{
39743889
CL
1147}
1148
0ce72d4f
AM
1149int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1150 const nodemask_t *to, int flags)
b20a3503
CL
1151{
1152 return -ENOSYS;
1153}
95a402c3 1154
d05f0cdc 1155static struct page *new_page(struct page *page, unsigned long start, int **x)
95a402c3
CL
1156{
1157 return NULL;
1158}
b20a3503
CL
1159#endif
1160
dbcb0f19 1161static long do_mbind(unsigned long start, unsigned long len,
028fec41
DR
1162 unsigned short mode, unsigned short mode_flags,
1163 nodemask_t *nmask, unsigned long flags)
6ce3c4c0 1164{
6ce3c4c0
CL
1165 struct mm_struct *mm = current->mm;
1166 struct mempolicy *new;
1167 unsigned long end;
1168 int err;
1169 LIST_HEAD(pagelist);
1170
b24f53a0 1171 if (flags & ~(unsigned long)MPOL_MF_VALID)
6ce3c4c0 1172 return -EINVAL;
74c00241 1173 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
6ce3c4c0
CL
1174 return -EPERM;
1175
1176 if (start & ~PAGE_MASK)
1177 return -EINVAL;
1178
1179 if (mode == MPOL_DEFAULT)
1180 flags &= ~MPOL_MF_STRICT;
1181
1182 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1183 end = start + len;
1184
1185 if (end < start)
1186 return -EINVAL;
1187 if (end == start)
1188 return 0;
1189
028fec41 1190 new = mpol_new(mode, mode_flags, nmask);
6ce3c4c0
CL
1191 if (IS_ERR(new))
1192 return PTR_ERR(new);
1193
b24f53a0
LS
1194 if (flags & MPOL_MF_LAZY)
1195 new->flags |= MPOL_F_MOF;
1196
6ce3c4c0
CL
1197 /*
1198 * If we are using the default policy then operation
1199 * on discontinuous address spaces is okay after all
1200 */
1201 if (!new)
1202 flags |= MPOL_MF_DISCONTIG_OK;
1203
028fec41
DR
1204 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1205 start, start + len, mode, mode_flags,
00ef2d2f 1206 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
6ce3c4c0 1207
0aedadf9
CL
1208 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1209
1210 err = migrate_prep();
1211 if (err)
b05ca738 1212 goto mpol_out;
0aedadf9 1213 }
4bfc4495
KH
1214 {
1215 NODEMASK_SCRATCH(scratch);
1216 if (scratch) {
1217 down_write(&mm->mmap_sem);
1218 task_lock(current);
1219 err = mpol_set_nodemask(new, nmask, scratch);
1220 task_unlock(current);
1221 if (err)
1222 up_write(&mm->mmap_sem);
1223 } else
1224 err = -ENOMEM;
1225 NODEMASK_SCRATCH_FREE(scratch);
1226 }
b05ca738
KM
1227 if (err)
1228 goto mpol_out;
1229
d05f0cdc 1230 err = queue_pages_range(mm, start, end, nmask,
6ce3c4c0 1231 flags | MPOL_MF_INVERT, &pagelist);
d05f0cdc 1232 if (!err)
9d8cebd4 1233 err = mbind_range(mm, start, end, new);
7e2ab150 1234
b24f53a0
LS
1235 if (!err) {
1236 int nr_failed = 0;
1237
cf608ac1 1238 if (!list_empty(&pagelist)) {
b24f53a0 1239 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
d05f0cdc
HD
1240 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1241 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
cf608ac1 1242 if (nr_failed)
74060e4d 1243 putback_movable_pages(&pagelist);
cf608ac1 1244 }
6ce3c4c0 1245
b24f53a0 1246 if (nr_failed && (flags & MPOL_MF_STRICT))
6ce3c4c0 1247 err = -EIO;
ab8a3e14 1248 } else
b0e5fd73 1249 putback_movable_pages(&pagelist);
b20a3503 1250
6ce3c4c0 1251 up_write(&mm->mmap_sem);
b05ca738 1252 mpol_out:
f0be3d32 1253 mpol_put(new);
6ce3c4c0
CL
1254 return err;
1255}
1256
8bccd85f
CL
1257/*
1258 * User space interface with variable sized bitmaps for nodelists.
1259 */
1260
1261/* Copy a node mask from user space. */
39743889 1262static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
8bccd85f
CL
1263 unsigned long maxnode)
1264{
1265 unsigned long k;
1266 unsigned long nlongs;
1267 unsigned long endmask;
1268
1269 --maxnode;
1270 nodes_clear(*nodes);
1271 if (maxnode == 0 || !nmask)
1272 return 0;
a9c930ba 1273 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
636f13c1 1274 return -EINVAL;
8bccd85f
CL
1275
1276 nlongs = BITS_TO_LONGS(maxnode);
1277 if ((maxnode % BITS_PER_LONG) == 0)
1278 endmask = ~0UL;
1279 else
1280 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1281
1282 /* When the user specified more nodes than supported, just check
1283 that the unsupported part is all zero. */
1284 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1285 if (nlongs > PAGE_SIZE/sizeof(long))
1286 return -EINVAL;
1287 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1288 unsigned long t;
1289 if (get_user(t, nmask + k))
1290 return -EFAULT;
1291 if (k == nlongs - 1) {
1292 if (t & endmask)
1293 return -EINVAL;
1294 } else if (t)
1295 return -EINVAL;
1296 }
1297 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1298 endmask = ~0UL;
1299 }
1300
1301 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1302 return -EFAULT;
1303 nodes_addr(*nodes)[nlongs-1] &= endmask;
1304 return 0;
1305}
1306
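/*
 * Worked example for get_nodes() above (illustrative, assuming a 64-bit
 * BITS_PER_LONG): a caller passing maxnode = 65 describes one 64-bit word;
 * after the --maxnode we have 64 bits, so nlongs = 1 and endmask = ~0UL,
 * and the single long is copied from user space unchanged.
 */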
1307/* Copy a kernel node mask to user space */
1308static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1309 nodemask_t *nodes)
1310{
1311 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1312 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1313
1314 if (copy > nbytes) {
1315 if (copy > PAGE_SIZE)
1316 return -EINVAL;
1317 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1318 return -EFAULT;
1319 copy = nbytes;
1320 }
1321 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1322}
1323
938bb9f5 1324SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
f7f28ca9 1325 unsigned long, mode, const unsigned long __user *, nmask,
938bb9f5 1326 unsigned long, maxnode, unsigned, flags)
8bccd85f
CL
1327{
1328 nodemask_t nodes;
1329 int err;
028fec41 1330 unsigned short mode_flags;
8bccd85f 1331
028fec41
DR
1332 mode_flags = mode & MPOL_MODE_FLAGS;
1333 mode &= ~MPOL_MODE_FLAGS;
a3b51e01
DR
1334 if (mode >= MPOL_MAX)
1335 return -EINVAL;
4c50bc01
DR
1336 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1337 (mode_flags & MPOL_F_RELATIVE_NODES))
1338 return -EINVAL;
8bccd85f
CL
1339 err = get_nodes(&nodes, nmask, maxnode);
1340 if (err)
1341 return err;
028fec41 1342 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
8bccd85f
CL
1343}
1344
1345/* Set the process memory policy */
23c8902d 1346SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
938bb9f5 1347 unsigned long, maxnode)
8bccd85f
CL
1348{
1349 int err;
1350 nodemask_t nodes;
028fec41 1351 unsigned short flags;
8bccd85f 1352
028fec41
DR
1353 flags = mode & MPOL_MODE_FLAGS;
1354 mode &= ~MPOL_MODE_FLAGS;
1355 if ((unsigned int)mode >= MPOL_MAX)
8bccd85f 1356 return -EINVAL;
4c50bc01
DR
1357 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1358 return -EINVAL;
8bccd85f
CL
1359 err = get_nodes(&nodes, nmask, maxnode);
1360 if (err)
1361 return err;
028fec41 1362 return do_set_mempolicy(mode, flags, &nodes);
8bccd85f
CL
1363}
1364
938bb9f5
HC
1365SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1366 const unsigned long __user *, old_nodes,
1367 const unsigned long __user *, new_nodes)
39743889 1368{
596d7cfa 1369 struct mm_struct *mm = NULL;
39743889 1370 struct task_struct *task;
39743889
CL
1371 nodemask_t task_nodes;
1372 int err;
596d7cfa
KM
1373 nodemask_t *old;
1374 nodemask_t *new;
1375 NODEMASK_SCRATCH(scratch);
1376
1377 if (!scratch)
1378 return -ENOMEM;
39743889 1379
596d7cfa
KM
1380 old = &scratch->mask1;
1381 new = &scratch->mask2;
1382
1383 err = get_nodes(old, old_nodes, maxnode);
39743889 1384 if (err)
596d7cfa 1385 goto out;
39743889 1386
596d7cfa 1387 err = get_nodes(new, new_nodes, maxnode);
39743889 1388 if (err)
596d7cfa 1389 goto out;
39743889
CL
1390
1391 /* Find the mm_struct */
55cfaa3c 1392 rcu_read_lock();
228ebcbe 1393 task = pid ? find_task_by_vpid(pid) : current;
39743889 1394 if (!task) {
55cfaa3c 1395 rcu_read_unlock();
596d7cfa
KM
1396 err = -ESRCH;
1397 goto out;
39743889 1398 }
3268c63e 1399 get_task_struct(task);
39743889 1400
596d7cfa 1401 err = -EINVAL;
39743889
CL
1402
1403 /*
31367466
OE
1404 * Check if this process has the right to modify the specified process.
1405 * Use the regular "ptrace_may_access()" checks.
39743889 1406 */
31367466 1407 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
c69e8d9c 1408 rcu_read_unlock();
39743889 1409 err = -EPERM;
3268c63e 1410 goto out_put;
39743889 1411 }
c69e8d9c 1412 rcu_read_unlock();
39743889
CL
1413
1414 task_nodes = cpuset_mems_allowed(task);
1415 /* Is the user allowed to access the target nodes? */
596d7cfa 1416 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
39743889 1417 err = -EPERM;
3268c63e 1418 goto out_put;
39743889
CL
1419 }
1420
01f13bd6 1421 if (!nodes_subset(*new, node_states[N_MEMORY])) {
3b42d28b 1422 err = -EINVAL;
3268c63e 1423 goto out_put;
3b42d28b
CL
1424 }
1425
86c3a764
DQ
1426 err = security_task_movememory(task);
1427 if (err)
3268c63e 1428 goto out_put;
86c3a764 1429
3268c63e
CL
1430 mm = get_task_mm(task);
1431 put_task_struct(task);
f2a9ef88
SL
1432
1433 if (!mm) {
3268c63e 1434 err = -EINVAL;
f2a9ef88
SL
1435 goto out;
1436 }
1437
1438 err = do_migrate_pages(mm, old, new,
1439 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
3268c63e
CL
1440
1441 mmput(mm);
1442out:
596d7cfa
KM
1443 NODEMASK_SCRATCH_FREE(scratch);
1444
39743889 1445 return err;
3268c63e
CL
1446
1447out_put:
1448 put_task_struct(task);
1449 goto out;
1450
39743889
CL
1451}
1452
1453
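/*
 * Illustrative userspace sketch (not part of this file): driving the
 * migrate_pages(2) system call defined above via libnuma's <numaif.h>
 * wrapper. The pid and node numbers are examples only; link with -lnuma.
 */
#include <numaif.h>
#include <stdio.h>

static int move_task_pages_to_node1(int pid)
{
	unsigned long from = 1UL << 0;	/* pages currently on node 0 */
	unsigned long to = 1UL << 1;	/* move them to node 1 */
	long not_moved;

	not_moved = migrate_pages(pid, 8 * sizeof(unsigned long) + 1, &from, &to);
	if (not_moved < 0) {
		perror("migrate_pages");
		return -1;
	}
	/* a positive return value counts pages that could not be moved */
	printf("%ld pages could not be moved\n", not_moved);
	return 0;
}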
8bccd85f 1454/* Retrieve NUMA policy */
938bb9f5
HC
1455SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1456 unsigned long __user *, nmask, unsigned long, maxnode,
1457 unsigned long, addr, unsigned long, flags)
8bccd85f 1458{
dbcb0f19
AB
1459 int err;
1460 int uninitialized_var(pval);
8bccd85f
CL
1461 nodemask_t nodes;
1462
1463 if (nmask != NULL && maxnode < MAX_NUMNODES)
1464 return -EINVAL;
1465
1466 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1467
1468 if (err)
1469 return err;
1470
1471 if (policy && put_user(pval, policy))
1472 return -EFAULT;
1473
1474 if (nmask)
1475 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1476
1477 return err;
1478}
1479
1da177e4
LT
1480#ifdef CONFIG_COMPAT
1481
c93e0f6c
HC
1482COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1483 compat_ulong_t __user *, nmask,
1484 compat_ulong_t, maxnode,
1485 compat_ulong_t, addr, compat_ulong_t, flags)
1da177e4
LT
1486{
1487 long err;
1488 unsigned long __user *nm = NULL;
1489 unsigned long nr_bits, alloc_size;
1490 DECLARE_BITMAP(bm, MAX_NUMNODES);
1491
1492 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1493 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1494
1495 if (nmask)
1496 nm = compat_alloc_user_space(alloc_size);
1497
1498 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1499
1500 if (!err && nmask) {
2bbff6c7
KH
1501 unsigned long copy_size;
1502 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1503 err = copy_from_user(bm, nm, copy_size);
1da177e4
LT
1504 /* ensure entire bitmap is zeroed */
1505 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1506 err |= compat_put_bitmap(nmask, bm, nr_bits);
1507 }
1508
1509 return err;
1510}
1511
c93e0f6c
HC
1512COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1513 compat_ulong_t, maxnode)
1da177e4 1514{
1da177e4
LT
1515 unsigned long __user *nm = NULL;
1516 unsigned long nr_bits, alloc_size;
1517 DECLARE_BITMAP(bm, MAX_NUMNODES);
1518
1519 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1520 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1521
1522 if (nmask) {
cf01fb99
CS
1523 if (compat_get_bitmap(bm, nmask, nr_bits))
1524 return -EFAULT;
1da177e4 1525 nm = compat_alloc_user_space(alloc_size);
cf01fb99
CS
1526 if (copy_to_user(nm, bm, alloc_size))
1527 return -EFAULT;
1da177e4
LT
1528 }
1529
1da177e4
LT
1530 return sys_set_mempolicy(mode, nm, nr_bits+1);
1531}
1532
c93e0f6c
HC
1533COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1534 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1535 compat_ulong_t, maxnode, compat_ulong_t, flags)
1da177e4 1536{
1da177e4
LT
1537 unsigned long __user *nm = NULL;
1538 unsigned long nr_bits, alloc_size;
dfcd3c0d 1539 nodemask_t bm;
1da177e4
LT
1540
1541 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1542 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1543
1544 if (nmask) {
cf01fb99
CS
1545 if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1546 return -EFAULT;
1da177e4 1547 nm = compat_alloc_user_space(alloc_size);
cf01fb99
CS
1548 if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1549 return -EFAULT;
1da177e4
LT
1550 }
1551
1da177e4
LT
1552 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1553}
1554
1555#endif
1556
74d2c3a0
ON
1557struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1558 unsigned long addr)
1da177e4 1559{
8d90274b 1560 struct mempolicy *pol = NULL;
1da177e4
LT
1561
1562 if (vma) {
480eccf9 1563 if (vma->vm_ops && vma->vm_ops->get_policy) {
8d90274b 1564 pol = vma->vm_ops->get_policy(vma, addr);
00442ad0 1565 } else if (vma->vm_policy) {
1da177e4 1566 pol = vma->vm_policy;
00442ad0
MG
1567
1568 /*
1569 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1570 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1571 * count on these policies which will be dropped by
1572 * mpol_cond_put() later
1573 */
1574 if (mpol_needs_cond_ref(pol))
1575 mpol_get(pol);
1576 }
1da177e4 1577 }
f15ca78e 1578
74d2c3a0
ON
1579 return pol;
1580}
1581
1582/*
dd6eecb9 1583 * get_vma_policy(@vma, @addr)
74d2c3a0
ON
1584 * @vma: virtual memory area whose policy is sought
1585 * @addr: address in @vma for shared policy lookup
1586 *
1587 * Returns effective policy for a VMA at specified address.
dd6eecb9 1588 * Falls back to current->mempolicy or system default policy, as necessary.
74d2c3a0
ON
1589 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1590 * count--added by the get_policy() vm_op, as appropriate--to protect against
1591 * freeing by another task. It is the caller's responsibility to free the
1592 * extra reference for shared policies.
1593 */
dd6eecb9
ON
1594static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1595 unsigned long addr)
74d2c3a0
ON
1596{
1597 struct mempolicy *pol = __get_vma_policy(vma, addr);
1598
8d90274b 1599 if (!pol)
dd6eecb9 1600 pol = get_task_policy(current);
8d90274b 1601
1da177e4
LT
1602 return pol;
1603}
1604
6b6482bb 1605bool vma_policy_mof(struct vm_area_struct *vma)
fc314724 1606{
6b6482bb 1607 struct mempolicy *pol;
fc314724 1608
6b6482bb
ON
1609 if (vma->vm_ops && vma->vm_ops->get_policy) {
1610 bool ret = false;
fc314724 1611
6b6482bb
ON
1612 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1613 if (pol && (pol->flags & MPOL_F_MOF))
1614 ret = true;
1615 mpol_cond_put(pol);
8d90274b 1616
6b6482bb 1617 return ret;
fc314724
MG
1618 }
1619
6b6482bb 1620 pol = vma->vm_policy;
8d90274b 1621 if (!pol)
6b6482bb 1622 pol = get_task_policy(current);
8d90274b 1623
fc314724
MG
1624 return pol->flags & MPOL_F_MOF;
1625}
1626
d3eb1570
LJ
1627static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1628{
1629 enum zone_type dynamic_policy_zone = policy_zone;
1630
1631 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1632
1633 /*
1634 * If policy->v.nodes has movable memory only,
1635 * we apply policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1636 *
1637 * policy->v.nodes is intersected with node_states[N_MEMORY],
1638 * so if the following test fails, it implies
1639 * policy->v.nodes has movable memory only.
1640 */
1641 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1642 dynamic_policy_zone = ZONE_MOVABLE;
1643
1644 return zone >= dynamic_policy_zone;
1645}
1646
52cd3b07
LS
1647/*
1648 * Return a nodemask representing a mempolicy for filtering nodes for
1649 * page allocation
1650 */
1651static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
19770b32
MG
1652{
1653 /* Lower zones don't get a nodemask applied for MPOL_BIND */
45c4745a 1654 if (unlikely(policy->mode == MPOL_BIND) &&
d3eb1570 1655 apply_policy_zone(policy, gfp_zone(gfp)) &&
19770b32
MG
1656 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1657 return &policy->v.nodes;
1658
1659 return NULL;
1660}
1661
04ec6264
VB
1662/* Return the node id preferred by the given mempolicy, or the given id */
1663static int policy_node(gfp_t gfp, struct mempolicy *policy,
1664 int nd)
1da177e4 1665{
6d840958
MH
1666 if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
1667 nd = policy->v.preferred_node;
1668 else {
19770b32 1669 /*
6d840958
MH
1670 * __GFP_THISNODE shouldn't even be used with the bind policy
1671 * because we might easily break the expectation to stay on the
1672 * requested node and not break the policy.
19770b32 1673 */
6d840958 1674 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1da177e4 1675 }
6d840958 1676
04ec6264 1677 return nd;
1da177e4
LT
1678}
1679
1680/* Do dynamic interleaving for a process */
1681static unsigned interleave_nodes(struct mempolicy *policy)
1682{
45816682 1683 unsigned next;
1da177e4
LT
1684 struct task_struct *me = current;
1685
45816682 1686 next = next_node_in(me->il_prev, policy->v.nodes);
f5b087b5 1687 if (next < MAX_NUMNODES)
45816682
VB
1688 me->il_prev = next;
1689 return next;
1da177e4
LT
1690}
1691
dc85da15
CL
1692/*
1693 * Depending on the memory policy provide a node from which to allocate the
1694 * next slab entry.
1695 */
2a389610 1696unsigned int mempolicy_slab_node(void)
dc85da15 1697{
e7b691b0 1698 struct mempolicy *policy;
2a389610 1699 int node = numa_mem_id();
e7b691b0
AK
1700
1701 if (in_interrupt())
2a389610 1702 return node;
e7b691b0
AK
1703
1704 policy = current->mempolicy;
fc36b8d3 1705 if (!policy || policy->flags & MPOL_F_LOCAL)
2a389610 1706 return node;
bea904d5
LS
1707
1708 switch (policy->mode) {
1709 case MPOL_PREFERRED:
fc36b8d3
LS
1710 /*
1711 * handled MPOL_F_LOCAL above
1712 */
1713 return policy->v.preferred_node;
765c4507 1714
dc85da15
CL
1715 case MPOL_INTERLEAVE:
1716 return interleave_nodes(policy);
1717
dd1a239f 1718 case MPOL_BIND: {
c33d6c06
MG
1719 struct zoneref *z;
1720
dc85da15
CL
1721 /*
1722 * Follow bind policy behavior and start allocation at the
1723 * first node.
1724 */
19770b32 1725 struct zonelist *zonelist;
19770b32 1726 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
c9634cf0 1727 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
c33d6c06
MG
1728 z = first_zones_zonelist(zonelist, highest_zoneidx,
1729 &policy->v.nodes);
1730 return z->zone ? z->zone->node : node;
dd1a239f 1731 }
dc85da15 1732
dc85da15 1733 default:
bea904d5 1734 BUG();
dc85da15
CL
1735 }
1736}
1737
fee83b3a
AM
1738/*
1739 * Do static interleaving for a VMA with known offset @n. Returns the n'th
1740 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1741 * number of present nodes.
1742 */
98c70baa 1743static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1da177e4 1744{
dfcd3c0d 1745 unsigned nnodes = nodes_weight(pol->v.nodes);
f5b087b5 1746 unsigned target;
fee83b3a
AM
1747 int i;
1748 int nid;
1da177e4 1749
f5b087b5
DR
1750 if (!nnodes)
1751 return numa_node_id();
fee83b3a
AM
1752 target = (unsigned int)n % nnodes;
1753 nid = first_node(pol->v.nodes);
1754 for (i = 0; i < target; i++)
dfcd3c0d 1755 nid = next_node(nid, pol->v.nodes);
1da177e4
LT
1756 return nid;
1757}
1758
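/*
 * Worked example (illustrative) for offset_il_node() above: with
 * pol->v.nodes = {0,2,5} (nnodes = 3) and n = 4, target = 4 % 3 = 1,
 * so the walk starts at node 0 and returns the second set node, node 2.
 */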
5da7ca86
CL
1759/* Determine a node number for interleave */
1760static inline unsigned interleave_nid(struct mempolicy *pol,
1761 struct vm_area_struct *vma, unsigned long addr, int shift)
1762{
1763 if (vma) {
1764 unsigned long off;
1765
3b98b087
NA
1766 /*
1767 * for small pages, there is no difference between
1768 * shift and PAGE_SHIFT, so the bit-shift is safe.
1769 * for huge pages, since vm_pgoff is in units of small
1770 * pages, we need to shift off the always 0 bits to get
1771 * a useful offset.
1772 */
1773 BUG_ON(shift < PAGE_SHIFT);
1774 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
5da7ca86 1775 off += (addr - vma->vm_start) >> shift;
98c70baa 1776 return offset_il_node(pol, off);
5da7ca86
CL
1777 } else
1778 return interleave_nodes(pol);
1779}
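/*
 * Editor's sketch of the offset computation above for a 2MB huge page on
 * x86-64 (shift == 21, PAGE_SHIFT == 12; the architecture and page size are
 * assumptions for the example):
 *
 *	off = (vma->vm_pgoff >> 9) + ((addr - vma->vm_start) >> 21);
 *
 * i.e. both terms are expressed in whole huge pages before being handed to
 * offset_il_node().
 */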
1780
00ac59ad 1781#ifdef CONFIG_HUGETLBFS
480eccf9 1782/*
04ec6264 1783 * huge_node(@vma, @addr, @gfp_flags, @mpol, @nodemask)
b46e14ac
FF
1784 * @vma: virtual memory area whose policy is sought
1785 * @addr: address in @vma for shared policy lookup and interleave policy
1786 * @gfp_flags: for requested zone
1787 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1788 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
480eccf9 1789 *
04ec6264 1790 * Returns a nid suitable for a huge page allocation and a pointer
52cd3b07
LS
1791 * to the struct mempolicy for conditional unref after allocation.
1792 * If the effective policy is 'bind', returns a pointer to the mempolicy's
1793 * @nodemask for filtering the zonelist.
c0ff7453 1794 *
d26914d1 1795 * Must be protected by read_mems_allowed_begin()
480eccf9 1796 */
04ec6264
VB
1797int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
1798 struct mempolicy **mpol, nodemask_t **nodemask)
5da7ca86 1799{
04ec6264 1800 int nid;
5da7ca86 1801
dd6eecb9 1802 *mpol = get_vma_policy(vma, addr);
19770b32 1803 *nodemask = NULL; /* assume !MPOL_BIND */
5da7ca86 1804
52cd3b07 1805 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
04ec6264
VB
1806 nid = interleave_nid(*mpol, vma, addr,
1807 huge_page_shift(hstate_vma(vma)));
52cd3b07 1808 } else {
04ec6264 1809 nid = policy_node(gfp_flags, *mpol, numa_node_id());
52cd3b07
LS
1810 if ((*mpol)->mode == MPOL_BIND)
1811 *nodemask = &(*mpol)->v.nodes;
480eccf9 1812 }
04ec6264 1813 return nid;
5da7ca86 1814}
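/*
 * Editor's sketch of the typical huge page caller pattern (cf. the hugetlb
 * dequeue path); the variable names and gfp mask are assumptions for the
 * example, and the whole sequence runs under read_mems_allowed_begin() as
 * noted above:
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	int nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
 *	... allocate a huge page starting from nid, filtered by nodemask ...
 *	mpol_cond_put(mpol);	// drop the conditional reference
 */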
06808b08
LS
1815
1816/*
1817 * init_nodemask_of_mempolicy
1818 *
1819 * If the current task's mempolicy is "default" [NULL], return 'false'
1820 * to indicate default policy. Otherwise, extract the policy nodemask
1821 * for 'bind' or 'interleave' policy into the argument nodemask, or
1822 * initialize the argument nodemask to contain the single node for
1823 * 'preferred' or 'local' policy and return 'true' to indicate presence
1824 * of non-default mempolicy.
1825 *
1826 * We don't bother with reference counting the mempolicy [mpol_get/put]
1827 * because the current task is examining its own mempolicy and a task's
1828 * mempolicy is only ever changed by the task itself.
1829 *
1830 * N.B., it is the caller's responsibility to free a returned nodemask.
1831 */
1832bool init_nodemask_of_mempolicy(nodemask_t *mask)
1833{
1834 struct mempolicy *mempolicy;
1835 int nid;
1836
1837 if (!(mask && current->mempolicy))
1838 return false;
1839
c0ff7453 1840 task_lock(current);
06808b08
LS
1841 mempolicy = current->mempolicy;
1842 switch (mempolicy->mode) {
1843 case MPOL_PREFERRED:
1844 if (mempolicy->flags & MPOL_F_LOCAL)
1845 nid = numa_node_id();
1846 else
1847 nid = mempolicy->v.preferred_node;
1848 init_nodemask_of_node(mask, nid);
1849 break;
1850
1851 case MPOL_BIND:
1852 /* Fall through */
1853 case MPOL_INTERLEAVE:
1854 *mask = mempolicy->v.nodes;
1855 break;
1856
1857 default:
1858 BUG();
1859 }
c0ff7453 1860 task_unlock(current);
06808b08
LS
1861
1862 return true;
1863}
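/*
 * Editor's sketch of a caller (the hugetlb sysctl path has this shape);
 * the variable name is an assumption for the example:
 *
 *	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
 *	if (nodes_allowed && !init_nodemask_of_mempolicy(nodes_allowed)) {
 *		NODEMASK_FREE(nodes_allowed);	/* default policy: no restriction */
 *		nodes_allowed = &node_states[N_MEMORY];
 *	}
 */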
00ac59ad 1864#endif
5da7ca86 1865
6f48d0eb
DR
1866/*
1867 * mempolicy_nodemask_intersects
1868 *
1869 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1870 * policy. Otherwise, check for intersection between mask and the policy
1871 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1872 * policy, always return true since it may allocate elsewhere on fallback.
1873 *
1874 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1875 */
1876bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1877 const nodemask_t *mask)
1878{
1879 struct mempolicy *mempolicy;
1880 bool ret = true;
1881
1882 if (!mask)
1883 return ret;
1884 task_lock(tsk);
1885 mempolicy = tsk->mempolicy;
1886 if (!mempolicy)
1887 goto out;
1888
1889 switch (mempolicy->mode) {
1890 case MPOL_PREFERRED:
1891 /*
1892		 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes to
1893		 * allocate from; the task may fall back to other nodes under OOM.
1894 * Thus, it's possible for tsk to have allocated memory from
1895 * nodes in mask.
1896 */
1897 break;
1898 case MPOL_BIND:
1899 case MPOL_INTERLEAVE:
1900 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1901 break;
1902 default:
1903 BUG();
1904 }
1905out:
1906 task_unlock(tsk);
1907 return ret;
1908}
1909
1da177e4
LT
1910/* Allocate a page in interleaved policy.
1911 Own path because it needs to do special accounting. */
662f3a0b
AK
1912static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1913 unsigned nid)
1da177e4 1914{
1da177e4
LT
1915 struct page *page;
1916
04ec6264 1917 page = __alloc_pages(gfp, order, nid);
4518085e
KW
1918 /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
1919 if (!static_branch_likely(&vm_numa_stat_key))
1920 return page;
de55c8b2
AR
1921 if (page && page_to_nid(page) == nid) {
1922 preempt_disable();
1923 __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
1924 preempt_enable();
1925 }
1da177e4
LT
1926 return page;
1927}
1928
1929/**
0bbbc0b3 1930 * alloc_pages_vma - Allocate a page for a VMA.
1da177e4
LT
1931 *
1932 * @gfp:
1933 * %GFP_USER user allocation,
1934 * %GFP_KERNEL kernel allocations,
1935 * %GFP_HIGHMEM highmem/user allocations,
1936 * %GFP_FS allocation should not call back into a file system,
1937 * %GFP_ATOMIC don't sleep.
1938 *
0bbbc0b3 1939 * @order: Order of the GFP allocation.
1da177e4
LT
1940 * @vma: Pointer to VMA or NULL if not available.
1941 * @addr: Virtual Address of the allocation. Must be inside the VMA.
be97a41b
VB
1942 * @node: Which node to prefer for allocation (modulo policy).
1943 * @hugepage: for hugepages try only the preferred node if possible
1da177e4
LT
1944 *
1945 * This function allocates a page from the kernel page pool and applies
1946 * a NUMA policy associated with the VMA or the current process.
1947 * When VMA is not NULL, the caller must hold down_read on the mmap_sem of the
1948 * mm_struct of the VMA to prevent it from going away. Should be used for
be97a41b
VB
1949 * all allocations for pages that will be mapped into user space. Returns
1950 * NULL when no page can be allocated.
1da177e4
LT
1951 */
1952struct page *
0bbbc0b3 1953alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
be97a41b 1954 unsigned long addr, int node, bool hugepage)
1da177e4 1955{
cc9a6c87 1956 struct mempolicy *pol;
c0ff7453 1957 struct page *page;
04ec6264 1958 int preferred_nid;
be97a41b 1959 nodemask_t *nmask;
cc9a6c87 1960
dd6eecb9 1961 pol = get_vma_policy(vma, addr);
1da177e4 1962
0867a57c
VB
1963 if (pol->mode == MPOL_INTERLEAVE) {
1964 unsigned nid;
1965
1966 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
1967 mpol_cond_put(pol);
1968 page = alloc_page_interleave(gfp, order, nid);
1969 goto out;
1970 }
1971
1972 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
1973 int hpage_node = node;
1974
be97a41b
VB
1975 /*
1976 * For hugepage allocation and non-interleave policy which
0867a57c
VB
1977 * allows the current node (or other explicitly preferred
1978 * node) we only try to allocate from the current/preferred
1979 * node and don't fall back to other nodes, as the cost of
1980 * remote accesses would likely offset THP benefits.
be97a41b
VB
1981 *
1982 * If the policy is interleave, or does not allow the current
1983 * node in its nodemask, we allocate the standard way.
1984 */
0867a57c
VB
1985 if (pol->mode == MPOL_PREFERRED &&
1986 !(pol->flags & MPOL_F_LOCAL))
1987 hpage_node = pol->v.preferred_node;
1988
be97a41b 1989 nmask = policy_nodemask(gfp, pol);
0867a57c 1990 if (!nmask || node_isset(hpage_node, *nmask)) {
be97a41b 1991 mpol_cond_put(pol);
96db800f 1992 page = __alloc_pages_node(hpage_node,
5265047a 1993 gfp | __GFP_THISNODE, order);
be97a41b
VB
1994 goto out;
1995 }
1996 }
1997
be97a41b 1998 nmask = policy_nodemask(gfp, pol);
04ec6264
VB
1999 preferred_nid = policy_node(gfp, pol, node);
2000 page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
d51e9894 2001 mpol_cond_put(pol);
be97a41b 2002out:
c0ff7453 2003 return page;
1da177e4
LT
2004}
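/*
 * Editor's note: most callers reach this function through the
 * alloc_page_vma()/alloc_page_vma_node() wrappers in <linux/gfp.h>, e.g.
 * (sketch; the argument values are assumptions):
 *
 *	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 *
 * which is an order-0 allocation with node == numa_node_id() and
 * hugepage == false.
 */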
2005
2006/**
2007 * alloc_pages_current - Allocate pages.
2008 *
2009 * @gfp:
2010 * %GFP_USER user allocation,
2011 * %GFP_KERNEL kernel allocation,
2012 * %GFP_HIGHMEM highmem allocation,
2013 * %GFP_FS don't call back into a file system.
2014 * %GFP_ATOMIC don't sleep.
2015 * @order: Order of the allocation, i.e. log2 of the size in pages. 0 is a single page.
2016 *
2017 * Allocate a page from the kernel page pool. When not in
2018 * interrupt context, apply the current process' NUMA policy.
2019 * Returns NULL when no page can be allocated.
1da177e4 2020 */
dd0fc66f 2021struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1da177e4 2022{
8d90274b 2023 struct mempolicy *pol = &default_policy;
c0ff7453 2024 struct page *page;
1da177e4 2025
8d90274b
ON
2026 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2027 pol = get_task_policy(current);
52cd3b07
LS
2028
2029 /*
2030 * No reference counting needed for current->mempolicy
2031 * nor system default_policy
2032 */
45c4745a 2033 if (pol->mode == MPOL_INTERLEAVE)
c0ff7453
MX
2034 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2035 else
2036 page = __alloc_pages_nodemask(gfp, order,
04ec6264 2037 policy_node(gfp, pol, numa_node_id()),
5c4b4be3 2038 policy_nodemask(gfp, pol));
cc9a6c87 2039
c0ff7453 2040 return page;
1da177e4
LT
2041}
2042EXPORT_SYMBOL(alloc_pages_current);
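/*
 * Editor's note: with CONFIG_NUMA enabled, the generic alloc_pages() and
 * alloc_page() helpers in <linux/gfp.h> resolve to alloc_pages_current(),
 * e.g. (sketch):
 *
 *	struct page *page = alloc_page(GFP_KERNEL);	/* order 0, current policy */
 *	if (page)
 *		__free_page(page);
 */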
2043
ef0855d3
ON
2044int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2045{
2046 struct mempolicy *pol = mpol_dup(vma_policy(src));
2047
2048 if (IS_ERR(pol))
2049 return PTR_ERR(pol);
2050 dst->vm_policy = pol;
2051 return 0;
2052}
2053
4225399a 2054/*
846a16bf 2055 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
4225399a
PJ
2056 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2057 * with the mems_allowed returned by cpuset_mems_allowed(). This
2058 * keeps mempolicies cpuset-relative after its cpuset moves. See
2059 * kernel/cpuset.c update_nodemask() for further details.
708c1bbc
MX
2060 *
2061 * current's mempolicy may be rebound by another task (the task that changes
2062 * the cpuset's mems), so we needn't do rebind work for the current task.
4225399a 2063 */
4225399a 2064
846a16bf
LS
2065/* Slow path of a mempolicy duplicate */
2066struct mempolicy *__mpol_dup(struct mempolicy *old)
1da177e4
LT
2067{
2068 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2069
2070 if (!new)
2071 return ERR_PTR(-ENOMEM);
708c1bbc
MX
2072
2073 /* task's mempolicy is protected by alloc_lock */
2074 if (old == current->mempolicy) {
2075 task_lock(current);
2076 *new = *old;
2077 task_unlock(current);
2078 } else
2079 *new = *old;
2080
4225399a
PJ
2081 if (current_cpuset_is_being_rebound()) {
2082 nodemask_t mems = cpuset_mems_allowed(current);
213980c0 2083 mpol_rebind_policy(new, &mems);
4225399a 2084 }
1da177e4 2085 atomic_set(&new->refcnt, 1);
1da177e4
LT
2086 return new;
2087}
2088
2089/* Slow path of a mempolicy comparison */
fcfb4dcc 2090bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1da177e4
LT
2091{
2092 if (!a || !b)
fcfb4dcc 2093 return false;
45c4745a 2094 if (a->mode != b->mode)
fcfb4dcc 2095 return false;
19800502 2096 if (a->flags != b->flags)
fcfb4dcc 2097 return false;
19800502
BL
2098 if (mpol_store_user_nodemask(a))
2099 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
fcfb4dcc 2100 return false;
19800502 2101
45c4745a 2102 switch (a->mode) {
19770b32
MG
2103 case MPOL_BIND:
2104 /* Fall through */
1da177e4 2105 case MPOL_INTERLEAVE:
fcfb4dcc 2106 return !!nodes_equal(a->v.nodes, b->v.nodes);
1da177e4 2107 case MPOL_PREFERRED:
75719661 2108 return a->v.preferred_node == b->v.preferred_node;
1da177e4
LT
2109 default:
2110 BUG();
fcfb4dcc 2111 return false;
1da177e4
LT
2112 }
2113}
2114
1da177e4
LT
2115/*
2116 * Shared memory backing store policy support.
2117 *
2118 * Remember policies even when nobody has shared memory mapped.
2119 * The policies are kept in a red-black tree linked from the inode.
4a8c7bb5 2120 * They are protected by the sp->lock rwlock, which should be held
1da177e4
LT
2121 * for any accesses to the tree.
2122 */
2123
4a8c7bb5
NZ
2124/*
2125 * lookup first element intersecting start-end. Caller holds sp->lock for
2126 * reading or for writing
2127 */
1da177e4
LT
2128static struct sp_node *
2129sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2130{
2131 struct rb_node *n = sp->root.rb_node;
2132
2133 while (n) {
2134 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2135
2136 if (start >= p->end)
2137 n = n->rb_right;
2138 else if (end <= p->start)
2139 n = n->rb_left;
2140 else
2141 break;
2142 }
2143 if (!n)
2144 return NULL;
2145 for (;;) {
2146 struct sp_node *w = NULL;
2147 struct rb_node *prev = rb_prev(n);
2148 if (!prev)
2149 break;
2150 w = rb_entry(prev, struct sp_node, nd);
2151 if (w->end <= start)
2152 break;
2153 n = prev;
2154 }
2155 return rb_entry(n, struct sp_node, nd);
2156}
2157
4a8c7bb5
NZ
2158/*
2159 * Insert a new shared policy into the list. Caller holds sp->lock for
2160 * writing.
2161 */
1da177e4
LT
2162static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2163{
2164 struct rb_node **p = &sp->root.rb_node;
2165 struct rb_node *parent = NULL;
2166 struct sp_node *nd;
2167
2168 while (*p) {
2169 parent = *p;
2170 nd = rb_entry(parent, struct sp_node, nd);
2171 if (new->start < nd->start)
2172 p = &(*p)->rb_left;
2173 else if (new->end > nd->end)
2174 p = &(*p)->rb_right;
2175 else
2176 BUG();
2177 }
2178 rb_link_node(&new->nd, parent, p);
2179 rb_insert_color(&new->nd, &sp->root);
140d5a49 2180 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
45c4745a 2181 new->policy ? new->policy->mode : 0);
1da177e4
LT
2182}
2183
2184/* Find shared policy intersecting idx */
2185struct mempolicy *
2186mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2187{
2188 struct mempolicy *pol = NULL;
2189 struct sp_node *sn;
2190
2191 if (!sp->root.rb_node)
2192 return NULL;
4a8c7bb5 2193 read_lock(&sp->lock);
1da177e4
LT
2194 sn = sp_lookup(sp, idx, idx+1);
2195 if (sn) {
2196 mpol_get(sn->policy);
2197 pol = sn->policy;
2198 }
4a8c7bb5 2199 read_unlock(&sp->lock);
1da177e4
LT
2200 return pol;
2201}
2202
63f74ca2
KM
2203static void sp_free(struct sp_node *n)
2204{
2205 mpol_put(n->policy);
2206 kmem_cache_free(sn_cache, n);
2207}
2208
771fb4d8
LS
2209/**
2210 * mpol_misplaced - check whether current page node is valid in policy
2211 *
b46e14ac
FF
2212 * @page: page to be checked
2213 * @vma: vm area where page mapped
2214 * @addr: virtual address where page mapped
771fb4d8
LS
2215 *
2216 * Look up the current policy node id for vma,addr and "compare to" the page's
2217 * node id.
2218 *
2219 * Returns:
2220 * -1 - not misplaced, page is in the right node
2221 * node - node id where the page should be
2222 *
2223 * Policy determination "mimics" alloc_page_vma().
2224 * Called from fault path where we know the vma and faulting address.
2225 */
2226int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2227{
2228 struct mempolicy *pol;
c33d6c06 2229 struct zoneref *z;
771fb4d8
LS
2230 int curnid = page_to_nid(page);
2231 unsigned long pgoff;
90572890
PZ
2232 int thiscpu = raw_smp_processor_id();
2233 int thisnid = cpu_to_node(thiscpu);
771fb4d8
LS
2234 int polnid = -1;
2235 int ret = -1;
2236
dd6eecb9 2237 pol = get_vma_policy(vma, addr);
771fb4d8
LS
2238 if (!(pol->flags & MPOL_F_MOF))
2239 goto out;
2240
2241 switch (pol->mode) {
2242 case MPOL_INTERLEAVE:
771fb4d8
LS
2243 pgoff = vma->vm_pgoff;
2244 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
98c70baa 2245 polnid = offset_il_node(pol, pgoff);
771fb4d8
LS
2246 break;
2247
2248 case MPOL_PREFERRED:
2249 if (pol->flags & MPOL_F_LOCAL)
2250 polnid = numa_node_id();
2251 else
2252 polnid = pol->v.preferred_node;
2253 break;
2254
2255 case MPOL_BIND:
c33d6c06 2256
771fb4d8
LS
2257 /*
2258 * allows binding to multiple nodes.
2259 * use current page if in policy nodemask,
2260 * else select nearest allowed node, if any.
2261 * If no allowed nodes, use current [!misplaced].
2262 */
2263 if (node_isset(curnid, pol->v.nodes))
2264 goto out;
c33d6c06 2265 z = first_zones_zonelist(
771fb4d8
LS
2266 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2267 gfp_zone(GFP_HIGHUSER),
c33d6c06
MG
2268 &pol->v.nodes);
2269 polnid = z->zone->node;
771fb4d8
LS
2270 break;
2271
2272 default:
2273 BUG();
2274 }
5606e387
MG
2275
2276 /* Migrate the page towards the node whose CPU is referencing it */
e42c8ff2 2277 if (pol->flags & MPOL_F_MORON) {
90572890 2278 polnid = thisnid;
5606e387 2279
10f39042 2280 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
de1c9ce6 2281 goto out;
e42c8ff2
MG
2282 }
2283
771fb4d8
LS
2284 if (curnid != polnid)
2285 ret = polnid;
2286out:
2287 mpol_cond_put(pol);
2288
2289 return ret;
2290}
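/*
 * Editor's sketch of the NUMA hinting fault usage (cf. do_numa_page());
 * the variable name is an assumption for the example:
 *
 *	int target_nid = mpol_misplaced(page, vma, addr);
 *	if (target_nid == -1)
 *		... page is already on an acceptable node, leave it alone ...
 *	else
 *		... try to migrate the page towards target_nid ...
 */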
2291
c11600e4
DR
2292/*
2293 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2294 * dropped after task->mempolicy is set to NULL so that any allocation done as
2295 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2296 * policy.
2297 */
2298void mpol_put_task_policy(struct task_struct *task)
2299{
2300 struct mempolicy *pol;
2301
2302 task_lock(task);
2303 pol = task->mempolicy;
2304 task->mempolicy = NULL;
2305 task_unlock(task);
2306 mpol_put(pol);
2307}
2308
1da177e4
LT
2309static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2310{
140d5a49 2311	pr_debug("deleting %lx-%lx\n", n->start, n->end);
1da177e4 2312 rb_erase(&n->nd, &sp->root);
63f74ca2 2313 sp_free(n);
1da177e4
LT
2314}
2315
42288fe3
MG
2316static void sp_node_init(struct sp_node *node, unsigned long start,
2317 unsigned long end, struct mempolicy *pol)
2318{
2319 node->start = start;
2320 node->end = end;
2321 node->policy = pol;
2322}
2323
dbcb0f19
AB
2324static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2325 struct mempolicy *pol)
1da177e4 2326{
869833f2
KM
2327 struct sp_node *n;
2328 struct mempolicy *newpol;
1da177e4 2329
869833f2 2330 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1da177e4
LT
2331 if (!n)
2332 return NULL;
869833f2
KM
2333
2334 newpol = mpol_dup(pol);
2335 if (IS_ERR(newpol)) {
2336 kmem_cache_free(sn_cache, n);
2337 return NULL;
2338 }
2339 newpol->flags |= MPOL_F_SHARED;
42288fe3 2340 sp_node_init(n, start, end, newpol);
869833f2 2341
1da177e4
LT
2342 return n;
2343}
2344
2345/* Replace a policy range. */
2346static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2347 unsigned long end, struct sp_node *new)
2348{
b22d127a 2349 struct sp_node *n;
42288fe3
MG
2350 struct sp_node *n_new = NULL;
2351 struct mempolicy *mpol_new = NULL;
b22d127a 2352 int ret = 0;
1da177e4 2353
42288fe3 2354restart:
4a8c7bb5 2355 write_lock(&sp->lock);
1da177e4
LT
2356 n = sp_lookup(sp, start, end);
2357 /* Take care of old policies in the same range. */
2358 while (n && n->start < end) {
2359 struct rb_node *next = rb_next(&n->nd);
2360 if (n->start >= start) {
2361 if (n->end <= end)
2362 sp_delete(sp, n);
2363 else
2364 n->start = end;
2365 } else {
2366 /* Old policy spanning whole new range. */
2367 if (n->end > end) {
42288fe3
MG
2368 if (!n_new)
2369 goto alloc_new;
2370
2371 *mpol_new = *n->policy;
2372 atomic_set(&mpol_new->refcnt, 1);
7880639c 2373 sp_node_init(n_new, end, n->end, mpol_new);
1da177e4 2374 n->end = start;
5ca39575 2375 sp_insert(sp, n_new);
42288fe3
MG
2376 n_new = NULL;
2377 mpol_new = NULL;
1da177e4
LT
2378 break;
2379 } else
2380 n->end = start;
2381 }
2382 if (!next)
2383 break;
2384 n = rb_entry(next, struct sp_node, nd);
2385 }
2386 if (new)
2387 sp_insert(sp, new);
4a8c7bb5 2388 write_unlock(&sp->lock);
42288fe3
MG
2389 ret = 0;
2390
2391err_out:
2392 if (mpol_new)
2393 mpol_put(mpol_new);
2394 if (n_new)
2395 kmem_cache_free(sn_cache, n_new);
2396
b22d127a 2397 return ret;
42288fe3
MG
2398
2399alloc_new:
4a8c7bb5 2400 write_unlock(&sp->lock);
42288fe3
MG
2401 ret = -ENOMEM;
2402 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2403 if (!n_new)
2404 goto err_out;
2405 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2406 if (!mpol_new)
2407 goto err_out;
2408 goto restart;
1da177e4
LT
2409}
2410
71fe804b
LS
2411/**
2412 * mpol_shared_policy_init - initialize shared policy for inode
2413 * @sp: pointer to inode shared policy
2414 * @mpol: struct mempolicy to install
2415 *
2416 * Install non-NULL @mpol in inode's shared policy rb-tree.
2417 * On entry, the current task has a reference on a non-NULL @mpol.
2418 * This must be released on exit.
4bfc4495 2419 * This is called from get_inode(), so we can use GFP_KERNEL.
71fe804b
LS
2420 */
2421void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2422{
58568d2a
MX
2423 int ret;
2424
71fe804b 2425 sp->root = RB_ROOT; /* empty tree == default mempolicy */
4a8c7bb5 2426 rwlock_init(&sp->lock);
71fe804b
LS
2427
2428 if (mpol) {
2429 struct vm_area_struct pvma;
2430 struct mempolicy *new;
4bfc4495 2431 NODEMASK_SCRATCH(scratch);
71fe804b 2432
4bfc4495 2433 if (!scratch)
5c0c1654 2434 goto put_mpol;
71fe804b
LS
2435 /* contextualize the tmpfs mount point mempolicy */
2436 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
15d77835 2437 if (IS_ERR(new))
0cae3457 2438 goto free_scratch; /* no valid nodemask intersection */
58568d2a
MX
2439
2440 task_lock(current);
4bfc4495 2441 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
58568d2a 2442 task_unlock(current);
15d77835 2443 if (ret)
5c0c1654 2444 goto put_new;
71fe804b
LS
2445
2446 /* Create pseudo-vma that contains just the policy */
2447 memset(&pvma, 0, sizeof(struct vm_area_struct));
2448 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2449 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
15d77835 2450
5c0c1654 2451put_new:
71fe804b 2452 mpol_put(new); /* drop initial ref */
0cae3457 2453free_scratch:
4bfc4495 2454 NODEMASK_SCRATCH_FREE(scratch);
5c0c1654
LS
2455put_mpol:
2456 mpol_put(mpol); /* drop our incoming ref on sb mpol */
7339ff83
RH
2457 }
2458}
2459
1da177e4
LT
2460int mpol_set_shared_policy(struct shared_policy *info,
2461 struct vm_area_struct *vma, struct mempolicy *npol)
2462{
2463 int err;
2464 struct sp_node *new = NULL;
2465 unsigned long sz = vma_pages(vma);
2466
028fec41 2467 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
1da177e4 2468 vma->vm_pgoff,
45c4745a 2469 sz, npol ? npol->mode : -1,
028fec41 2470 npol ? npol->flags : -1,
00ef2d2f 2471 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
1da177e4
LT
2472
2473 if (npol) {
2474 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2475 if (!new)
2476 return -ENOMEM;
2477 }
2478 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2479 if (err && new)
63f74ca2 2480 sp_free(new);
1da177e4
LT
2481 return err;
2482}
2483
2484/* Free a backing policy store on inode delete. */
2485void mpol_free_shared_policy(struct shared_policy *p)
2486{
2487 struct sp_node *n;
2488 struct rb_node *next;
2489
2490 if (!p->root.rb_node)
2491 return;
4a8c7bb5 2492 write_lock(&p->lock);
1da177e4
LT
2493 next = rb_first(&p->root);
2494 while (next) {
2495 n = rb_entry(next, struct sp_node, nd);
2496 next = rb_next(&n->nd);
63f74ca2 2497 sp_delete(p, n);
1da177e4 2498 }
4a8c7bb5 2499 write_unlock(&p->lock);
1da177e4
LT
2500}
2501
1a687c2e 2502#ifdef CONFIG_NUMA_BALANCING
c297663c 2503static int __initdata numabalancing_override;
1a687c2e
MG
2504
2505static void __init check_numabalancing_enable(void)
2506{
2507 bool numabalancing_default = false;
2508
2509 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2510 numabalancing_default = true;
2511
c297663c
MG
2512 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2513 if (numabalancing_override)
2514 set_numabalancing_state(numabalancing_override == 1);
2515
b0dc2b9b 2516 if (num_online_nodes() > 1 && !numabalancing_override) {
756a025f 2517 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
c297663c 2518 numabalancing_default ? "Enabling" : "Disabling");
1a687c2e
MG
2519 set_numabalancing_state(numabalancing_default);
2520 }
2521}
2522
2523static int __init setup_numabalancing(char *str)
2524{
2525 int ret = 0;
2526 if (!str)
2527 goto out;
1a687c2e
MG
2528
2529 if (!strcmp(str, "enable")) {
c297663c 2530 numabalancing_override = 1;
1a687c2e
MG
2531 ret = 1;
2532 } else if (!strcmp(str, "disable")) {
c297663c 2533 numabalancing_override = -1;
1a687c2e
MG
2534 ret = 1;
2535 }
2536out:
2537 if (!ret)
4a404bea 2538 pr_warn("Unable to parse numa_balancing=\n");
1a687c2e
MG
2539
2540 return ret;
2541}
2542__setup("numa_balancing=", setup_numabalancing);
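/*
 * Editor's example: the option parsed above is given on the kernel command
 * line, e.g.
 *
 *	numa_balancing=enable
 *	numa_balancing=disable
 *
 * Any other value prints the warning and leaves the Kconfig default in place.
 */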
2543#else
2544static inline void __init check_numabalancing_enable(void)
2545{
2546}
2547#endif /* CONFIG_NUMA_BALANCING */
2548
1da177e4
LT
2549/* assumes fs == KERNEL_DS */
2550void __init numa_policy_init(void)
2551{
b71636e2
PM
2552 nodemask_t interleave_nodes;
2553 unsigned long largest = 0;
2554 int nid, prefer = 0;
2555
1da177e4
LT
2556 policy_cache = kmem_cache_create("numa_policy",
2557 sizeof(struct mempolicy),
20c2df83 2558 0, SLAB_PANIC, NULL);
1da177e4
LT
2559
2560 sn_cache = kmem_cache_create("shared_policy_node",
2561 sizeof(struct sp_node),
20c2df83 2562 0, SLAB_PANIC, NULL);
1da177e4 2563
5606e387
MG
2564 for_each_node(nid) {
2565 preferred_node_policy[nid] = (struct mempolicy) {
2566 .refcnt = ATOMIC_INIT(1),
2567 .mode = MPOL_PREFERRED,
2568 .flags = MPOL_F_MOF | MPOL_F_MORON,
2569 .v = { .preferred_node = nid, },
2570 };
2571 }
2572
b71636e2
PM
2573 /*
2574 * Set interleaving policy for system init. Interleaving is only
2575	 * enabled across suitably sized nodes (default is >= 16MB); otherwise
2576	 * we fall back to the largest node if they're all smaller.
2577 */
2578 nodes_clear(interleave_nodes);
01f13bd6 2579 for_each_node_state(nid, N_MEMORY) {
b71636e2
PM
2580 unsigned long total_pages = node_present_pages(nid);
2581
2582 /* Preserve the largest node */
2583 if (largest < total_pages) {
2584 largest = total_pages;
2585 prefer = nid;
2586 }
2587
2588 /* Interleave this node? */
2589 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2590 node_set(nid, interleave_nodes);
2591 }
2592
2593 /* All too small, use the largest */
2594 if (unlikely(nodes_empty(interleave_nodes)))
2595 node_set(prefer, interleave_nodes);
1da177e4 2596
028fec41 2597 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
b1de0d13 2598 pr_err("%s: interleaving failed\n", __func__);
1a687c2e
MG
2599
2600 check_numabalancing_enable();
1da177e4
LT
2601}
2602
8bccd85f 2603/* Reset policy of current process to default */
1da177e4
LT
2604void numa_default_policy(void)
2605{
028fec41 2606 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
1da177e4 2607}
68860ec1 2608
095f1fc4
LS
2609/*
2610 * Parse and format mempolicy from/to strings
2611 */
2612
1a75a6c8 2613/*
f2a07f40 2614 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
1a75a6c8 2615 */
345ace9c
LS
2616static const char * const policy_modes[] =
2617{
2618 [MPOL_DEFAULT] = "default",
2619 [MPOL_PREFERRED] = "prefer",
2620 [MPOL_BIND] = "bind",
2621 [MPOL_INTERLEAVE] = "interleave",
d3a71033 2622 [MPOL_LOCAL] = "local",
345ace9c 2623};
1a75a6c8 2624
095f1fc4
LS
2625
2626#ifdef CONFIG_TMPFS
2627/**
f2a07f40 2628 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
095f1fc4 2629 * @str: string containing mempolicy to parse
71fe804b 2630 * @mpol: pointer to struct mempolicy pointer, returned on success.
095f1fc4
LS
2631 *
2632 * Format of input:
2633 * <mode>[=<flags>][:<nodelist>]
2634 *
71fe804b 2635 * On success, returns 0, else 1
095f1fc4 2636 */
a7a88b23 2637int mpol_parse_str(char *str, struct mempolicy **mpol)
095f1fc4 2638{
71fe804b 2639 struct mempolicy *new = NULL;
b4652e84 2640 unsigned short mode;
f2a07f40 2641 unsigned short mode_flags;
71fe804b 2642 nodemask_t nodes;
095f1fc4
LS
2643 char *nodelist = strchr(str, ':');
2644 char *flags = strchr(str, '=');
095f1fc4
LS
2645 int err = 1;
2646
2647 if (nodelist) {
2648 /* NUL-terminate mode or flags string */
2649 *nodelist++ = '\0';
71fe804b 2650 if (nodelist_parse(nodelist, nodes))
095f1fc4 2651 goto out;
01f13bd6 2652 if (!nodes_subset(nodes, node_states[N_MEMORY]))
095f1fc4 2653 goto out;
71fe804b
LS
2654 } else
2655 nodes_clear(nodes);
2656
095f1fc4
LS
2657 if (flags)
2658 *flags++ = '\0'; /* terminate mode string */
2659
479e2802 2660 for (mode = 0; mode < MPOL_MAX; mode++) {
345ace9c 2661 if (!strcmp(str, policy_modes[mode])) {
095f1fc4
LS
2662 break;
2663 }
2664 }
a720094d 2665 if (mode >= MPOL_MAX)
095f1fc4
LS
2666 goto out;
2667
71fe804b 2668 switch (mode) {
095f1fc4 2669 case MPOL_PREFERRED:
71fe804b
LS
2670 /*
2671 * Insist on a nodelist of one node only
2672 */
095f1fc4
LS
2673 if (nodelist) {
2674 char *rest = nodelist;
2675 while (isdigit(*rest))
2676 rest++;
926f2ae0
KM
2677 if (*rest)
2678 goto out;
095f1fc4
LS
2679 }
2680 break;
095f1fc4
LS
2681 case MPOL_INTERLEAVE:
2682 /*
2683 * Default to online nodes with memory if no nodelist
2684 */
2685 if (!nodelist)
01f13bd6 2686 nodes = node_states[N_MEMORY];
3f226aa1 2687 break;
71fe804b 2688 case MPOL_LOCAL:
3f226aa1 2689 /*
71fe804b 2690 * Don't allow a nodelist; mpol_new() checks flags
3f226aa1 2691 */
71fe804b 2692 if (nodelist)
3f226aa1 2693 goto out;
71fe804b 2694 mode = MPOL_PREFERRED;
3f226aa1 2695 break;
413b43de
RT
2696 case MPOL_DEFAULT:
2697 /*
2698		 * Insist on an empty nodelist
2699 */
2700 if (!nodelist)
2701 err = 0;
2702 goto out;
d69b2e63
KM
2703 case MPOL_BIND:
2704 /*
2705 * Insist on a nodelist
2706 */
2707 if (!nodelist)
2708 goto out;
095f1fc4
LS
2709 }
2710
71fe804b 2711 mode_flags = 0;
095f1fc4
LS
2712 if (flags) {
2713 /*
2714 * Currently, we only support two mutually exclusive
2715 * mode flags.
2716 */
2717 if (!strcmp(flags, "static"))
71fe804b 2718 mode_flags |= MPOL_F_STATIC_NODES;
095f1fc4 2719 else if (!strcmp(flags, "relative"))
71fe804b 2720 mode_flags |= MPOL_F_RELATIVE_NODES;
095f1fc4 2721 else
926f2ae0 2722 goto out;
095f1fc4 2723 }
71fe804b
LS
2724
2725 new = mpol_new(mode, mode_flags, &nodes);
2726 if (IS_ERR(new))
926f2ae0
KM
2727 goto out;
2728
f2a07f40
HD
2729 /*
2730 * Save nodes for mpol_to_str() to show the tmpfs mount options
2731 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2732 */
2733 if (mode != MPOL_PREFERRED)
2734 new->v.nodes = nodes;
2735 else if (nodelist)
2736 new->v.preferred_node = first_node(nodes);
2737 else
2738 new->flags |= MPOL_F_LOCAL;
2739
2740 /*
2741 * Save nodes for contextualization: this will be used to "clone"
2742 * the mempolicy in a specific context [cpuset] at a later time.
2743 */
2744 new->w.user_nodemask = nodes;
2745
926f2ae0 2746 err = 0;
71fe804b 2747
095f1fc4
LS
2748out:
2749 /* Restore string for error message */
2750 if (nodelist)
2751 *--nodelist = ':';
2752 if (flags)
2753 *--flags = '=';
71fe804b
LS
2754 if (!err)
2755 *mpol = new;
095f1fc4
LS
2756 return err;
2757}
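/*
 * Editor's examples of strings accepted by mpol_parse_str(), in the
 * <mode>[=<flags>][:<nodelist>] format documented above (the node ids are
 * assumptions and must be online nodes with memory):
 *
 *	"default"			MPOL_DEFAULT
 *	"prefer:2"			MPOL_PREFERRED, node 2
 *	"bind:0-3"			MPOL_BIND across nodes 0-3
 *	"interleave=static:0,2"		MPOL_INTERLEAVE with MPOL_F_STATIC_NODES
 *	"local"				MPOL_PREFERRED with MPOL_F_LOCAL
 *
 * These are the values used for the tmpfs "mpol=" mount option.
 */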
2758#endif /* CONFIG_TMPFS */
2759
71fe804b
LS
2760/**
2761 * mpol_to_str - format a mempolicy structure for printing
2762 * @buffer: to contain formatted mempolicy string
2763 * @maxlen: length of @buffer
2764 * @pol: pointer to mempolicy to be formatted
71fe804b 2765 *
948927ee
DR
2766 * Convert @pol into a string. If @buffer is too short, truncate the string.
2767 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2768 * longest flag, "relative", and to display at least a few node ids.
1a75a6c8 2769 */
948927ee 2770void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1a75a6c8
CL
2771{
2772 char *p = buffer;
948927ee
DR
2773 nodemask_t nodes = NODE_MASK_NONE;
2774 unsigned short mode = MPOL_DEFAULT;
2775 unsigned short flags = 0;
2291990a 2776
8790c71a 2777 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
bea904d5 2778 mode = pol->mode;
948927ee
DR
2779 flags = pol->flags;
2780 }
bea904d5 2781
1a75a6c8
CL
2782 switch (mode) {
2783 case MPOL_DEFAULT:
1a75a6c8 2784 break;
1a75a6c8 2785 case MPOL_PREFERRED:
fc36b8d3 2786 if (flags & MPOL_F_LOCAL)
f2a07f40 2787 mode = MPOL_LOCAL;
53f2556b 2788 else
fc36b8d3 2789 node_set(pol->v.preferred_node, nodes);
1a75a6c8 2790 break;
1a75a6c8 2791 case MPOL_BIND:
1a75a6c8 2792 case MPOL_INTERLEAVE:
f2a07f40 2793 nodes = pol->v.nodes;
1a75a6c8 2794 break;
1a75a6c8 2795 default:
948927ee
DR
2796 WARN_ON_ONCE(1);
2797 snprintf(p, maxlen, "unknown");
2798 return;
1a75a6c8
CL
2799 }
2800
b7a9f420 2801 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
1a75a6c8 2802
fc36b8d3 2803 if (flags & MPOL_MODE_FLAGS) {
948927ee 2804 p += snprintf(p, buffer + maxlen - p, "=");
f5b087b5 2805
2291990a
LS
2806 /*
2807 * Currently, the only defined flags are mutually exclusive
2808 */
f5b087b5 2809 if (flags & MPOL_F_STATIC_NODES)
2291990a
LS
2810 p += snprintf(p, buffer + maxlen - p, "static");
2811 else if (flags & MPOL_F_RELATIVE_NODES)
2812 p += snprintf(p, buffer + maxlen - p, "relative");
f5b087b5
DR
2813 }
2814
9e763e0f
TH
2815 if (!nodes_empty(nodes))
2816 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
2817 nodemask_pr_args(&nodes));
1a75a6c8 2818}
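/*
 * Editor's examples of strings produced by mpol_to_str() (node ids are
 * assumptions): "default", "prefer:1", "bind:0-3", "interleave=relative:0,2"
 * and "local". A buffer of the recommended 32 bytes comfortably holds the
 * longest mode and flag plus a short nodelist.
 */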