mm/mempolicy: remove redundant check in get_nodes
1da177e4
LT
1/*
2 * Simple NUMA memory policy for the Linux kernel.
3 *
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
8bccd85f 5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
1da177e4
LT
6 * Subject to the GNU Public License, version 2.
7 *
 8 * NUMA policy allows the user to give hints about the node(s) on which
 9 * memory should be allocated.
10 *
11 * Support four policies per VMA and per process:
12 *
13 * The VMA policy has priority over the process policy for a page fault.
14 *
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
 19 * for anonymous memory. For process policy a process counter
20 * is used.
8bccd85f 21 *
1da177e4
LT
22 * bind Only allocate memory on a specific set of nodes,
23 * no fallback.
8bccd85f
CL
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
27 *
1da177e4 28 * preferred Try a specific node first before normal fallback.
00ef2d2f 29 * As a special case NUMA_NO_NODE here means do the allocation
1da177e4
LT
30 * on the local CPU. This is normally identical to default,
 31 * but useful to set in a VMA when you have a non-default
32 * process policy.
8bccd85f 33 *
1da177e4
LT
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
37 *
 38 * The process policy is applied for most non-interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
42 *
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
46 *
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
 49 * on systems with highmem kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
51 *
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
54 */
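The policies above are configured from user space with set_mempolicy(2) for the per-process policy and mbind(2) for per-VMA policies. A minimal user-space sketch of that API, assuming a machine with memory on nodes 0 and 1 and libnuma's <numaif.h> wrappers (link with -lnuma):

#include <numaif.h>
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	/* Interleave this task's future allocations across nodes 0 and 1. */
	unsigned long mask = (1UL << 0) | (1UL << 1);

	if (set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask)))
		perror("set_mempolicy");

	/* Give one mapping a VMA policy that overrides the task policy:
	   bind it to node 0, moving any already-faulted pages there. */
	size_t len = 4UL << 20;
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	unsigned long node0 = 1UL << 0;

	if (buf != MAP_FAILED &&
	    mbind(buf, len, MPOL_BIND, &node0, 8 * sizeof(node0),
		  MPOL_MF_STRICT | MPOL_MF_MOVE))
		perror("mbind");

	return 0;
}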
55
56/* Notebook:
57 fix mmap readahead to honour policy and enable policy for any page cache
58 object
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
61 first item above.
62 handle mremap for shared memory (currently ignored for the policy)
63 grows down?
64 make bind policy root only? It can trigger oom much faster and the
65 kernel is not always grateful with that.
1da177e4
LT
66*/
67
b1de0d13
MH
68#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69
1da177e4
LT
70#include <linux/mempolicy.h>
71#include <linux/mm.h>
72#include <linux/highmem.h>
73#include <linux/hugetlb.h>
74#include <linux/kernel.h>
75#include <linux/sched.h>
6e84f315 76#include <linux/sched/mm.h>
6a3827d7 77#include <linux/sched/numa_balancing.h>
f719ff9b 78#include <linux/sched/task.h>
1da177e4
LT
79#include <linux/nodemask.h>
80#include <linux/cpuset.h>
1da177e4
LT
81#include <linux/slab.h>
82#include <linux/string.h>
b95f1b31 83#include <linux/export.h>
b488893a 84#include <linux/nsproxy.h>
1da177e4
LT
85#include <linux/interrupt.h>
86#include <linux/init.h>
87#include <linux/compat.h>
31367466 88#include <linux/ptrace.h>
dc9aa5b9 89#include <linux/swap.h>
1a75a6c8
CL
90#include <linux/seq_file.h>
91#include <linux/proc_fs.h>
b20a3503 92#include <linux/migrate.h>
62b61f61 93#include <linux/ksm.h>
95a402c3 94#include <linux/rmap.h>
86c3a764 95#include <linux/security.h>
dbcb0f19 96#include <linux/syscalls.h>
095f1fc4 97#include <linux/ctype.h>
6d9c285a 98#include <linux/mm_inline.h>
b24f53a0 99#include <linux/mmu_notifier.h>
b1de0d13 100#include <linux/printk.h>
c8633798 101#include <linux/swapops.h>
dc9aa5b9 102
1da177e4 103#include <asm/tlbflush.h>
7c0f6ba6 104#include <linux/uaccess.h>
1da177e4 105
62695a84
NP
106#include "internal.h"
107
38e35860 108/* Internal flags */
dc9aa5b9 109#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for contiguous vmas */
38e35860 110#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
dc9aa5b9 111
fcc234f8
PE
112static struct kmem_cache *policy_cache;
113static struct kmem_cache *sn_cache;
1da177e4 114
1da177e4
LT
 115/* Highest zone. A specific allocation for a zone below that is not
116 policied. */
6267276f 117enum zone_type policy_zone = 0;
1da177e4 118
bea904d5
LS
119/*
120 * run-time system-wide default policy => local allocation
121 */
e754d79d 122static struct mempolicy default_policy = {
1da177e4 123 .refcnt = ATOMIC_INIT(1), /* never free it */
bea904d5 124 .mode = MPOL_PREFERRED,
fc36b8d3 125 .flags = MPOL_F_LOCAL,
1da177e4
LT
126};
127
5606e387
MG
128static struct mempolicy preferred_node_policy[MAX_NUMNODES];
129
74d2c3a0 130struct mempolicy *get_task_policy(struct task_struct *p)
5606e387
MG
131{
132 struct mempolicy *pol = p->mempolicy;
f15ca78e 133 int node;
5606e387 134
f15ca78e
ON
135 if (pol)
136 return pol;
5606e387 137
f15ca78e
ON
138 node = numa_node_id();
139 if (node != NUMA_NO_NODE) {
140 pol = &preferred_node_policy[node];
141 /* preferred_node_policy is not initialised early in boot */
142 if (pol->mode)
143 return pol;
5606e387
MG
144 }
145
f15ca78e 146 return &default_policy;
5606e387
MG
147}
148
37012946
DR
149static const struct mempolicy_operations {
150 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
213980c0 151 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
37012946
DR
152} mpol_ops[MPOL_MAX];
153
f5b087b5
DR
154static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
155{
6d556294 156 return pol->flags & MPOL_MODE_FLAGS;
4c50bc01
DR
157}
158
159static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
160 const nodemask_t *rel)
161{
162 nodemask_t tmp;
163 nodes_fold(tmp, *orig, nodes_weight(*rel));
164 nodes_onto(*ret, tmp, *rel);
f5b087b5
DR
165}
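
A worked example of the relative-nodes remap done by the helper above (the node numbers are hypothetical):

	/*
	 * *orig = {0,2}, *rel = {4,5,6}, nodes_weight(*rel) == 3
	 * nodes_fold(tmp, *orig, 3)   -> tmp  = {0,2}  (bit i folded to i % 3)
	 * nodes_onto(*ret, tmp, *rel) -> *ret = {4,6}  (bit i mapped onto the
	 *                                              i-th set bit of *rel)
	 */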
166
37012946
DR
167static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
168{
169 if (nodes_empty(*nodes))
170 return -EINVAL;
171 pol->v.nodes = *nodes;
172 return 0;
173}
174
175static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
176{
177 if (!nodes)
fc36b8d3 178 pol->flags |= MPOL_F_LOCAL; /* local allocation */
37012946
DR
179 else if (nodes_empty(*nodes))
180 return -EINVAL; /* no allowed nodes */
181 else
182 pol->v.preferred_node = first_node(*nodes);
183 return 0;
184}
185
186static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
187{
859f7ef1 188 if (nodes_empty(*nodes))
37012946
DR
189 return -EINVAL;
190 pol->v.nodes = *nodes;
191 return 0;
192}
193
58568d2a
MX
194/*
195 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
196 * any, for the new policy. mpol_new() has already validated the nodes
197 * parameter with respect to the policy mode and flags. But, we need to
198 * handle an empty nodemask with MPOL_PREFERRED here.
199 *
200 * Must be called holding task's alloc_lock to protect task's mems_allowed
201 * and mempolicy. May also be called holding the mmap_semaphore for write.
202 */
4bfc4495
KH
203static int mpol_set_nodemask(struct mempolicy *pol,
204 const nodemask_t *nodes, struct nodemask_scratch *nsc)
58568d2a 205{
58568d2a
MX
206 int ret;
207
208 /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
209 if (pol == NULL)
210 return 0;
01f13bd6 211 /* Check N_MEMORY */
4bfc4495 212 nodes_and(nsc->mask1,
01f13bd6 213 cpuset_current_mems_allowed, node_states[N_MEMORY]);
58568d2a
MX
214
215 VM_BUG_ON(!nodes);
216 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
217 nodes = NULL; /* explicit local allocation */
218 else {
219 if (pol->flags & MPOL_F_RELATIVE_NODES)
859f7ef1 220 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
58568d2a 221 else
4bfc4495
KH
222 nodes_and(nsc->mask2, *nodes, nsc->mask1);
223
58568d2a
MX
224 if (mpol_store_user_nodemask(pol))
225 pol->w.user_nodemask = *nodes;
226 else
227 pol->w.cpuset_mems_allowed =
228 cpuset_current_mems_allowed;
229 }
230
4bfc4495
KH
231 if (nodes)
232 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
233 else
234 ret = mpol_ops[pol->mode].create(pol, NULL);
58568d2a
MX
235 return ret;
236}
237
238/*
 239 * This function just creates a new policy, does some checks and simple
240 * initialization. You must invoke mpol_set_nodemask() to set nodes.
241 */
028fec41
DR
242static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
243 nodemask_t *nodes)
1da177e4
LT
244{
245 struct mempolicy *policy;
246
028fec41 247 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
00ef2d2f 248 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
140d5a49 249
3e1f0645
DR
250 if (mode == MPOL_DEFAULT) {
251 if (nodes && !nodes_empty(*nodes))
37012946 252 return ERR_PTR(-EINVAL);
d3a71033 253 return NULL;
37012946 254 }
3e1f0645
DR
255 VM_BUG_ON(!nodes);
256
257 /*
258 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
259 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
260 * All other modes require a valid pointer to a non-empty nodemask.
261 */
262 if (mode == MPOL_PREFERRED) {
263 if (nodes_empty(*nodes)) {
264 if (((flags & MPOL_F_STATIC_NODES) ||
265 (flags & MPOL_F_RELATIVE_NODES)))
266 return ERR_PTR(-EINVAL);
3e1f0645 267 }
479e2802 268 } else if (mode == MPOL_LOCAL) {
8d303e44
PK
269 if (!nodes_empty(*nodes) ||
270 (flags & MPOL_F_STATIC_NODES) ||
271 (flags & MPOL_F_RELATIVE_NODES))
479e2802
PZ
272 return ERR_PTR(-EINVAL);
273 mode = MPOL_PREFERRED;
3e1f0645
DR
274 } else if (nodes_empty(*nodes))
275 return ERR_PTR(-EINVAL);
1da177e4
LT
276 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
277 if (!policy)
278 return ERR_PTR(-ENOMEM);
279 atomic_set(&policy->refcnt, 1);
45c4745a 280 policy->mode = mode;
3e1f0645 281 policy->flags = flags;
37012946 282
1da177e4 283 return policy;
37012946
DR
284}
285
52cd3b07
LS
286/* Slow path of a mpol destructor. */
287void __mpol_put(struct mempolicy *p)
288{
289 if (!atomic_dec_and_test(&p->refcnt))
290 return;
52cd3b07
LS
291 kmem_cache_free(policy_cache, p);
292}
293
213980c0 294static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
37012946
DR
295{
296}
297
213980c0 298static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
37012946
DR
299{
300 nodemask_t tmp;
301
302 if (pol->flags & MPOL_F_STATIC_NODES)
303 nodes_and(tmp, pol->w.user_nodemask, *nodes);
304 else if (pol->flags & MPOL_F_RELATIVE_NODES)
305 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
306 else {
213980c0
VB
307 nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed,
308 *nodes);
309 pol->w.cpuset_mems_allowed = tmp;
37012946 310 }
f5b087b5 311
708c1bbc
MX
312 if (nodes_empty(tmp))
313 tmp = *nodes;
314
213980c0 315 pol->v.nodes = tmp;
37012946
DR
316}
317
318static void mpol_rebind_preferred(struct mempolicy *pol,
213980c0 319 const nodemask_t *nodes)
37012946
DR
320{
321 nodemask_t tmp;
322
37012946
DR
323 if (pol->flags & MPOL_F_STATIC_NODES) {
324 int node = first_node(pol->w.user_nodemask);
325
fc36b8d3 326 if (node_isset(node, *nodes)) {
37012946 327 pol->v.preferred_node = node;
fc36b8d3
LS
328 pol->flags &= ~MPOL_F_LOCAL;
329 } else
330 pol->flags |= MPOL_F_LOCAL;
37012946
DR
331 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
332 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
333 pol->v.preferred_node = first_node(tmp);
fc36b8d3 334 } else if (!(pol->flags & MPOL_F_LOCAL)) {
37012946
DR
335 pol->v.preferred_node = node_remap(pol->v.preferred_node,
336 pol->w.cpuset_mems_allowed,
337 *nodes);
338 pol->w.cpuset_mems_allowed = *nodes;
339 }
1da177e4
LT
340}
341
708c1bbc
MX
342/*
343 * mpol_rebind_policy - Migrate a policy to a different set of nodes
344 *
213980c0
VB
345 * Per-vma policies are protected by mmap_sem. Allocations using per-task
346 * policies are protected by task->mems_allowed_seq to prevent a premature
347 * OOM/allocation failure due to parallel nodemask modification.
708c1bbc 348 */
213980c0 349static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
1d0d2680 350{
1d0d2680
DR
351 if (!pol)
352 return;
213980c0 353 if (!mpol_store_user_nodemask(pol) &&
1d0d2680
DR
354 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
355 return;
708c1bbc 356
213980c0 357 mpol_ops[pol->mode].rebind(pol, newmask);
1d0d2680
DR
358}
359
360/*
361 * Wrapper for mpol_rebind_policy() that just requires task
362 * pointer, and updates task mempolicy.
58568d2a
MX
363 *
364 * Called with task's alloc_lock held.
1d0d2680
DR
365 */
366
213980c0 367void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
1d0d2680 368{
213980c0 369 mpol_rebind_policy(tsk->mempolicy, new);
1d0d2680
DR
370}
371
372/*
373 * Rebind each vma in mm to new nodemask.
374 *
375 * Call holding a reference to mm. Takes mm->mmap_sem during call.
376 */
377
378void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
379{
380 struct vm_area_struct *vma;
381
382 down_write(&mm->mmap_sem);
383 for (vma = mm->mmap; vma; vma = vma->vm_next)
213980c0 384 mpol_rebind_policy(vma->vm_policy, new);
1d0d2680
DR
385 up_write(&mm->mmap_sem);
386}
387
37012946
DR
388static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
389 [MPOL_DEFAULT] = {
390 .rebind = mpol_rebind_default,
391 },
392 [MPOL_INTERLEAVE] = {
393 .create = mpol_new_interleave,
394 .rebind = mpol_rebind_nodemask,
395 },
396 [MPOL_PREFERRED] = {
397 .create = mpol_new_preferred,
398 .rebind = mpol_rebind_preferred,
399 },
400 [MPOL_BIND] = {
401 .create = mpol_new_bind,
402 .rebind = mpol_rebind_nodemask,
403 },
404};
405
fc301289
CL
406static void migrate_page_add(struct page *page, struct list_head *pagelist,
407 unsigned long flags);
1a75a6c8 408
6f4576e3
NH
409struct queue_pages {
410 struct list_head *pagelist;
411 unsigned long flags;
412 nodemask_t *nmask;
413 struct vm_area_struct *prev;
414};
415
88aaa2a1
NH
416/*
417 * Check if the page's nid is in qp->nmask.
418 *
419 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
420 * in the invert of qp->nmask.
421 */
422static inline bool queue_pages_required(struct page *page,
423 struct queue_pages *qp)
424{
425 int nid = page_to_nid(page);
426 unsigned long flags = qp->flags;
427
428 return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
429}
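
For instance, with a hypothetical qp->nmask = {0,1}:

	/*
	 * without MPOL_MF_INVERT: a page on node 0 or 1 satisfies the check
	 * with    MPOL_MF_INVERT: only pages residing outside {0,1} do
	 */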
430
c8633798
NH
431static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
432 unsigned long end, struct mm_walk *walk)
433{
434 int ret = 0;
435 struct page *page;
436 struct queue_pages *qp = walk->private;
437 unsigned long flags;
438
439 if (unlikely(is_pmd_migration_entry(*pmd))) {
440 ret = 1;
441 goto unlock;
442 }
443 page = pmd_page(*pmd);
444 if (is_huge_zero_page(page)) {
445 spin_unlock(ptl);
446 __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
447 goto out;
448 }
449 if (!thp_migration_supported()) {
450 get_page(page);
451 spin_unlock(ptl);
452 lock_page(page);
453 ret = split_huge_page(page);
454 unlock_page(page);
455 put_page(page);
456 goto out;
457 }
458 if (!queue_pages_required(page, qp)) {
459 ret = 1;
460 goto unlock;
461 }
462
463 ret = 1;
464 flags = qp->flags;
465 /* go to thp migration */
466 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
467 migrate_page_add(page, qp->pagelist, flags);
468unlock:
469 spin_unlock(ptl);
470out:
471 return ret;
472}
473
98094945
NH
474/*
 475 * Scan through the pages, checking whether they satisfy certain conditions,
476 * and move them to the pagelist if they do.
477 */
6f4576e3
NH
478static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
479 unsigned long end, struct mm_walk *walk)
1da177e4 480{
6f4576e3
NH
481 struct vm_area_struct *vma = walk->vma;
482 struct page *page;
483 struct queue_pages *qp = walk->private;
484 unsigned long flags = qp->flags;
c8633798 485 int ret;
91612e0d 486 pte_t *pte;
705e87c0 487 spinlock_t *ptl;
941150a3 488
c8633798
NH
489 ptl = pmd_trans_huge_lock(pmd, vma);
490 if (ptl) {
491 ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
492 if (ret)
493 return 0;
248db92d 494 }
91612e0d 495
337d9abf
NH
496 if (pmd_trans_unstable(pmd))
497 return 0;
248db92d 498retry:
6f4576e3
NH
499 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
500 for (; addr != end; pte++, addr += PAGE_SIZE) {
91612e0d 501 if (!pte_present(*pte))
1da177e4 502 continue;
6aab341e
LT
503 page = vm_normal_page(vma, addr, *pte);
504 if (!page)
1da177e4 505 continue;
053837fc 506 /*
62b61f61
HD
507 * vm_normal_page() filters out zero pages, but there might
508 * still be PageReserved pages to skip, perhaps in a VDSO.
053837fc 509 */
b79bc0a0 510 if (PageReserved(page))
f4598c8b 511 continue;
88aaa2a1 512 if (!queue_pages_required(page, qp))
38e35860 513 continue;
c8633798 514 if (PageTransCompound(page) && !thp_migration_supported()) {
248db92d
KS
515 get_page(page);
516 pte_unmap_unlock(pte, ptl);
517 lock_page(page);
518 ret = split_huge_page(page);
519 unlock_page(page);
520 put_page(page);
521 /* Failed to split -- skip. */
522 if (ret) {
523 pte = pte_offset_map_lock(walk->mm, pmd,
524 addr, &ptl);
525 continue;
526 }
527 goto retry;
528 }
38e35860 529
77bf45e7 530 migrate_page_add(page, qp->pagelist, flags);
6f4576e3
NH
531 }
532 pte_unmap_unlock(pte - 1, ptl);
533 cond_resched();
534 return 0;
91612e0d
HD
535}
536
6f4576e3
NH
537static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
538 unsigned long addr, unsigned long end,
539 struct mm_walk *walk)
e2d8cf40
NH
540{
541#ifdef CONFIG_HUGETLB_PAGE
6f4576e3
NH
542 struct queue_pages *qp = walk->private;
543 unsigned long flags = qp->flags;
e2d8cf40 544 struct page *page;
cb900f41 545 spinlock_t *ptl;
d4c54919 546 pte_t entry;
e2d8cf40 547
6f4576e3
NH
548 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
549 entry = huge_ptep_get(pte);
d4c54919
NH
550 if (!pte_present(entry))
551 goto unlock;
552 page = pte_page(entry);
88aaa2a1 553 if (!queue_pages_required(page, qp))
e2d8cf40
NH
554 goto unlock;
555 /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
556 if (flags & (MPOL_MF_MOVE_ALL) ||
557 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
6f4576e3 558 isolate_huge_page(page, qp->pagelist);
e2d8cf40 559unlock:
cb900f41 560 spin_unlock(ptl);
e2d8cf40
NH
561#else
562 BUG();
563#endif
91612e0d 564 return 0;
1da177e4
LT
565}
566
5877231f 567#ifdef CONFIG_NUMA_BALANCING
b24f53a0 568/*
4b10e7d5
MG
 569 * This is used to mark a range of virtual addresses as inaccessible.
570 * These are later cleared by a NUMA hinting fault. Depending on these
571 * faults, pages may be migrated for better NUMA placement.
572 *
573 * This is assuming that NUMA faults are handled using PROT_NONE. If
574 * an architecture makes a different choice, it will need further
575 * changes to the core.
b24f53a0 576 */
4b10e7d5
MG
577unsigned long change_prot_numa(struct vm_area_struct *vma,
578 unsigned long addr, unsigned long end)
b24f53a0 579{
4b10e7d5 580 int nr_updated;
b24f53a0 581
4d942466 582 nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
03c5a6e1
MG
583 if (nr_updated)
584 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
b24f53a0 585
4b10e7d5 586 return nr_updated;
b24f53a0
LS
587}
588#else
589static unsigned long change_prot_numa(struct vm_area_struct *vma,
590 unsigned long addr, unsigned long end)
591{
592 return 0;
593}
5877231f 594#endif /* CONFIG_NUMA_BALANCING */
b24f53a0 595
6f4576e3
NH
596static int queue_pages_test_walk(unsigned long start, unsigned long end,
597 struct mm_walk *walk)
598{
599 struct vm_area_struct *vma = walk->vma;
600 struct queue_pages *qp = walk->private;
601 unsigned long endvma = vma->vm_end;
602 unsigned long flags = qp->flags;
603
77bf45e7 604 if (!vma_migratable(vma))
48684a65
NH
605 return 1;
606
6f4576e3
NH
607 if (endvma > end)
608 endvma = end;
609 if (vma->vm_start > start)
610 start = vma->vm_start;
611
612 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
613 if (!vma->vm_next && vma->vm_end < end)
614 return -EFAULT;
615 if (qp->prev && qp->prev->vm_end < vma->vm_start)
616 return -EFAULT;
617 }
618
619 qp->prev = vma;
620
6f4576e3
NH
621 if (flags & MPOL_MF_LAZY) {
622 /* Similar to task_numa_work, skip inaccessible VMAs */
4355c018
LC
623 if (!is_vm_hugetlb_page(vma) &&
624 (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
625 !(vma->vm_flags & VM_MIXEDMAP))
6f4576e3
NH
626 change_prot_numa(vma, start, endvma);
627 return 1;
628 }
629
77bf45e7
KS
630 /* queue pages from current vma */
631 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
6f4576e3
NH
632 return 0;
633 return 1;
634}
635
dc9aa5b9 636/*
98094945
NH
637 * Walk through page tables and collect pages to be migrated.
638 *
639 * If pages found in a given range are on a set of nodes (determined by
 640 * @nodes and @flags), they are isolated and queued to the pagelist which is
 641 * passed via @private.
dc9aa5b9 642 */
d05f0cdc 643static int
98094945 644queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
6f4576e3
NH
645 nodemask_t *nodes, unsigned long flags,
646 struct list_head *pagelist)
1da177e4 647{
6f4576e3
NH
648 struct queue_pages qp = {
649 .pagelist = pagelist,
650 .flags = flags,
651 .nmask = nodes,
652 .prev = NULL,
653 };
654 struct mm_walk queue_pages_walk = {
655 .hugetlb_entry = queue_pages_hugetlb,
656 .pmd_entry = queue_pages_pte_range,
657 .test_walk = queue_pages_test_walk,
658 .mm = mm,
659 .private = &qp,
660 };
661
662 return walk_page_range(start, end, &queue_pages_walk);
1da177e4
LT
663}
664
869833f2
KM
665/*
666 * Apply policy to a single VMA
667 * This must be called with the mmap_sem held for writing.
668 */
669static int vma_replace_policy(struct vm_area_struct *vma,
670 struct mempolicy *pol)
8d34694c 671{
869833f2
KM
672 int err;
673 struct mempolicy *old;
674 struct mempolicy *new;
8d34694c
KM
675
676 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
677 vma->vm_start, vma->vm_end, vma->vm_pgoff,
678 vma->vm_ops, vma->vm_file,
679 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
680
869833f2
KM
681 new = mpol_dup(pol);
682 if (IS_ERR(new))
683 return PTR_ERR(new);
684
685 if (vma->vm_ops && vma->vm_ops->set_policy) {
8d34694c 686 err = vma->vm_ops->set_policy(vma, new);
869833f2
KM
687 if (err)
688 goto err_out;
8d34694c 689 }
869833f2
KM
690
691 old = vma->vm_policy;
692 vma->vm_policy = new; /* protected by mmap_sem */
693 mpol_put(old);
694
695 return 0;
696 err_out:
697 mpol_put(new);
8d34694c
KM
698 return err;
699}
700
1da177e4 701/* Step 2: apply policy to a range and do splits. */
9d8cebd4
KM
702static int mbind_range(struct mm_struct *mm, unsigned long start,
703 unsigned long end, struct mempolicy *new_pol)
1da177e4
LT
704{
705 struct vm_area_struct *next;
9d8cebd4
KM
706 struct vm_area_struct *prev;
707 struct vm_area_struct *vma;
708 int err = 0;
e26a5114 709 pgoff_t pgoff;
9d8cebd4
KM
710 unsigned long vmstart;
711 unsigned long vmend;
1da177e4 712
097d5910 713 vma = find_vma(mm, start);
9d8cebd4
KM
714 if (!vma || vma->vm_start > start)
715 return -EFAULT;
716
097d5910 717 prev = vma->vm_prev;
e26a5114
KM
718 if (start > vma->vm_start)
719 prev = vma;
720
9d8cebd4 721 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
1da177e4 722 next = vma->vm_next;
9d8cebd4
KM
723 vmstart = max(start, vma->vm_start);
724 vmend = min(end, vma->vm_end);
725
e26a5114
KM
726 if (mpol_equal(vma_policy(vma), new_pol))
727 continue;
728
729 pgoff = vma->vm_pgoff +
730 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
9d8cebd4 731 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
19a809af
AA
732 vma->anon_vma, vma->vm_file, pgoff,
733 new_pol, vma->vm_userfaultfd_ctx);
9d8cebd4
KM
734 if (prev) {
735 vma = prev;
736 next = vma->vm_next;
3964acd0
ON
737 if (mpol_equal(vma_policy(vma), new_pol))
738 continue;
739 /* vma_merge() joined vma && vma->next, case 8 */
740 goto replace;
9d8cebd4
KM
741 }
742 if (vma->vm_start != vmstart) {
743 err = split_vma(vma->vm_mm, vma, vmstart, 1);
744 if (err)
745 goto out;
746 }
747 if (vma->vm_end != vmend) {
748 err = split_vma(vma->vm_mm, vma, vmend, 0);
749 if (err)
750 goto out;
751 }
3964acd0 752 replace:
869833f2 753 err = vma_replace_policy(vma, new_pol);
8d34694c
KM
754 if (err)
755 goto out;
1da177e4 756 }
9d8cebd4
KM
757
758 out:
1da177e4
LT
759 return err;
760}
761
1da177e4 762/* Set the process memory policy */
028fec41
DR
763static long do_set_mempolicy(unsigned short mode, unsigned short flags,
764 nodemask_t *nodes)
1da177e4 765{
58568d2a 766 struct mempolicy *new, *old;
4bfc4495 767 NODEMASK_SCRATCH(scratch);
58568d2a 768 int ret;
1da177e4 769
4bfc4495
KH
770 if (!scratch)
771 return -ENOMEM;
f4e53d91 772
4bfc4495
KH
773 new = mpol_new(mode, flags, nodes);
774 if (IS_ERR(new)) {
775 ret = PTR_ERR(new);
776 goto out;
777 }
2c7c3a7d 778
58568d2a 779 task_lock(current);
4bfc4495 780 ret = mpol_set_nodemask(new, nodes, scratch);
58568d2a
MX
781 if (ret) {
782 task_unlock(current);
58568d2a 783 mpol_put(new);
4bfc4495 784 goto out;
58568d2a
MX
785 }
786 old = current->mempolicy;
1da177e4 787 current->mempolicy = new;
45816682
VB
788 if (new && new->mode == MPOL_INTERLEAVE)
789 current->il_prev = MAX_NUMNODES-1;
58568d2a 790 task_unlock(current);
58568d2a 791 mpol_put(old);
4bfc4495
KH
792 ret = 0;
793out:
794 NODEMASK_SCRATCH_FREE(scratch);
795 return ret;
1da177e4
LT
796}
797
bea904d5
LS
798/*
799 * Return nodemask for policy for get_mempolicy() query
58568d2a
MX
800 *
801 * Called with task's alloc_lock held
bea904d5
LS
802 */
803static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
1da177e4 804{
dfcd3c0d 805 nodes_clear(*nodes);
bea904d5
LS
806 if (p == &default_policy)
807 return;
808
45c4745a 809 switch (p->mode) {
19770b32
MG
810 case MPOL_BIND:
811 /* Fall through */
1da177e4 812 case MPOL_INTERLEAVE:
dfcd3c0d 813 *nodes = p->v.nodes;
1da177e4
LT
814 break;
815 case MPOL_PREFERRED:
fc36b8d3 816 if (!(p->flags & MPOL_F_LOCAL))
dfcd3c0d 817 node_set(p->v.preferred_node, *nodes);
53f2556b 818 /* else return empty node mask for local allocation */
1da177e4
LT
819 break;
820 default:
821 BUG();
822 }
823}
824
d4edcf0d 825static int lookup_node(unsigned long addr)
1da177e4
LT
826{
827 struct page *p;
828 int err;
829
768ae309 830 err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
1da177e4
LT
831 if (err >= 0) {
832 err = page_to_nid(p);
833 put_page(p);
834 }
835 return err;
836}
837
1da177e4 838/* Retrieve NUMA policy */
dbcb0f19
AB
839static long do_get_mempolicy(int *policy, nodemask_t *nmask,
840 unsigned long addr, unsigned long flags)
1da177e4 841{
8bccd85f 842 int err;
1da177e4
LT
843 struct mm_struct *mm = current->mm;
844 struct vm_area_struct *vma = NULL;
845 struct mempolicy *pol = current->mempolicy;
846
754af6f5
LS
847 if (flags &
848 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
1da177e4 849 return -EINVAL;
754af6f5
LS
850
851 if (flags & MPOL_F_MEMS_ALLOWED) {
852 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
853 return -EINVAL;
854 *policy = 0; /* just so it's initialized */
58568d2a 855 task_lock(current);
754af6f5 856 *nmask = cpuset_current_mems_allowed;
58568d2a 857 task_unlock(current);
754af6f5
LS
858 return 0;
859 }
860
1da177e4 861 if (flags & MPOL_F_ADDR) {
bea904d5
LS
862 /*
863 * Do NOT fall back to task policy if the
864 * vma/shared policy at addr is NULL. We
865 * want to return MPOL_DEFAULT in this case.
866 */
1da177e4
LT
867 down_read(&mm->mmap_sem);
868 vma = find_vma_intersection(mm, addr, addr+1);
869 if (!vma) {
870 up_read(&mm->mmap_sem);
871 return -EFAULT;
872 }
873 if (vma->vm_ops && vma->vm_ops->get_policy)
874 pol = vma->vm_ops->get_policy(vma, addr);
875 else
876 pol = vma->vm_policy;
877 } else if (addr)
878 return -EINVAL;
879
880 if (!pol)
bea904d5 881 pol = &default_policy; /* indicates default behavior */
1da177e4
LT
882
883 if (flags & MPOL_F_NODE) {
884 if (flags & MPOL_F_ADDR) {
d4edcf0d 885 err = lookup_node(addr);
1da177e4
LT
886 if (err < 0)
887 goto out;
8bccd85f 888 *policy = err;
1da177e4 889 } else if (pol == current->mempolicy &&
45c4745a 890 pol->mode == MPOL_INTERLEAVE) {
45816682 891 *policy = next_node_in(current->il_prev, pol->v.nodes);
1da177e4
LT
892 } else {
893 err = -EINVAL;
894 goto out;
895 }
bea904d5
LS
896 } else {
897 *policy = pol == &default_policy ? MPOL_DEFAULT :
898 pol->mode;
d79df630
DR
899 /*
900 * Internal mempolicy flags must be masked off before exposing
901 * the policy to userspace.
902 */
903 *policy |= (pol->flags & MPOL_MODE_FLAGS);
bea904d5 904 }
1da177e4 905
1da177e4 906 err = 0;
58568d2a 907 if (nmask) {
c6b6ef8b
LS
908 if (mpol_store_user_nodemask(pol)) {
909 *nmask = pol->w.user_nodemask;
910 } else {
911 task_lock(current);
912 get_policy_nodemask(pol, nmask);
913 task_unlock(current);
914 }
58568d2a 915 }
1da177e4
LT
916
917 out:
52cd3b07 918 mpol_cond_put(pol);
1da177e4
LT
919 if (vma)
920 up_read(&current->mm->mmap_sem);
921 return err;
922}
923
b20a3503 924#ifdef CONFIG_MIGRATION
6ce3c4c0 925/*
c8633798 926 * page migration, thp tail pages can be passed.
6ce3c4c0 927 */
fc301289
CL
928static void migrate_page_add(struct page *page, struct list_head *pagelist,
929 unsigned long flags)
6ce3c4c0 930{
c8633798 931 struct page *head = compound_head(page);
6ce3c4c0 932 /*
fc301289 933 * Avoid migrating a page that is shared with others.
6ce3c4c0 934 */
c8633798
NH
935 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
936 if (!isolate_lru_page(head)) {
937 list_add_tail(&head->lru, pagelist);
938 mod_node_page_state(page_pgdat(head),
939 NR_ISOLATED_ANON + page_is_file_cache(head),
940 hpage_nr_pages(head));
62695a84
NP
941 }
942 }
7e2ab150 943}
6ce3c4c0 944
742755a1 945static struct page *new_node_page(struct page *page, unsigned long node, int **x)
95a402c3 946{
e2d8cf40
NH
947 if (PageHuge(page))
948 return alloc_huge_page_node(page_hstate(compound_head(page)),
949 node);
c8633798
NH
950 else if (thp_migration_supported() && PageTransHuge(page)) {
951 struct page *thp;
952
953 thp = alloc_pages_node(node,
954 (GFP_TRANSHUGE | __GFP_THISNODE),
955 HPAGE_PMD_ORDER);
956 if (!thp)
957 return NULL;
958 prep_transhuge_page(thp);
959 return thp;
960 } else
96db800f 961 return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
b360edb4 962 __GFP_THISNODE, 0);
95a402c3
CL
963}
964
7e2ab150
CL
965/*
966 * Migrate pages from one node to a target node.
967 * Returns error or the number of pages not migrated.
968 */
dbcb0f19
AB
969static int migrate_to_node(struct mm_struct *mm, int source, int dest,
970 int flags)
7e2ab150
CL
971{
972 nodemask_t nmask;
973 LIST_HEAD(pagelist);
974 int err = 0;
975
976 nodes_clear(nmask);
977 node_set(source, nmask);
6ce3c4c0 978
08270807
MK
979 /*
980 * This does not "check" the range but isolates all pages that
981 * need migration. Between passing in the full user address
982 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
983 */
984 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
98094945 985 queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
7e2ab150
CL
986 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
987
cf608ac1 988 if (!list_empty(&pagelist)) {
68711a74 989 err = migrate_pages(&pagelist, new_node_page, NULL, dest,
9c620e2b 990 MIGRATE_SYNC, MR_SYSCALL);
cf608ac1 991 if (err)
e2d8cf40 992 putback_movable_pages(&pagelist);
cf608ac1 993 }
95a402c3 994
7e2ab150 995 return err;
6ce3c4c0
CL
996}
997
39743889 998/*
7e2ab150
CL
999 * Move pages between the two nodesets so as to preserve the physical
1000 * layout as much as possible.
39743889
CL
1001 *
 1002 * Returns the number of pages that could not be moved.
1003 */
0ce72d4f
AM
1004int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1005 const nodemask_t *to, int flags)
39743889 1006{
7e2ab150 1007 int busy = 0;
0aedadf9 1008 int err;
7e2ab150 1009 nodemask_t tmp;
39743889 1010
0aedadf9
CL
1011 err = migrate_prep();
1012 if (err)
1013 return err;
1014
53f2556b 1015 down_read(&mm->mmap_sem);
39743889 1016
da0aa138
KM
1017 /*
1018 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1019 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1020 * bit in 'tmp', and return that <source, dest> pair for migration.
1021 * The pair of nodemasks 'to' and 'from' define the map.
1022 *
1023 * If no pair of bits is found that way, fallback to picking some
1024 * pair of 'source' and 'dest' bits that are not the same. If the
1025 * 'source' and 'dest' bits are the same, this represents a node
1026 * that will be migrating to itself, so no pages need move.
1027 *
1028 * If no bits are left in 'tmp', or if all remaining bits left
1029 * in 'tmp' correspond to the same bit in 'to', return false
1030 * (nothing left to migrate).
1031 *
1032 * This lets us pick a pair of nodes to migrate between, such that
1033 * if possible the dest node is not already occupied by some other
1034 * source node, minimizing the risk of overloading the memory on a
1035 * node that would happen if we migrated incoming memory to a node
 1036 * before migrating the outgoing memory off that same node.
1037 *
1038 * A single scan of tmp is sufficient. As we go, we remember the
1039 * most recent <s, d> pair that moved (s != d). If we find a pair
1040 * that not only moved, but what's better, moved to an empty slot
1041 * (d is not set in tmp), then we break out then, with that pair.
ae0e47f0 1042 * Otherwise when we finish scanning from_tmp, we at least have the
da0aa138
KM
1043 * most recent <s, d> pair that moved. If we get all the way through
1044 * the scan of tmp without finding any node that moved, much less
1045 * moved to an empty node, then there is nothing left worth migrating.
1046 */
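	/*
	 * A worked illustration with hypothetical nodemasks: *from = {0,1},
	 * *to = {1,2}. Scanning tmp = {0,1}: s = 0 remaps to d = 1, but node 1
	 * is still set in tmp, so the pair is only remembered; s = 1 remaps to
	 * d = 2, which is not in tmp, so we break out and migrate 1 -> 2 first.
	 * The next pass migrates 0 -> 1 into the node just vacated.
	 */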
d4984711 1047
0ce72d4f 1048 tmp = *from;
7e2ab150
CL
1049 while (!nodes_empty(tmp)) {
1050 int s,d;
b76ac7e7 1051 int source = NUMA_NO_NODE;
7e2ab150
CL
1052 int dest = 0;
1053
1054 for_each_node_mask(s, tmp) {
4a5b18cc
LW
1055
1056 /*
1057 * do_migrate_pages() tries to maintain the relative
1058 * node relationship of the pages established between
1059 * threads and memory areas.
1060 *
1061 * However if the number of source nodes is not equal to
1062 * the number of destination nodes we can not preserve
1063 * this node relative relationship. In that case, skip
1064 * copying memory from a node that is in the destination
1065 * mask.
1066 *
1067 * Example: [2,3,4] -> [3,4,5] moves everything.
1068 * [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1069 */
1070
0ce72d4f
AM
1071 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1072 (node_isset(s, *to)))
4a5b18cc
LW
1073 continue;
1074
0ce72d4f 1075 d = node_remap(s, *from, *to);
7e2ab150
CL
1076 if (s == d)
1077 continue;
1078
1079 source = s; /* Node moved. Memorize */
1080 dest = d;
1081
1082 /* dest not in remaining from nodes? */
1083 if (!node_isset(dest, tmp))
1084 break;
1085 }
b76ac7e7 1086 if (source == NUMA_NO_NODE)
7e2ab150
CL
1087 break;
1088
1089 node_clear(source, tmp);
1090 err = migrate_to_node(mm, source, dest, flags);
1091 if (err > 0)
1092 busy += err;
1093 if (err < 0)
1094 break;
39743889
CL
1095 }
1096 up_read(&mm->mmap_sem);
7e2ab150
CL
1097 if (err < 0)
1098 return err;
1099 return busy;
b20a3503
CL
1100
1101}
1102
3ad33b24
LS
1103/*
1104 * Allocate a new page for page migration based on vma policy.
d05f0cdc 1105 * Start by assuming the page is mapped by the same vma that contains @start.
3ad33b24
LS
1106 * Search forward from there, if not. N.B., this assumes that the
1107 * list of pages handed to migrate_pages()--which is how we get here--
1108 * is in virtual address order.
1109 */
d05f0cdc 1110static struct page *new_page(struct page *page, unsigned long start, int **x)
95a402c3 1111{
d05f0cdc 1112 struct vm_area_struct *vma;
3ad33b24 1113 unsigned long uninitialized_var(address);
95a402c3 1114
d05f0cdc 1115 vma = find_vma(current->mm, start);
3ad33b24
LS
1116 while (vma) {
1117 address = page_address_in_vma(page, vma);
1118 if (address != -EFAULT)
1119 break;
1120 vma = vma->vm_next;
1121 }
11c731e8
WL
1122
1123 if (PageHuge(page)) {
cc81717e
MH
1124 BUG_ON(!vma);
1125 return alloc_huge_page_noerr(vma, address, 1);
c8633798
NH
1126 } else if (thp_migration_supported() && PageTransHuge(page)) {
1127 struct page *thp;
1128
1129 thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1130 HPAGE_PMD_ORDER);
1131 if (!thp)
1132 return NULL;
1133 prep_transhuge_page(thp);
1134 return thp;
11c731e8 1135 }
0bf598d8 1136 /*
11c731e8 1137 * if !vma, alloc_page_vma() will use task or system default policy
0bf598d8 1138 */
0f556856
MH
1139 return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
1140 vma, address);
95a402c3 1141}
b20a3503
CL
1142#else
1143
1144static void migrate_page_add(struct page *page, struct list_head *pagelist,
1145 unsigned long flags)
1146{
39743889
CL
1147}
1148
0ce72d4f
AM
1149int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1150 const nodemask_t *to, int flags)
b20a3503
CL
1151{
1152 return -ENOSYS;
1153}
95a402c3 1154
d05f0cdc 1155static struct page *new_page(struct page *page, unsigned long start, int **x)
95a402c3
CL
1156{
1157 return NULL;
1158}
b20a3503
CL
1159#endif
1160
dbcb0f19 1161static long do_mbind(unsigned long start, unsigned long len,
028fec41
DR
1162 unsigned short mode, unsigned short mode_flags,
1163 nodemask_t *nmask, unsigned long flags)
6ce3c4c0 1164{
6ce3c4c0
CL
1165 struct mm_struct *mm = current->mm;
1166 struct mempolicy *new;
1167 unsigned long end;
1168 int err;
1169 LIST_HEAD(pagelist);
1170
b24f53a0 1171 if (flags & ~(unsigned long)MPOL_MF_VALID)
6ce3c4c0 1172 return -EINVAL;
74c00241 1173 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
6ce3c4c0
CL
1174 return -EPERM;
1175
1176 if (start & ~PAGE_MASK)
1177 return -EINVAL;
1178
1179 if (mode == MPOL_DEFAULT)
1180 flags &= ~MPOL_MF_STRICT;
1181
1182 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1183 end = start + len;
1184
1185 if (end < start)
1186 return -EINVAL;
1187 if (end == start)
1188 return 0;
1189
028fec41 1190 new = mpol_new(mode, mode_flags, nmask);
6ce3c4c0
CL
1191 if (IS_ERR(new))
1192 return PTR_ERR(new);
1193
b24f53a0
LS
1194 if (flags & MPOL_MF_LAZY)
1195 new->flags |= MPOL_F_MOF;
1196
6ce3c4c0
CL
1197 /*
1198 * If we are using the default policy then operation
1199 * on discontinuous address spaces is okay after all
1200 */
1201 if (!new)
1202 flags |= MPOL_MF_DISCONTIG_OK;
1203
028fec41
DR
1204 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1205 start, start + len, mode, mode_flags,
00ef2d2f 1206 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
6ce3c4c0 1207
0aedadf9
CL
1208 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1209
1210 err = migrate_prep();
1211 if (err)
b05ca738 1212 goto mpol_out;
0aedadf9 1213 }
4bfc4495
KH
1214 {
1215 NODEMASK_SCRATCH(scratch);
1216 if (scratch) {
1217 down_write(&mm->mmap_sem);
1218 task_lock(current);
1219 err = mpol_set_nodemask(new, nmask, scratch);
1220 task_unlock(current);
1221 if (err)
1222 up_write(&mm->mmap_sem);
1223 } else
1224 err = -ENOMEM;
1225 NODEMASK_SCRATCH_FREE(scratch);
1226 }
b05ca738
KM
1227 if (err)
1228 goto mpol_out;
1229
d05f0cdc 1230 err = queue_pages_range(mm, start, end, nmask,
6ce3c4c0 1231 flags | MPOL_MF_INVERT, &pagelist);
d05f0cdc 1232 if (!err)
9d8cebd4 1233 err = mbind_range(mm, start, end, new);
7e2ab150 1234
b24f53a0
LS
1235 if (!err) {
1236 int nr_failed = 0;
1237
cf608ac1 1238 if (!list_empty(&pagelist)) {
b24f53a0 1239 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
d05f0cdc
HD
1240 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1241 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
cf608ac1 1242 if (nr_failed)
74060e4d 1243 putback_movable_pages(&pagelist);
cf608ac1 1244 }
6ce3c4c0 1245
b24f53a0 1246 if (nr_failed && (flags & MPOL_MF_STRICT))
6ce3c4c0 1247 err = -EIO;
ab8a3e14 1248 } else
b0e5fd73 1249 putback_movable_pages(&pagelist);
b20a3503 1250
6ce3c4c0 1251 up_write(&mm->mmap_sem);
b05ca738 1252 mpol_out:
f0be3d32 1253 mpol_put(new);
6ce3c4c0
CL
1254 return err;
1255}
1256
8bccd85f
CL
1257/*
1258 * User space interface with variable sized bitmaps for nodelists.
1259 */
1260
1261/* Copy a node mask from user space. */
39743889 1262static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
8bccd85f
CL
1263 unsigned long maxnode)
1264{
1265 unsigned long k;
1266 unsigned long nlongs;
1267 unsigned long endmask;
1268
1269 --maxnode;
1270 nodes_clear(*nodes);
1271 if (maxnode == 0 || !nmask)
1272 return 0;
a9c930ba 1273 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
636f13c1 1274 return -EINVAL;
8bccd85f
CL
1275
1276 nlongs = BITS_TO_LONGS(maxnode);
1277 if ((maxnode % BITS_PER_LONG) == 0)
1278 endmask = ~0UL;
1279 else
1280 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1281
 1282 /* When the user specified more nodes than supported just check
 1283 if the unsupported part is all zero. */
1284 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
8bccd85f
CL
1285 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1286 unsigned long t;
1287 if (get_user(t, nmask + k))
1288 return -EFAULT;
1289 if (k == nlongs - 1) {
1290 if (t & endmask)
1291 return -EINVAL;
1292 } else if (t)
1293 return -EINVAL;
1294 }
1295 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1296 endmask = ~0UL;
1297 }
1298
1299 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1300 return -EFAULT;
1301 nodes_addr(*nodes)[nlongs-1] &= endmask;
1302 return 0;
1303}
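
A worked example of the arithmetic above, assuming a 64-bit kernel (BITS_PER_LONG == 64) and MAX_NUMNODES of at least 64:

	/*
	 * caller passes maxnode = 33          -> maxnode = 32 after --maxnode
	 * nlongs  = BITS_TO_LONGS(32) = 1     -> copy one unsigned long
	 * endmask = (1UL << 32) - 1           -> keep node bits 0..31 only
	 *
	 * With maxnode = 65 instead: maxnode becomes 64, 64 % 64 == 0, so
	 * endmask = ~0UL and the whole copied word is kept.
	 */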
1304
1305/* Copy a kernel node mask to user space */
1306static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1307 nodemask_t *nodes)
1308{
1309 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1310 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1311
1312 if (copy > nbytes) {
1313 if (copy > PAGE_SIZE)
1314 return -EINVAL;
1315 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1316 return -EFAULT;
1317 copy = nbytes;
1318 }
1319 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1320}
1321
938bb9f5 1322SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
f7f28ca9 1323 unsigned long, mode, const unsigned long __user *, nmask,
938bb9f5 1324 unsigned long, maxnode, unsigned, flags)
8bccd85f
CL
1325{
1326 nodemask_t nodes;
1327 int err;
028fec41 1328 unsigned short mode_flags;
8bccd85f 1329
028fec41
DR
1330 mode_flags = mode & MPOL_MODE_FLAGS;
1331 mode &= ~MPOL_MODE_FLAGS;
a3b51e01
DR
1332 if (mode >= MPOL_MAX)
1333 return -EINVAL;
4c50bc01
DR
1334 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1335 (mode_flags & MPOL_F_RELATIVE_NODES))
1336 return -EINVAL;
8bccd85f
CL
1337 err = get_nodes(&nodes, nmask, maxnode);
1338 if (err)
1339 return err;
028fec41 1340 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
8bccd85f
CL
1341}
1342
1343/* Set the process memory policy */
23c8902d 1344SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
938bb9f5 1345 unsigned long, maxnode)
8bccd85f
CL
1346{
1347 int err;
1348 nodemask_t nodes;
028fec41 1349 unsigned short flags;
8bccd85f 1350
028fec41
DR
1351 flags = mode & MPOL_MODE_FLAGS;
1352 mode &= ~MPOL_MODE_FLAGS;
1353 if ((unsigned int)mode >= MPOL_MAX)
8bccd85f 1354 return -EINVAL;
4c50bc01
DR
1355 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1356 return -EINVAL;
8bccd85f
CL
1357 err = get_nodes(&nodes, nmask, maxnode);
1358 if (err)
1359 return err;
028fec41 1360 return do_set_mempolicy(mode, flags, &nodes);
8bccd85f
CL
1361}
1362
938bb9f5
HC
1363SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1364 const unsigned long __user *, old_nodes,
1365 const unsigned long __user *, new_nodes)
39743889 1366{
596d7cfa 1367 struct mm_struct *mm = NULL;
39743889 1368 struct task_struct *task;
39743889
CL
1369 nodemask_t task_nodes;
1370 int err;
596d7cfa
KM
1371 nodemask_t *old;
1372 nodemask_t *new;
1373 NODEMASK_SCRATCH(scratch);
1374
1375 if (!scratch)
1376 return -ENOMEM;
39743889 1377
596d7cfa
KM
1378 old = &scratch->mask1;
1379 new = &scratch->mask2;
1380
1381 err = get_nodes(old, old_nodes, maxnode);
39743889 1382 if (err)
596d7cfa 1383 goto out;
39743889 1384
596d7cfa 1385 err = get_nodes(new, new_nodes, maxnode);
39743889 1386 if (err)
596d7cfa 1387 goto out;
39743889
CL
1388
1389 /* Find the mm_struct */
55cfaa3c 1390 rcu_read_lock();
228ebcbe 1391 task = pid ? find_task_by_vpid(pid) : current;
39743889 1392 if (!task) {
55cfaa3c 1393 rcu_read_unlock();
596d7cfa
KM
1394 err = -ESRCH;
1395 goto out;
39743889 1396 }
3268c63e 1397 get_task_struct(task);
39743889 1398
596d7cfa 1399 err = -EINVAL;
39743889
CL
1400
1401 /*
31367466
OE
1402 * Check if this process has the right to modify the specified process.
1403 * Use the regular "ptrace_may_access()" checks.
39743889 1404 */
31367466 1405 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
c69e8d9c 1406 rcu_read_unlock();
39743889 1407 err = -EPERM;
3268c63e 1408 goto out_put;
39743889 1409 }
c69e8d9c 1410 rcu_read_unlock();
39743889
CL
1411
1412 task_nodes = cpuset_mems_allowed(task);
1413 /* Is the user allowed to access the target nodes? */
596d7cfa 1414 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
39743889 1415 err = -EPERM;
3268c63e 1416 goto out_put;
39743889
CL
1417 }
1418
01f13bd6 1419 if (!nodes_subset(*new, node_states[N_MEMORY])) {
3b42d28b 1420 err = -EINVAL;
3268c63e 1421 goto out_put;
3b42d28b
CL
1422 }
1423
86c3a764
DQ
1424 err = security_task_movememory(task);
1425 if (err)
3268c63e 1426 goto out_put;
86c3a764 1427
3268c63e
CL
1428 mm = get_task_mm(task);
1429 put_task_struct(task);
f2a9ef88
SL
1430
1431 if (!mm) {
3268c63e 1432 err = -EINVAL;
f2a9ef88
SL
1433 goto out;
1434 }
1435
1436 err = do_migrate_pages(mm, old, new,
1437 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
3268c63e
CL
1438
1439 mmput(mm);
1440out:
596d7cfa
KM
1441 NODEMASK_SCRATCH_FREE(scratch);
1442
39743889 1443 return err;
3268c63e
CL
1444
1445out_put:
1446 put_task_struct(task);
1447 goto out;
1448
39743889
CL
1449}
1450
1451
8bccd85f 1452/* Retrieve NUMA policy */
938bb9f5
HC
1453SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1454 unsigned long __user *, nmask, unsigned long, maxnode,
1455 unsigned long, addr, unsigned long, flags)
8bccd85f 1456{
dbcb0f19
AB
1457 int err;
1458 int uninitialized_var(pval);
8bccd85f
CL
1459 nodemask_t nodes;
1460
1461 if (nmask != NULL && maxnode < MAX_NUMNODES)
1462 return -EINVAL;
1463
1464 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1465
1466 if (err)
1467 return err;
1468
1469 if (policy && put_user(pval, policy))
1470 return -EFAULT;
1471
1472 if (nmask)
1473 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1474
1475 return err;
1476}
1477
1da177e4
LT
1478#ifdef CONFIG_COMPAT
1479
c93e0f6c
HC
1480COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1481 compat_ulong_t __user *, nmask,
1482 compat_ulong_t, maxnode,
1483 compat_ulong_t, addr, compat_ulong_t, flags)
1da177e4
LT
1484{
1485 long err;
1486 unsigned long __user *nm = NULL;
1487 unsigned long nr_bits, alloc_size;
1488 DECLARE_BITMAP(bm, MAX_NUMNODES);
1489
1490 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1491 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1492
1493 if (nmask)
1494 nm = compat_alloc_user_space(alloc_size);
1495
1496 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1497
1498 if (!err && nmask) {
2bbff6c7
KH
1499 unsigned long copy_size;
1500 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1501 err = copy_from_user(bm, nm, copy_size);
1da177e4
LT
1502 /* ensure entire bitmap is zeroed */
1503 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1504 err |= compat_put_bitmap(nmask, bm, nr_bits);
1505 }
1506
1507 return err;
1508}
1509
c93e0f6c
HC
1510COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1511 compat_ulong_t, maxnode)
1da177e4 1512{
1da177e4
LT
1513 unsigned long __user *nm = NULL;
1514 unsigned long nr_bits, alloc_size;
1515 DECLARE_BITMAP(bm, MAX_NUMNODES);
1516
1517 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1518 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1519
1520 if (nmask) {
cf01fb99
CS
1521 if (compat_get_bitmap(bm, nmask, nr_bits))
1522 return -EFAULT;
1da177e4 1523 nm = compat_alloc_user_space(alloc_size);
cf01fb99
CS
1524 if (copy_to_user(nm, bm, alloc_size))
1525 return -EFAULT;
1da177e4
LT
1526 }
1527
1da177e4
LT
1528 return sys_set_mempolicy(mode, nm, nr_bits+1);
1529}
1530
c93e0f6c
HC
1531COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1532 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1533 compat_ulong_t, maxnode, compat_ulong_t, flags)
1da177e4 1534{
1da177e4
LT
1535 unsigned long __user *nm = NULL;
1536 unsigned long nr_bits, alloc_size;
dfcd3c0d 1537 nodemask_t bm;
1da177e4
LT
1538
1539 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1540 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1541
1542 if (nmask) {
cf01fb99
CS
1543 if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1544 return -EFAULT;
1da177e4 1545 nm = compat_alloc_user_space(alloc_size);
cf01fb99
CS
1546 if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1547 return -EFAULT;
1da177e4
LT
1548 }
1549
1da177e4
LT
1550 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1551}
1552
1553#endif
1554
74d2c3a0
ON
1555struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1556 unsigned long addr)
1da177e4 1557{
8d90274b 1558 struct mempolicy *pol = NULL;
1da177e4
LT
1559
1560 if (vma) {
480eccf9 1561 if (vma->vm_ops && vma->vm_ops->get_policy) {
8d90274b 1562 pol = vma->vm_ops->get_policy(vma, addr);
00442ad0 1563 } else if (vma->vm_policy) {
1da177e4 1564 pol = vma->vm_policy;
00442ad0
MG
1565
1566 /*
1567 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1568 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1569 * count on these policies which will be dropped by
1570 * mpol_cond_put() later
1571 */
1572 if (mpol_needs_cond_ref(pol))
1573 mpol_get(pol);
1574 }
1da177e4 1575 }
f15ca78e 1576
74d2c3a0
ON
1577 return pol;
1578}
1579
1580/*
dd6eecb9 1581 * get_vma_policy(@vma, @addr)
74d2c3a0
ON
1582 * @vma: virtual memory area whose policy is sought
1583 * @addr: address in @vma for shared policy lookup
1584 *
1585 * Returns effective policy for a VMA at specified address.
dd6eecb9 1586 * Falls back to current->mempolicy or system default policy, as necessary.
74d2c3a0
ON
1587 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1588 * count--added by the get_policy() vm_op, as appropriate--to protect against
1589 * freeing by another task. It is the caller's responsibility to free the
1590 * extra reference for shared policies.
1591 */
dd6eecb9
ON
1592static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1593 unsigned long addr)
74d2c3a0
ON
1594{
1595 struct mempolicy *pol = __get_vma_policy(vma, addr);
1596
8d90274b 1597 if (!pol)
dd6eecb9 1598 pol = get_task_policy(current);
8d90274b 1599
1da177e4
LT
1600 return pol;
1601}
1602
6b6482bb 1603bool vma_policy_mof(struct vm_area_struct *vma)
fc314724 1604{
6b6482bb 1605 struct mempolicy *pol;
fc314724 1606
6b6482bb
ON
1607 if (vma->vm_ops && vma->vm_ops->get_policy) {
1608 bool ret = false;
fc314724 1609
6b6482bb
ON
1610 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1611 if (pol && (pol->flags & MPOL_F_MOF))
1612 ret = true;
1613 mpol_cond_put(pol);
8d90274b 1614
6b6482bb 1615 return ret;
fc314724
MG
1616 }
1617
6b6482bb 1618 pol = vma->vm_policy;
8d90274b 1619 if (!pol)
6b6482bb 1620 pol = get_task_policy(current);
8d90274b 1621
fc314724
MG
1622 return pol->flags & MPOL_F_MOF;
1623}
1624
d3eb1570
LJ
1625static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1626{
1627 enum zone_type dynamic_policy_zone = policy_zone;
1628
1629 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1630
1631 /*
1632 * if policy->v.nodes has movable memory only,
1633 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1634 *
 1635 * policy->v.nodes is intersected with node_states[N_MEMORY],
 1636 * so if the following test fails, it implies that
1637 * policy->v.nodes has movable memory only.
1638 */
1639 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1640 dynamic_policy_zone = ZONE_MOVABLE;
1641
1642 return zone >= dynamic_policy_zone;
1643}
1644
52cd3b07
LS
1645/*
1646 * Return a nodemask representing a mempolicy for filtering nodes for
1647 * page allocation
1648 */
1649static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
19770b32
MG
1650{
1651 /* Lower zones don't get a nodemask applied for MPOL_BIND */
45c4745a 1652 if (unlikely(policy->mode == MPOL_BIND) &&
d3eb1570 1653 apply_policy_zone(policy, gfp_zone(gfp)) &&
19770b32
MG
1654 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1655 return &policy->v.nodes;
1656
1657 return NULL;
1658}
1659
04ec6264
VB
1660/* Return the node id preferred by the given mempolicy, or the given id */
1661static int policy_node(gfp_t gfp, struct mempolicy *policy,
1662 int nd)
1da177e4 1663{
6d840958
MH
1664 if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
1665 nd = policy->v.preferred_node;
1666 else {
19770b32 1667 /*
6d840958
MH
1668 * __GFP_THISNODE shouldn't even be used with the bind policy
1669 * because we might easily break the expectation to stay on the
1670 * requested node and not break the policy.
19770b32 1671 */
6d840958 1672 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1da177e4 1673 }
6d840958 1674
04ec6264 1675 return nd;
1da177e4
LT
1676}
1677
1678/* Do dynamic interleaving for a process */
1679static unsigned interleave_nodes(struct mempolicy *policy)
1680{
45816682 1681 unsigned next;
1da177e4
LT
1682 struct task_struct *me = current;
1683
45816682 1684 next = next_node_in(me->il_prev, policy->v.nodes);
f5b087b5 1685 if (next < MAX_NUMNODES)
45816682
VB
1686 me->il_prev = next;
1687 return next;
1da177e4
LT
1688}
1689
dc85da15
CL
1690/*
1691 * Depending on the memory policy provide a node from which to allocate the
1692 * next slab entry.
1693 */
2a389610 1694unsigned int mempolicy_slab_node(void)
dc85da15 1695{
e7b691b0 1696 struct mempolicy *policy;
2a389610 1697 int node = numa_mem_id();
e7b691b0
AK
1698
1699 if (in_interrupt())
2a389610 1700 return node;
e7b691b0
AK
1701
1702 policy = current->mempolicy;
fc36b8d3 1703 if (!policy || policy->flags & MPOL_F_LOCAL)
2a389610 1704 return node;
bea904d5
LS
1705
1706 switch (policy->mode) {
1707 case MPOL_PREFERRED:
fc36b8d3
LS
1708 /*
1709 * handled MPOL_F_LOCAL above
1710 */
1711 return policy->v.preferred_node;
765c4507 1712
dc85da15
CL
1713 case MPOL_INTERLEAVE:
1714 return interleave_nodes(policy);
1715
dd1a239f 1716 case MPOL_BIND: {
c33d6c06
MG
1717 struct zoneref *z;
1718
dc85da15
CL
1719 /*
1720 * Follow bind policy behavior and start allocation at the
1721 * first node.
1722 */
19770b32 1723 struct zonelist *zonelist;
19770b32 1724 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
c9634cf0 1725 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
c33d6c06
MG
1726 z = first_zones_zonelist(zonelist, highest_zoneidx,
1727 &policy->v.nodes);
1728 return z->zone ? z->zone->node : node;
dd1a239f 1729 }
dc85da15 1730
dc85da15 1731 default:
bea904d5 1732 BUG();
dc85da15
CL
1733 }
1734}
1735
fee83b3a
AM
1736/*
1737 * Do static interleaving for a VMA with known offset @n. Returns the n'th
1738 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1739 * number of present nodes.
1740 */
98c70baa 1741static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1da177e4 1742{
dfcd3c0d 1743 unsigned nnodes = nodes_weight(pol->v.nodes);
f5b087b5 1744 unsigned target;
fee83b3a
AM
1745 int i;
1746 int nid;
1da177e4 1747
f5b087b5
DR
1748 if (!nnodes)
1749 return numa_node_id();
fee83b3a
AM
1750 target = (unsigned int)n % nnodes;
1751 nid = first_node(pol->v.nodes);
1752 for (i = 0; i < target; i++)
dfcd3c0d 1753 nid = next_node(nid, pol->v.nodes);
1da177e4
LT
1754 return nid;
1755}
1756
5da7ca86
CL
1757/* Determine a node number for interleave */
1758static inline unsigned interleave_nid(struct mempolicy *pol,
1759 struct vm_area_struct *vma, unsigned long addr, int shift)
1760{
1761 if (vma) {
1762 unsigned long off;
1763
3b98b087
NA
1764 /*
1765 * for small pages, there is no difference between
1766 * shift and PAGE_SHIFT, so the bit-shift is safe.
1767 * for huge pages, since vm_pgoff is in units of small
1768 * pages, we need to shift off the always 0 bits to get
1769 * a useful offset.
1770 */
1771 BUG_ON(shift < PAGE_SHIFT);
1772 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
5da7ca86 1773 off += (addr - vma->vm_start) >> shift;
98c70baa 1774 return offset_il_node(pol, off);
5da7ca86
CL
1775 } else
1776 return interleave_nodes(pol);
1777}
1778
00ac59ad 1779#ifdef CONFIG_HUGETLBFS
480eccf9 1780/*
04ec6264 1781 * huge_node(@vma, @addr, @gfp_flags, @mpol)
b46e14ac
FF
1782 * @vma: virtual memory area whose policy is sought
1783 * @addr: address in @vma for shared policy lookup and interleave policy
1784 * @gfp_flags: for requested zone
1785 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1786 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
480eccf9 1787 *
04ec6264 1788 * Returns a nid suitable for a huge page allocation and a pointer
52cd3b07
LS
1789 * to the struct mempolicy for conditional unref after allocation.
1790 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
1791 * @nodemask for filtering the zonelist.
c0ff7453 1792 *
d26914d1 1793 * Must be protected by read_mems_allowed_begin()
480eccf9 1794 */
04ec6264
VB
1795int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
1796 struct mempolicy **mpol, nodemask_t **nodemask)
5da7ca86 1797{
04ec6264 1798 int nid;
5da7ca86 1799
dd6eecb9 1800 *mpol = get_vma_policy(vma, addr);
19770b32 1801 *nodemask = NULL; /* assume !MPOL_BIND */
5da7ca86 1802
52cd3b07 1803 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
04ec6264
VB
1804 nid = interleave_nid(*mpol, vma, addr,
1805 huge_page_shift(hstate_vma(vma)));
52cd3b07 1806 } else {
04ec6264 1807 nid = policy_node(gfp_flags, *mpol, numa_node_id());
52cd3b07
LS
1808 if ((*mpol)->mode == MPOL_BIND)
1809 *nodemask = &(*mpol)->v.nodes;
480eccf9 1810 }
04ec6264 1811 return nid;
5da7ca86 1812}
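/*
 * Illustrative sketch only (not part of this file), loosely modelled on the
 * hugetlb fault path: the cpuset retry loop is what the "Must be protected
 * by read_mems_allowed_begin()" note above refers to.  The helper name
 * example_dequeue_huge() is made up, GFP_HIGHUSER_MOVABLE stands in for the
 * hstate's real gfp mask, and calling the hugetlb-internal
 * dequeue_huge_page_nodemask() from here is an assumption for the example.
 */
static struct page * __maybe_unused
example_dequeue_huge(struct hstate *h, struct vm_area_struct *vma,
		     unsigned long addr)
{
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	unsigned int cpuset_mems_cookie;
	gfp_t gfp_mask = GFP_HIGHUSER_MOVABLE;	/* stand-in gfp mask */
	struct page *page;
	int nid;

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
	mpol_cond_put(mpol);	/* conditionally drop the policy reference */
	if (!page && read_mems_allowed_retry(cpuset_mems_cookie))
		goto retry_cpuset;
	return page;
}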
06808b08
LS
1813
1814/*
1815 * init_nodemask_of_mempolicy
1816 *
1817 * If the current task's mempolicy is "default" [NULL], return 'false'
1818 * to indicate default policy. Otherwise, extract the policy nodemask
1819 * for 'bind' or 'interleave' policy into the argument nodemask, or
1820 * initialize the argument nodemask to contain the single node for
1821 * 'preferred' or 'local' policy and return 'true' to indicate presence
1822 * of non-default mempolicy.
1823 *
1824 * We don't bother with reference counting the mempolicy [mpol_get/put]
1825 * because the current task is examining its own mempolicy and a task's
1826 * mempolicy is only ever changed by the task itself.
1827 *
1828 * N.B., it is the caller's responsibility to free a returned nodemask.
1829 */
1830bool init_nodemask_of_mempolicy(nodemask_t *mask)
1831{
1832 struct mempolicy *mempolicy;
1833 int nid;
1834
1835 if (!(mask && current->mempolicy))
1836 return false;
1837
c0ff7453 1838 task_lock(current);
06808b08
LS
1839 mempolicy = current->mempolicy;
1840 switch (mempolicy->mode) {
1841 case MPOL_PREFERRED:
1842 if (mempolicy->flags & MPOL_F_LOCAL)
1843 nid = numa_node_id();
1844 else
1845 nid = mempolicy->v.preferred_node;
1846 init_nodemask_of_node(mask, nid);
1847 break;
1848
1849 case MPOL_BIND:
1850 /* Fall through */
1851 case MPOL_INTERLEAVE:
1852 *mask = mempolicy->v.nodes;
1853 break;
1854
1855 default:
1856 BUG();
1857 }
c0ff7453 1858 task_unlock(current);
06808b08
LS
1859
1860 return true;
1861}
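/*
 * Illustrative sketch only (not part of this file), loosely modelled on the
 * hugetlb sysctl handling: obtain the calling task's mempolicy nodemask, if
 * any, before doing per-node work.  The helper name and the pr_debug() body
 * are made up for the example.
 */
static void __maybe_unused example_walk_policy_nodes(void)
{
	int nid;
	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL);

	if (nodes_allowed && init_nodemask_of_mempolicy(nodes_allowed)) {
		for_each_node_mask(nid, *nodes_allowed)
			pr_debug("mempolicy allows node %d\n", nid);
	}
	NODEMASK_FREE(nodes_allowed);
}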
00ac59ad 1862#endif
5da7ca86 1863
6f48d0eb
DR
1864/*
1865 * mempolicy_nodemask_intersects
1866 *
1867 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1868 * policy. Otherwise, check for intersection between mask and the policy
1869 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1870 * policy, always return true since it may allocate elsewhere on fallback.
1871 *
1872 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1873 */
1874bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1875 const nodemask_t *mask)
1876{
1877 struct mempolicy *mempolicy;
1878 bool ret = true;
1879
1880 if (!mask)
1881 return ret;
1882 task_lock(tsk);
1883 mempolicy = tsk->mempolicy;
1884 if (!mempolicy)
1885 goto out;
1886
1887 switch (mempolicy->mode) {
1888 case MPOL_PREFERRED:
1889 /*
1890	 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes to
1891	 * allocate from; allocations may fall back to other nodes when OOM.
1892 * Thus, it's possible for tsk to have allocated memory from
1893 * nodes in mask.
1894 */
1895 break;
1896 case MPOL_BIND:
1897 case MPOL_INTERLEAVE:
1898 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1899 break;
1900 default:
1901 BUG();
1902 }
1903out:
1904 task_unlock(tsk);
1905 return ret;
1906}
1907
1da177e4
LT
1908/* Allocate a page in interleaved policy.
1909 Own path because it needs to do special accounting. */
662f3a0b
AK
1910static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1911 unsigned nid)
1da177e4 1912{
1da177e4
LT
1913 struct page *page;
1914
04ec6264 1915 page = __alloc_pages(gfp, order, nid);
4518085e
KW
1916 /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
1917 if (!static_branch_likely(&vm_numa_stat_key))
1918 return page;
de55c8b2
AR
1919 if (page && page_to_nid(page) == nid) {
1920 preempt_disable();
1921 __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
1922 preempt_enable();
1923 }
1da177e4
LT
1924 return page;
1925}
1926
1927/**
0bbbc0b3 1928 * alloc_pages_vma - Allocate a page for a VMA.
1da177e4
LT
1929 *
1930 * @gfp:
1931 * %GFP_USER user allocation.
1932 * %GFP_KERNEL kernel allocations,
1933 * %GFP_HIGHMEM highmem/user allocations,
1934 * %GFP_FS allocation should not call back into a file system.
1935 * %GFP_ATOMIC don't sleep.
1936 *
0bbbc0b3 1937 * @order: Order of the GFP allocation.
1da177e4
LT
1938 * @vma: Pointer to VMA or NULL if not available.
1939 * @addr: Virtual Address of the allocation. Must be inside the VMA.
be97a41b
VB
1940 * @node: Which node to prefer for allocation (modulo policy).
1941 * @hugepage: for hugepages try only the preferred node if possible
1da177e4
LT
1942 *
1943 * This function allocates a page from the kernel page pool and applies
1944 * a NUMA policy associated with the VMA or the current process.
1945 * When VMA is not NULL, the caller must hold down_read on the mmap_sem
1946 * of the VMA's mm_struct to prevent it from going away. Should be used for
be97a41b
VB
1947 * all allocations for pages that will be mapped into user space. Returns
1948 * NULL when no page can be allocated.
1da177e4
LT
1949 */
1950struct page *
0bbbc0b3 1951alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
be97a41b 1952 unsigned long addr, int node, bool hugepage)
1da177e4 1953{
cc9a6c87 1954 struct mempolicy *pol;
c0ff7453 1955 struct page *page;
04ec6264 1956 int preferred_nid;
be97a41b 1957 nodemask_t *nmask;
cc9a6c87 1958
dd6eecb9 1959 pol = get_vma_policy(vma, addr);
1da177e4 1960
0867a57c
VB
1961 if (pol->mode == MPOL_INTERLEAVE) {
1962 unsigned nid;
1963
1964 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
1965 mpol_cond_put(pol);
1966 page = alloc_page_interleave(gfp, order, nid);
1967 goto out;
1968 }
1969
1970 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
1971 int hpage_node = node;
1972
be97a41b
VB
1973 /*
1974 * For hugepage allocation and non-interleave policy which
0867a57c
VB
1975 * allows the current node (or other explicitly preferred
1976 * node) we only try to allocate from the current/preferred
1977 * node and don't fall back to other nodes, as the cost of
1978 * remote accesses would likely offset THP benefits.
be97a41b
VB
1979 *
1980 * If the policy is interleave, or does not allow the current
1981 * node in its nodemask, we allocate the standard way.
1982 */
0867a57c
VB
1983 if (pol->mode == MPOL_PREFERRED &&
1984 !(pol->flags & MPOL_F_LOCAL))
1985 hpage_node = pol->v.preferred_node;
1986
be97a41b 1987 nmask = policy_nodemask(gfp, pol);
0867a57c 1988 if (!nmask || node_isset(hpage_node, *nmask)) {
be97a41b 1989 mpol_cond_put(pol);
96db800f 1990 page = __alloc_pages_node(hpage_node,
5265047a 1991 gfp | __GFP_THISNODE, order);
be97a41b
VB
1992 goto out;
1993 }
1994 }
1995
be97a41b 1996 nmask = policy_nodemask(gfp, pol);
04ec6264
VB
1997 preferred_nid = policy_node(gfp, pol, node);
1998 page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
d51e9894 1999 mpol_cond_put(pol);
be97a41b 2000out:
c0ff7453 2001 return page;
1da177e4
LT
2002}
2003
2004/**
2005 * alloc_pages_current - Allocate pages.
2006 *
2007 * @gfp:
2008 * %GFP_USER user allocation,
2009 * %GFP_KERNEL kernel allocation,
2010 * %GFP_HIGHMEM highmem allocation,
2011 * %GFP_FS don't call back into a file system.
2012 * %GFP_ATOMIC don't sleep.
2013 * @order: Power of two of allocation size in pages. 0 is a single page.
2014 *
2015 * Allocate a page from the kernel page pool and, when not in
2016 * interrupt context, apply the current process' NUMA policy.
2017 * Returns NULL when no page can be allocated.
1da177e4 2018 */
dd0fc66f 2019struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1da177e4 2020{
8d90274b 2021 struct mempolicy *pol = &default_policy;
c0ff7453 2022 struct page *page;
1da177e4 2023
8d90274b
ON
2024 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2025 pol = get_task_policy(current);
52cd3b07
LS
2026
2027 /*
2028 * No reference counting needed for current->mempolicy
2029 * nor system default_policy
2030 */
45c4745a 2031 if (pol->mode == MPOL_INTERLEAVE)
c0ff7453
MX
2032 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2033 else
2034 page = __alloc_pages_nodemask(gfp, order,
04ec6264 2035 policy_node(gfp, pol, numa_node_id()),
5c4b4be3 2036 policy_nodemask(gfp, pol));
cc9a6c87 2037
c0ff7453 2038 return page;
1da177e4
LT
2039}
2040EXPORT_SYMBOL(alloc_pages_current);
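/*
 * Illustrative note (not in the original source): on CONFIG_NUMA builds the
 * generic alloc_pages()/alloc_page() helpers in <linux/gfp.h> resolve to
 * alloc_pages_current(), so ordinary kernel allocations pick up the current
 * task's mempolicy through this path.
 */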
2041
ef0855d3
ON
2042int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2043{
2044 struct mempolicy *pol = mpol_dup(vma_policy(src));
2045
2046 if (IS_ERR(pol))
2047 return PTR_ERR(pol);
2048 dst->vm_policy = pol;
2049 return 0;
2050}
2051
4225399a 2052/*
846a16bf 2053 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
4225399a
PJ
2054 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2055 * with the mems_allowed returned by cpuset_mems_allowed(). This
2056 * keeps mempolicies cpuset-relative after their cpuset moves. See
2057 * also update_nodemask() in kernel/cpuset.c.
708c1bbc
MX
2058 *
2059 * current's mempolicy may be rebound by another task (the task that changes
2060 * the cpuset's mems), so we needn't do rebind work for the current task.
4225399a 2061 */
4225399a 2062
846a16bf
LS
2063/* Slow path of a mempolicy duplicate */
2064struct mempolicy *__mpol_dup(struct mempolicy *old)
1da177e4
LT
2065{
2066 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2067
2068 if (!new)
2069 return ERR_PTR(-ENOMEM);
708c1bbc
MX
2070
2071 /* task's mempolicy is protected by alloc_lock */
2072 if (old == current->mempolicy) {
2073 task_lock(current);
2074 *new = *old;
2075 task_unlock(current);
2076 } else
2077 *new = *old;
2078
4225399a
PJ
2079 if (current_cpuset_is_being_rebound()) {
2080 nodemask_t mems = cpuset_mems_allowed(current);
213980c0 2081 mpol_rebind_policy(new, &mems);
4225399a 2082 }
1da177e4 2083 atomic_set(&new->refcnt, 1);
1da177e4
LT
2084 return new;
2085}
2086
2087/* Slow path of a mempolicy comparison */
fcfb4dcc 2088bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1da177e4
LT
2089{
2090 if (!a || !b)
fcfb4dcc 2091 return false;
45c4745a 2092 if (a->mode != b->mode)
fcfb4dcc 2093 return false;
19800502 2094 if (a->flags != b->flags)
fcfb4dcc 2095 return false;
19800502
BL
2096 if (mpol_store_user_nodemask(a))
2097 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
fcfb4dcc 2098 return false;
19800502 2099
45c4745a 2100 switch (a->mode) {
19770b32
MG
2101 case MPOL_BIND:
2102 /* Fall through */
1da177e4 2103 case MPOL_INTERLEAVE:
fcfb4dcc 2104 return !!nodes_equal(a->v.nodes, b->v.nodes);
1da177e4 2105 case MPOL_PREFERRED:
75719661 2106 return a->v.preferred_node == b->v.preferred_node;
1da177e4
LT
2107 default:
2108 BUG();
fcfb4dcc 2109 return false;
1da177e4
LT
2110 }
2111}
2112
1da177e4
LT
2113/*
2114 * Shared memory backing store policy support.
2115 *
2116 * Remember policies even when nobody has shared memory mapped.
2117 * The policies are kept in Red-Black tree linked from the inode.
4a8c7bb5 2118 * They are protected by the sp->lock rwlock, which should be held
1da177e4
LT
2119 * for any accesses to the tree.
2120 */
2121
4a8c7bb5
NZ
2122/*
2123 * lookup first element intersecting start-end. Caller holds sp->lock for
2124 * reading or for writing
2125 */
1da177e4
LT
2126static struct sp_node *
2127sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2128{
2129 struct rb_node *n = sp->root.rb_node;
2130
2131 while (n) {
2132 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2133
2134 if (start >= p->end)
2135 n = n->rb_right;
2136 else if (end <= p->start)
2137 n = n->rb_left;
2138 else
2139 break;
2140 }
2141 if (!n)
2142 return NULL;
2143 for (;;) {
2144 struct sp_node *w = NULL;
2145 struct rb_node *prev = rb_prev(n);
2146 if (!prev)
2147 break;
2148 w = rb_entry(prev, struct sp_node, nd);
2149 if (w->end <= start)
2150 break;
2151 n = prev;
2152 }
2153 return rb_entry(n, struct sp_node, nd);
2154}
2155
4a8c7bb5
NZ
2156/*
2157 * Insert a new shared policy into the list. Caller holds sp->lock for
2158 * writing.
2159 */
1da177e4
LT
2160static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2161{
2162 struct rb_node **p = &sp->root.rb_node;
2163 struct rb_node *parent = NULL;
2164 struct sp_node *nd;
2165
2166 while (*p) {
2167 parent = *p;
2168 nd = rb_entry(parent, struct sp_node, nd);
2169 if (new->start < nd->start)
2170 p = &(*p)->rb_left;
2171 else if (new->end > nd->end)
2172 p = &(*p)->rb_right;
2173 else
2174 BUG();
2175 }
2176 rb_link_node(&new->nd, parent, p);
2177 rb_insert_color(&new->nd, &sp->root);
140d5a49 2178 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
45c4745a 2179 new->policy ? new->policy->mode : 0);
1da177e4
LT
2180}
2181
2182/* Find shared policy intersecting idx */
2183struct mempolicy *
2184mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2185{
2186 struct mempolicy *pol = NULL;
2187 struct sp_node *sn;
2188
2189 if (!sp->root.rb_node)
2190 return NULL;
4a8c7bb5 2191 read_lock(&sp->lock);
1da177e4
LT
2192 sn = sp_lookup(sp, idx, idx+1);
2193 if (sn) {
2194 mpol_get(sn->policy);
2195 pol = sn->policy;
2196 }
4a8c7bb5 2197 read_unlock(&sp->lock);
1da177e4
LT
2198 return pol;
2199}
2200
63f74ca2
KM
2201static void sp_free(struct sp_node *n)
2202{
2203 mpol_put(n->policy);
2204 kmem_cache_free(sn_cache, n);
2205}
2206
771fb4d8
LS
2207/**
2208 * mpol_misplaced - check whether current page node is valid in policy
2209 *
b46e14ac
FF
2210 * @page: page to be checked
2211 * @vma: vm area where page mapped
2212 * @addr: virtual address where page mapped
771fb4d8
LS
2213 *
2214 * Look up the current policy node id for vma, addr and "compare" it to
2215 * the page's node id.
2216 *
2217 * Returns:
2218 * -1 - not misplaced, page is in the right node
2219 * node - node id where the page should be
2220 *
2221 * Policy determination "mimics" alloc_page_vma().
2222 * Called from fault path where we know the vma and faulting address.
2223 */
2224int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2225{
2226 struct mempolicy *pol;
c33d6c06 2227 struct zoneref *z;
771fb4d8
LS
2228 int curnid = page_to_nid(page);
2229 unsigned long pgoff;
90572890
PZ
2230 int thiscpu = raw_smp_processor_id();
2231 int thisnid = cpu_to_node(thiscpu);
771fb4d8
LS
2232 int polnid = -1;
2233 int ret = -1;
2234
dd6eecb9 2235 pol = get_vma_policy(vma, addr);
771fb4d8
LS
2236 if (!(pol->flags & MPOL_F_MOF))
2237 goto out;
2238
2239 switch (pol->mode) {
2240 case MPOL_INTERLEAVE:
771fb4d8
LS
2241 pgoff = vma->vm_pgoff;
2242 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
98c70baa 2243 polnid = offset_il_node(pol, pgoff);
771fb4d8
LS
2244 break;
2245
2246 case MPOL_PREFERRED:
2247 if (pol->flags & MPOL_F_LOCAL)
2248 polnid = numa_node_id();
2249 else
2250 polnid = pol->v.preferred_node;
2251 break;
2252
2253 case MPOL_BIND:
c33d6c06 2254
771fb4d8
LS
2255 /*
2256		 * MPOL_BIND allows binding to multiple nodes.
2257		 * Use the current page's node if it is in the policy nodemask,
2258		 * else select the nearest allowed node, if any.
2259		 * If there are no allowed nodes, use the current node [!misplaced].
2260 */
2261 if (node_isset(curnid, pol->v.nodes))
2262 goto out;
c33d6c06 2263 z = first_zones_zonelist(
771fb4d8
LS
2264 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2265 gfp_zone(GFP_HIGHUSER),
c33d6c06
MG
2266 &pol->v.nodes);
2267 polnid = z->zone->node;
771fb4d8
LS
2268 break;
2269
2270 default:
2271 BUG();
2272 }
5606e387
MG
2273
2274 /* Migrate the page towards the node whose CPU is referencing it */
e42c8ff2 2275 if (pol->flags & MPOL_F_MORON) {
90572890 2276 polnid = thisnid;
5606e387 2277
10f39042 2278 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
de1c9ce6 2279 goto out;
e42c8ff2
MG
2280 }
2281
771fb4d8
LS
2282 if (curnid != polnid)
2283 ret = polnid;
2284out:
2285 mpol_cond_put(pol);
2286
2287 return ret;
2288}
2289
c11600e4
DR
2290/*
2291 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2292 * dropped after task->mempolicy is set to NULL so that any allocation done as
2293 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2294 * policy.
2295 */
2296void mpol_put_task_policy(struct task_struct *task)
2297{
2298 struct mempolicy *pol;
2299
2300 task_lock(task);
2301 pol = task->mempolicy;
2302 task->mempolicy = NULL;
2303 task_unlock(task);
2304 mpol_put(pol);
2305}
2306
1da177e4
LT
2307static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2308{
140d5a49 2309	pr_debug("deleting %lx-%lx\n", n->start, n->end);
1da177e4 2310 rb_erase(&n->nd, &sp->root);
63f74ca2 2311 sp_free(n);
1da177e4
LT
2312}
2313
42288fe3
MG
2314static void sp_node_init(struct sp_node *node, unsigned long start,
2315 unsigned long end, struct mempolicy *pol)
2316{
2317 node->start = start;
2318 node->end = end;
2319 node->policy = pol;
2320}
2321
dbcb0f19
AB
2322static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2323 struct mempolicy *pol)
1da177e4 2324{
869833f2
KM
2325 struct sp_node *n;
2326 struct mempolicy *newpol;
1da177e4 2327
869833f2 2328 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1da177e4
LT
2329 if (!n)
2330 return NULL;
869833f2
KM
2331
2332 newpol = mpol_dup(pol);
2333 if (IS_ERR(newpol)) {
2334 kmem_cache_free(sn_cache, n);
2335 return NULL;
2336 }
2337 newpol->flags |= MPOL_F_SHARED;
42288fe3 2338 sp_node_init(n, start, end, newpol);
869833f2 2339
1da177e4
LT
2340 return n;
2341}
2342
2343/* Replace a policy range. */
2344static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2345 unsigned long end, struct sp_node *new)
2346{
b22d127a 2347 struct sp_node *n;
42288fe3
MG
2348 struct sp_node *n_new = NULL;
2349 struct mempolicy *mpol_new = NULL;
b22d127a 2350 int ret = 0;
1da177e4 2351
42288fe3 2352restart:
4a8c7bb5 2353 write_lock(&sp->lock);
1da177e4
LT
2354 n = sp_lookup(sp, start, end);
2355 /* Take care of old policies in the same range. */
2356 while (n && n->start < end) {
2357 struct rb_node *next = rb_next(&n->nd);
2358 if (n->start >= start) {
2359 if (n->end <= end)
2360 sp_delete(sp, n);
2361 else
2362 n->start = end;
2363 } else {
2364 /* Old policy spanning whole new range. */
2365 if (n->end > end) {
42288fe3
MG
2366 if (!n_new)
2367 goto alloc_new;
2368
2369 *mpol_new = *n->policy;
2370 atomic_set(&mpol_new->refcnt, 1);
7880639c 2371 sp_node_init(n_new, end, n->end, mpol_new);
1da177e4 2372 n->end = start;
5ca39575 2373 sp_insert(sp, n_new);
42288fe3
MG
2374 n_new = NULL;
2375 mpol_new = NULL;
1da177e4
LT
2376 break;
2377 } else
2378 n->end = start;
2379 }
2380 if (!next)
2381 break;
2382 n = rb_entry(next, struct sp_node, nd);
2383 }
2384 if (new)
2385 sp_insert(sp, new);
4a8c7bb5 2386 write_unlock(&sp->lock);
42288fe3
MG
2387 ret = 0;
2388
2389err_out:
2390 if (mpol_new)
2391 mpol_put(mpol_new);
2392 if (n_new)
2393 kmem_cache_free(sn_cache, n_new);
2394
b22d127a 2395 return ret;
42288fe3
MG
2396
2397alloc_new:
4a8c7bb5 2398 write_unlock(&sp->lock);
42288fe3
MG
2399 ret = -ENOMEM;
2400 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2401 if (!n_new)
2402 goto err_out;
2403 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2404 if (!mpol_new)
2405 goto err_out;
2406 goto restart;
1da177e4
LT
2407}
2408
71fe804b
LS
2409/**
2410 * mpol_shared_policy_init - initialize shared policy for inode
2411 * @sp: pointer to inode shared policy
2412 * @mpol: struct mempolicy to install
2413 *
2414 * Install non-NULL @mpol in inode's shared policy rb-tree.
2415 * On entry, the current task has a reference on a non-NULL @mpol.
2416 * This must be released on exit.
4bfc4495 2417 * This is called from get_inode(), so we can use GFP_KERNEL.
71fe804b
LS
2418 */
2419void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2420{
58568d2a
MX
2421 int ret;
2422
71fe804b 2423 sp->root = RB_ROOT; /* empty tree == default mempolicy */
4a8c7bb5 2424 rwlock_init(&sp->lock);
71fe804b
LS
2425
2426 if (mpol) {
2427 struct vm_area_struct pvma;
2428 struct mempolicy *new;
4bfc4495 2429 NODEMASK_SCRATCH(scratch);
71fe804b 2430
4bfc4495 2431 if (!scratch)
5c0c1654 2432 goto put_mpol;
71fe804b
LS
2433 /* contextualize the tmpfs mount point mempolicy */
2434 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
15d77835 2435 if (IS_ERR(new))
0cae3457 2436 goto free_scratch; /* no valid nodemask intersection */
58568d2a
MX
2437
2438 task_lock(current);
4bfc4495 2439 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
58568d2a 2440 task_unlock(current);
15d77835 2441 if (ret)
5c0c1654 2442 goto put_new;
71fe804b
LS
2443
2444 /* Create pseudo-vma that contains just the policy */
2445 memset(&pvma, 0, sizeof(struct vm_area_struct));
2446 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2447 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
15d77835 2448
5c0c1654 2449put_new:
71fe804b 2450 mpol_put(new); /* drop initial ref */
0cae3457 2451free_scratch:
4bfc4495 2452 NODEMASK_SCRATCH_FREE(scratch);
5c0c1654
LS
2453put_mpol:
2454 mpol_put(mpol); /* drop our incoming ref on sb mpol */
7339ff83
RH
2455 }
2456}
2457
1da177e4
LT
2458int mpol_set_shared_policy(struct shared_policy *info,
2459 struct vm_area_struct *vma, struct mempolicy *npol)
2460{
2461 int err;
2462 struct sp_node *new = NULL;
2463 unsigned long sz = vma_pages(vma);
2464
028fec41 2465 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
1da177e4 2466 vma->vm_pgoff,
45c4745a 2467 sz, npol ? npol->mode : -1,
028fec41 2468 npol ? npol->flags : -1,
00ef2d2f 2469 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
1da177e4
LT
2470
2471 if (npol) {
2472 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2473 if (!new)
2474 return -ENOMEM;
2475 }
2476 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2477 if (err && new)
63f74ca2 2478 sp_free(new);
1da177e4
LT
2479 return err;
2480}
2481
2482/* Free a backing policy store on inode delete. */
2483void mpol_free_shared_policy(struct shared_policy *p)
2484{
2485 struct sp_node *n;
2486 struct rb_node *next;
2487
2488 if (!p->root.rb_node)
2489 return;
4a8c7bb5 2490 write_lock(&p->lock);
1da177e4
LT
2491 next = rb_first(&p->root);
2492 while (next) {
2493 n = rb_entry(next, struct sp_node, nd);
2494 next = rb_next(&n->nd);
63f74ca2 2495 sp_delete(p, n);
1da177e4 2496 }
4a8c7bb5 2497 write_unlock(&p->lock);
1da177e4
LT
2498}
2499
1a687c2e 2500#ifdef CONFIG_NUMA_BALANCING
c297663c 2501static int __initdata numabalancing_override;
1a687c2e
MG
2502
2503static void __init check_numabalancing_enable(void)
2504{
2505 bool numabalancing_default = false;
2506
2507 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2508 numabalancing_default = true;
2509
c297663c
MG
2510 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2511 if (numabalancing_override)
2512 set_numabalancing_state(numabalancing_override == 1);
2513
b0dc2b9b 2514 if (num_online_nodes() > 1 && !numabalancing_override) {
756a025f 2515 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
c297663c 2516 numabalancing_default ? "Enabling" : "Disabling");
1a687c2e
MG
2517 set_numabalancing_state(numabalancing_default);
2518 }
2519}
2520
2521static int __init setup_numabalancing(char *str)
2522{
2523 int ret = 0;
2524 if (!str)
2525 goto out;
1a687c2e
MG
2526
2527 if (!strcmp(str, "enable")) {
c297663c 2528 numabalancing_override = 1;
1a687c2e
MG
2529 ret = 1;
2530 } else if (!strcmp(str, "disable")) {
c297663c 2531 numabalancing_override = -1;
1a687c2e
MG
2532 ret = 1;
2533 }
2534out:
2535 if (!ret)
4a404bea 2536 pr_warn("Unable to parse numa_balancing=\n");
1a687c2e
MG
2537
2538 return ret;
2539}
2540__setup("numa_balancing=", setup_numabalancing);
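/*
 * Illustrative note (not in the original source): booting with
 * "numa_balancing=disable" on the kernel command line sets the override to
 * -1 above, and check_numabalancing_enable() then switches automatic NUMA
 * balancing off regardless of the Kconfig default; "numa_balancing=enable"
 * does the opposite.  At runtime the same knob is reachable through the
 * kernel.numa_balancing sysctl.
 */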
2541#else
2542static inline void __init check_numabalancing_enable(void)
2543{
2544}
2545#endif /* CONFIG_NUMA_BALANCING */
2546
1da177e4
LT
2547/* assumes fs == KERNEL_DS */
2548void __init numa_policy_init(void)
2549{
b71636e2
PM
2550 nodemask_t interleave_nodes;
2551 unsigned long largest = 0;
2552 int nid, prefer = 0;
2553
1da177e4
LT
2554 policy_cache = kmem_cache_create("numa_policy",
2555 sizeof(struct mempolicy),
20c2df83 2556 0, SLAB_PANIC, NULL);
1da177e4
LT
2557
2558 sn_cache = kmem_cache_create("shared_policy_node",
2559 sizeof(struct sp_node),
20c2df83 2560 0, SLAB_PANIC, NULL);
1da177e4 2561
5606e387
MG
2562 for_each_node(nid) {
2563 preferred_node_policy[nid] = (struct mempolicy) {
2564 .refcnt = ATOMIC_INIT(1),
2565 .mode = MPOL_PREFERRED,
2566 .flags = MPOL_F_MOF | MPOL_F_MORON,
2567 .v = { .preferred_node = nid, },
2568 };
2569 }
2570
b71636e2
PM
2571 /*
2572 * Set interleaving policy for system init. Interleaving is only
2573 * enabled across suitably sized nodes (default is >= 16MB), or
2574 * fall back to the largest node if they're all smaller.
2575 */
2576 nodes_clear(interleave_nodes);
01f13bd6 2577 for_each_node_state(nid, N_MEMORY) {
b71636e2
PM
2578 unsigned long total_pages = node_present_pages(nid);
2579
2580 /* Preserve the largest node */
2581 if (largest < total_pages) {
2582 largest = total_pages;
2583 prefer = nid;
2584 }
2585
2586 /* Interleave this node? */
2587 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2588 node_set(nid, interleave_nodes);
2589 }
2590
2591 /* All too small, use the largest */
2592 if (unlikely(nodes_empty(interleave_nodes)))
2593 node_set(prefer, interleave_nodes);
1da177e4 2594
028fec41 2595 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
b1de0d13 2596 pr_err("%s: interleaving failed\n", __func__);
1a687c2e
MG
2597
2598 check_numabalancing_enable();
1da177e4
LT
2599}
2600
8bccd85f 2601/* Reset policy of current process to default */
1da177e4
LT
2602void numa_default_policy(void)
2603{
028fec41 2604 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
1da177e4 2605}
68860ec1 2606
095f1fc4
LS
2607/*
2608 * Parse and format mempolicy from/to strings
2609 */
2610
1a75a6c8 2611/*
f2a07f40 2612 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
1a75a6c8 2613 */
345ace9c
LS
2614static const char * const policy_modes[] =
2615{
2616 [MPOL_DEFAULT] = "default",
2617 [MPOL_PREFERRED] = "prefer",
2618 [MPOL_BIND] = "bind",
2619 [MPOL_INTERLEAVE] = "interleave",
d3a71033 2620 [MPOL_LOCAL] = "local",
345ace9c 2621};
1a75a6c8 2622
095f1fc4
LS
2623
2624#ifdef CONFIG_TMPFS
2625/**
f2a07f40 2626 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
095f1fc4 2627 * @str: string containing mempolicy to parse
71fe804b 2628 * @mpol: pointer to struct mempolicy pointer, returned on success.
095f1fc4
LS
2629 *
2630 * Format of input:
2631 * <mode>[=<flags>][:<nodelist>]
2632 *
71fe804b 2633 * On success, returns 0, else 1
095f1fc4 2634 */
a7a88b23 2635int mpol_parse_str(char *str, struct mempolicy **mpol)
095f1fc4 2636{
71fe804b 2637 struct mempolicy *new = NULL;
b4652e84 2638 unsigned short mode;
f2a07f40 2639 unsigned short mode_flags;
71fe804b 2640 nodemask_t nodes;
095f1fc4
LS
2641 char *nodelist = strchr(str, ':');
2642 char *flags = strchr(str, '=');
095f1fc4
LS
2643 int err = 1;
2644
2645 if (nodelist) {
2646 /* NUL-terminate mode or flags string */
2647 *nodelist++ = '\0';
71fe804b 2648 if (nodelist_parse(nodelist, nodes))
095f1fc4 2649 goto out;
01f13bd6 2650 if (!nodes_subset(nodes, node_states[N_MEMORY]))
095f1fc4 2651 goto out;
71fe804b
LS
2652 } else
2653 nodes_clear(nodes);
2654
095f1fc4
LS
2655 if (flags)
2656 *flags++ = '\0'; /* terminate mode string */
2657
479e2802 2658 for (mode = 0; mode < MPOL_MAX; mode++) {
345ace9c 2659 if (!strcmp(str, policy_modes[mode])) {
095f1fc4
LS
2660 break;
2661 }
2662 }
a720094d 2663 if (mode >= MPOL_MAX)
095f1fc4
LS
2664 goto out;
2665
71fe804b 2666 switch (mode) {
095f1fc4 2667 case MPOL_PREFERRED:
71fe804b
LS
2668 /*
2669 * Insist on a nodelist of one node only
2670 */
095f1fc4
LS
2671 if (nodelist) {
2672 char *rest = nodelist;
2673 while (isdigit(*rest))
2674 rest++;
926f2ae0
KM
2675 if (*rest)
2676 goto out;
095f1fc4
LS
2677 }
2678 break;
095f1fc4
LS
2679 case MPOL_INTERLEAVE:
2680 /*
2681 * Default to online nodes with memory if no nodelist
2682 */
2683 if (!nodelist)
01f13bd6 2684 nodes = node_states[N_MEMORY];
3f226aa1 2685 break;
71fe804b 2686 case MPOL_LOCAL:
3f226aa1 2687 /*
71fe804b 2688 * Don't allow a nodelist; mpol_new() checks flags
3f226aa1 2689 */
71fe804b 2690 if (nodelist)
3f226aa1 2691 goto out;
71fe804b 2692 mode = MPOL_PREFERRED;
3f226aa1 2693 break;
413b43de
RT
2694 case MPOL_DEFAULT:
2695 /*
2696		 * Insist on an empty nodelist
2697 */
2698 if (!nodelist)
2699 err = 0;
2700 goto out;
d69b2e63
KM
2701 case MPOL_BIND:
2702 /*
2703 * Insist on a nodelist
2704 */
2705 if (!nodelist)
2706 goto out;
095f1fc4
LS
2707 }
2708
71fe804b 2709 mode_flags = 0;
095f1fc4
LS
2710 if (flags) {
2711 /*
2712 * Currently, we only support two mutually exclusive
2713 * mode flags.
2714 */
2715 if (!strcmp(flags, "static"))
71fe804b 2716 mode_flags |= MPOL_F_STATIC_NODES;
095f1fc4 2717 else if (!strcmp(flags, "relative"))
71fe804b 2718 mode_flags |= MPOL_F_RELATIVE_NODES;
095f1fc4 2719 else
926f2ae0 2720 goto out;
095f1fc4 2721 }
71fe804b
LS
2722
2723 new = mpol_new(mode, mode_flags, &nodes);
2724 if (IS_ERR(new))
926f2ae0
KM
2725 goto out;
2726
f2a07f40
HD
2727 /*
2728 * Save nodes for mpol_to_str() to show the tmpfs mount options
2729 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2730 */
2731 if (mode != MPOL_PREFERRED)
2732 new->v.nodes = nodes;
2733 else if (nodelist)
2734 new->v.preferred_node = first_node(nodes);
2735 else
2736 new->flags |= MPOL_F_LOCAL;
2737
2738 /*
2739 * Save nodes for contextualization: this will be used to "clone"
2740 * the mempolicy in a specific context [cpuset] at a later time.
2741 */
2742 new->w.user_nodemask = nodes;
2743
926f2ae0 2744 err = 0;
71fe804b 2745
095f1fc4
LS
2746out:
2747 /* Restore string for error message */
2748 if (nodelist)
2749 *--nodelist = ':';
2750 if (flags)
2751 *--flags = '=';
71fe804b
LS
2752 if (!err)
2753 *mpol = new;
095f1fc4
LS
2754 return err;
2755}
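/*
 * Illustrative examples (not in the original source) of strings accepted
 * above, as used for the tmpfs "mpol=" mount option:
 *	"interleave:0-3"	interleave across nodes 0-3
 *	"bind=static:0,2"	bind to nodes 0 and 2, static flag
 *	"prefer:1"		prefer node 1
 *	"local"			allocate on the faulting node
 */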
2756#endif /* CONFIG_TMPFS */
2757
71fe804b
LS
2758/**
2759 * mpol_to_str - format a mempolicy structure for printing
2760 * @buffer: to contain formatted mempolicy string
2761 * @maxlen: length of @buffer
2762 * @pol: pointer to mempolicy to be formatted
71fe804b 2763 *
948927ee
DR
2764 * Convert @pol into a string. If @buffer is too short, truncate the string.
2765 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2766 * longest flag, "relative", and to display at least a few node ids.
1a75a6c8 2767 */
948927ee 2768void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1a75a6c8
CL
2769{
2770 char *p = buffer;
948927ee
DR
2771 nodemask_t nodes = NODE_MASK_NONE;
2772 unsigned short mode = MPOL_DEFAULT;
2773 unsigned short flags = 0;
2291990a 2774
8790c71a 2775 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
bea904d5 2776 mode = pol->mode;
948927ee
DR
2777 flags = pol->flags;
2778 }
bea904d5 2779
1a75a6c8
CL
2780 switch (mode) {
2781 case MPOL_DEFAULT:
1a75a6c8 2782 break;
1a75a6c8 2783 case MPOL_PREFERRED:
fc36b8d3 2784 if (flags & MPOL_F_LOCAL)
f2a07f40 2785 mode = MPOL_LOCAL;
53f2556b 2786 else
fc36b8d3 2787 node_set(pol->v.preferred_node, nodes);
1a75a6c8 2788 break;
1a75a6c8 2789 case MPOL_BIND:
1a75a6c8 2790 case MPOL_INTERLEAVE:
f2a07f40 2791 nodes = pol->v.nodes;
1a75a6c8 2792 break;
1a75a6c8 2793 default:
948927ee
DR
2794 WARN_ON_ONCE(1);
2795 snprintf(p, maxlen, "unknown");
2796 return;
1a75a6c8
CL
2797 }
2798
b7a9f420 2799 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
1a75a6c8 2800
fc36b8d3 2801 if (flags & MPOL_MODE_FLAGS) {
948927ee 2802 p += snprintf(p, buffer + maxlen - p, "=");
f5b087b5 2803
2291990a
LS
2804 /*
2805 * Currently, the only defined flags are mutually exclusive
2806 */
f5b087b5 2807 if (flags & MPOL_F_STATIC_NODES)
2291990a
LS
2808 p += snprintf(p, buffer + maxlen - p, "static");
2809 else if (flags & MPOL_F_RELATIVE_NODES)
2810 p += snprintf(p, buffer + maxlen - p, "relative");
f5b087b5
DR
2811 }
2812
9e763e0f
TH
2813 if (!nodes_empty(nodes))
2814 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
2815 nodemask_pr_args(&nodes));
1a75a6c8 2816}
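/*
 * Illustrative examples (not in the original source) of the strings this
 * produces: "default", "prefer:2", "local", "bind=static:0,2" and
 * "interleave:0-3", matching the format parsed by mpol_parse_str() above.
 */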