mm/mempolicy.c
1/*
2 * Simple NUMA memory policy for the Linux kernel.
3 *
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6 * Subject to the GNU Public License, version 2.
7 *
8 * NUMA policy allows the user to give hints about the node(s) on which
9 * memory should be allocated.
10 *
11 * Support four policies per VMA and per process:
12 *
13 * The VMA policy has priority over the process policy for a page fault.
14 *
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a process counter
20 * is used.
21 *
22 * bind Only allocate memory on a specific set of nodes,
23 * no fallback.
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
27 *
28 * preferred Try a specific node first before normal fallback.
29 * As a special case NUMA_NO_NODE here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non-default
32 * process policy.
33 *
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
37 *
38 * The process policy is applied for most non-interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
42 *
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
46 *
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49 * on systems with highmem, kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
51 *
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
54 */
55
56/* Notebook:
57 fix mmap readahead to honour policy and enable policy for any page cache
58 object
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
61 first item above.
62 handle mremap for shared memory (currently ignored for the policy)
63 grows down?
64 make bind policy root only? It can trigger oom much faster and the
65 kernel is not always grateful with that.
66*/
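/*
 * Illustration (editor's addition, not part of the kernel source): how a
 * userspace program might request the policies documented above through
 * the syscalls implemented later in this file.  This sketch assumes the
 * <numaif.h> wrappers shipped with libnuma and an already mmap()ed region
 * (addr/length); error handling is omitted.
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask));  // process-wide
 *	unsigned long node0 = 1UL << 0;
 *	mbind(addr, length, MPOL_BIND, &node0, 8 * sizeof(node0), 0);  // one mapping
 */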
67
b1de0d13
MH
68#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69
1da177e4
LT
70#include <linux/mempolicy.h>
71#include <linux/mm.h>
72#include <linux/highmem.h>
73#include <linux/hugetlb.h>
74#include <linux/kernel.h>
75#include <linux/sched.h>
1da177e4
LT
76#include <linux/nodemask.h>
77#include <linux/cpuset.h>
1da177e4
LT
78#include <linux/slab.h>
79#include <linux/string.h>
b95f1b31 80#include <linux/export.h>
b488893a 81#include <linux/nsproxy.h>
1da177e4
LT
82#include <linux/interrupt.h>
83#include <linux/init.h>
84#include <linux/compat.h>
dc9aa5b9 85#include <linux/swap.h>
1a75a6c8
CL
86#include <linux/seq_file.h>
87#include <linux/proc_fs.h>
b20a3503 88#include <linux/migrate.h>
62b61f61 89#include <linux/ksm.h>
95a402c3 90#include <linux/rmap.h>
86c3a764 91#include <linux/security.h>
dbcb0f19 92#include <linux/syscalls.h>
095f1fc4 93#include <linux/ctype.h>
6d9c285a 94#include <linux/mm_inline.h>
b24f53a0 95#include <linux/mmu_notifier.h>
b1de0d13 96#include <linux/printk.h>
dc9aa5b9 97
1da177e4 98#include <asm/tlbflush.h>
7c0f6ba6 99#include <linux/uaccess.h>
1da177e4 100
62695a84
NP
101#include "internal.h"
102
38e35860 103/* Internal flags */
dc9aa5b9 104#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
38e35860 105#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
dc9aa5b9 106
fcc234f8
PE
107static struct kmem_cache *policy_cache;
108static struct kmem_cache *sn_cache;
1da177e4 109
1da177e4
LT
110/* Highest zone. A specific allocation for a zone below that is not
111 policied. */
6267276f 112enum zone_type policy_zone = 0;
1da177e4 113
bea904d5
LS
114/*
115 * run-time system-wide default policy => local allocation
116 */
e754d79d 117static struct mempolicy default_policy = {
1da177e4 118 .refcnt = ATOMIC_INIT(1), /* never free it */
bea904d5 119 .mode = MPOL_PREFERRED,
fc36b8d3 120 .flags = MPOL_F_LOCAL,
1da177e4
LT
121};
122
5606e387
MG
123static struct mempolicy preferred_node_policy[MAX_NUMNODES];
124
74d2c3a0 125struct mempolicy *get_task_policy(struct task_struct *p)
5606e387
MG
126{
127 struct mempolicy *pol = p->mempolicy;
f15ca78e 128 int node;
5606e387 129
f15ca78e
ON
130 if (pol)
131 return pol;
5606e387 132
f15ca78e
ON
133 node = numa_node_id();
134 if (node != NUMA_NO_NODE) {
135 pol = &preferred_node_policy[node];
136 /* preferred_node_policy is not initialised early in boot */
137 if (pol->mode)
138 return pol;
5606e387
MG
139 }
140
f15ca78e 141 return &default_policy;
5606e387
MG
142}
143
37012946
DR
144static const struct mempolicy_operations {
145 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
708c1bbc
MX
146 /*
147 * If the read-side task has no lock to protect task->mempolicy, the
148 * write-side task will rebind task->mempolicy in two steps. The first
149 * step sets all the newly allowed nodes, and the second step clears all
150 * the disallowed nodes. This way we never end up with no node left to
151 * allocate pages from.
152 * If we do have a lock to protect task->mempolicy on the read side, we
153 * rebind directly.
154 *
155 * step:
156 * MPOL_REBIND_ONCE - do the rebind work at once
157 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
158 * MPOL_REBIND_STEP2 - clear all the disallowed nodes
159 */
160 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
161 enum mpol_rebind_step step);
37012946
DR
162} mpol_ops[MPOL_MAX];
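/*
 * Editor's note, illustrating the two-step rebind described above with
 * assumed values: suppose a task's cpuset changes from mems_allowed {0,1}
 * to {2,3} while an MPOL_INTERLEAVE policy holds v.nodes = {0,1} and no
 * read-side lock is taken.  MPOL_REBIND_STEP1 first grows the mask to the
 * union {0,1,2,3}, so a concurrent allocation always finds at least one
 * valid node; MPOL_REBIND_STEP2 then drops the disallowed nodes, leaving
 * {2,3}.  A locked reader gets MPOL_REBIND_ONCE instead, which remaps
 * {0,1} -> {2,3} in a single pass.
 */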
163
f5b087b5
DR
164static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
165{
6d556294 166 return pol->flags & MPOL_MODE_FLAGS;
4c50bc01
DR
167}
168
169static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
170 const nodemask_t *rel)
171{
172 nodemask_t tmp;
173 nodes_fold(tmp, *orig, nodes_weight(*rel));
174 nodes_onto(*ret, tmp, *rel);
f5b087b5
DR
175}
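/*
 * Editor's worked example (assumed values): with MPOL_F_RELATIVE_NODES the
 * user mask is interpreted relative to the allowed nodes.  If *orig asks
 * for relative nodes {0,2} and *rel (the allowed set) is {4,5,6}, then
 * nodes_fold() wraps *orig modulo nodes_weight(*rel) = 3, which leaves
 * {0,2}, and nodes_onto() maps bit n onto the n-th allowed node, so
 * *ret = {4,6}.
 */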
176
37012946
DR
177static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
178{
179 if (nodes_empty(*nodes))
180 return -EINVAL;
181 pol->v.nodes = *nodes;
182 return 0;
183}
184
185static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
186{
187 if (!nodes)
fc36b8d3 188 pol->flags |= MPOL_F_LOCAL; /* local allocation */
37012946
DR
189 else if (nodes_empty(*nodes))
190 return -EINVAL; /* no allowed nodes */
191 else
192 pol->v.preferred_node = first_node(*nodes);
193 return 0;
194}
195
196static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
197{
859f7ef1 198 if (nodes_empty(*nodes))
37012946
DR
199 return -EINVAL;
200 pol->v.nodes = *nodes;
201 return 0;
202}
203
58568d2a
MX
204/*
205 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
206 * any, for the new policy. mpol_new() has already validated the nodes
207 * parameter with respect to the policy mode and flags. But, we need to
208 * handle an empty nodemask with MPOL_PREFERRED here.
209 *
210 * Must be called holding task's alloc_lock to protect task's mems_allowed
211 * and mempolicy. May also be called holding the mmap_semaphore for write.
212 */
4bfc4495
KH
213static int mpol_set_nodemask(struct mempolicy *pol,
214 const nodemask_t *nodes, struct nodemask_scratch *nsc)
58568d2a 215{
58568d2a
MX
216 int ret;
217
218 /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
219 if (pol == NULL)
220 return 0;
01f13bd6 221 /* Check N_MEMORY */
4bfc4495 222 nodes_and(nsc->mask1,
01f13bd6 223 cpuset_current_mems_allowed, node_states[N_MEMORY]);
58568d2a
MX
224
225 VM_BUG_ON(!nodes);
226 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
227 nodes = NULL; /* explicit local allocation */
228 else {
229 if (pol->flags & MPOL_F_RELATIVE_NODES)
859f7ef1 230 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
58568d2a 231 else
4bfc4495
KH
232 nodes_and(nsc->mask2, *nodes, nsc->mask1);
233
58568d2a
MX
234 if (mpol_store_user_nodemask(pol))
235 pol->w.user_nodemask = *nodes;
236 else
237 pol->w.cpuset_mems_allowed =
238 cpuset_current_mems_allowed;
239 }
240
4bfc4495
KH
241 if (nodes)
242 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
243 else
244 ret = mpol_ops[pol->mode].create(pol, NULL);
58568d2a
MX
245 return ret;
246}
247
248/*
249 * This function just creates a new policy, does some checks and simple
250 * initialization. You must invoke mpol_set_nodemask() to set nodes.
251 */
028fec41
DR
252static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
253 nodemask_t *nodes)
1da177e4
LT
254{
255 struct mempolicy *policy;
256
028fec41 257 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
00ef2d2f 258 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
140d5a49 259
3e1f0645
DR
260 if (mode == MPOL_DEFAULT) {
261 if (nodes && !nodes_empty(*nodes))
37012946 262 return ERR_PTR(-EINVAL);
d3a71033 263 return NULL;
37012946 264 }
3e1f0645
DR
265 VM_BUG_ON(!nodes);
266
267 /*
268 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
269 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
270 * All other modes require a valid pointer to a non-empty nodemask.
271 */
272 if (mode == MPOL_PREFERRED) {
273 if (nodes_empty(*nodes)) {
274 if (((flags & MPOL_F_STATIC_NODES) ||
275 (flags & MPOL_F_RELATIVE_NODES)))
276 return ERR_PTR(-EINVAL);
3e1f0645 277 }
479e2802 278 } else if (mode == MPOL_LOCAL) {
8d303e44
PK
279 if (!nodes_empty(*nodes) ||
280 (flags & MPOL_F_STATIC_NODES) ||
281 (flags & MPOL_F_RELATIVE_NODES))
479e2802
PZ
282 return ERR_PTR(-EINVAL);
283 mode = MPOL_PREFERRED;
3e1f0645
DR
284 } else if (nodes_empty(*nodes))
285 return ERR_PTR(-EINVAL);
1da177e4
LT
286 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
287 if (!policy)
288 return ERR_PTR(-ENOMEM);
289 atomic_set(&policy->refcnt, 1);
45c4745a 290 policy->mode = mode;
3e1f0645 291 policy->flags = flags;
37012946 292
1da177e4 293 return policy;
37012946
DR
294}
295
52cd3b07
LS
296/* Slow path of a mpol destructor. */
297void __mpol_put(struct mempolicy *p)
298{
299 if (!atomic_dec_and_test(&p->refcnt))
300 return;
52cd3b07
LS
301 kmem_cache_free(policy_cache, p);
302}
303
708c1bbc
MX
304static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
305 enum mpol_rebind_step step)
37012946
DR
306{
307}
308
708c1bbc
MX
309/*
310 * step:
311 * MPOL_REBIND_ONCE - do rebind work at once
312 * MPOL_REBIND_STEP1 - set all the newly nodes
313 * MPOL_REBIND_STEP2 - clean all the disallowed nodes
314 */
315static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
316 enum mpol_rebind_step step)
37012946
DR
317{
318 nodemask_t tmp;
319
320 if (pol->flags & MPOL_F_STATIC_NODES)
321 nodes_and(tmp, pol->w.user_nodemask, *nodes);
322 else if (pol->flags & MPOL_F_RELATIVE_NODES)
323 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
324 else {
708c1bbc
MX
325 /*
326 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
327 * result
328 */
329 if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
330 nodes_remap(tmp, pol->v.nodes,
331 pol->w.cpuset_mems_allowed, *nodes);
332 pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
333 } else if (step == MPOL_REBIND_STEP2) {
334 tmp = pol->w.cpuset_mems_allowed;
335 pol->w.cpuset_mems_allowed = *nodes;
336 } else
337 BUG();
37012946 338 }
f5b087b5 339
708c1bbc
MX
340 if (nodes_empty(tmp))
341 tmp = *nodes;
342
343 if (step == MPOL_REBIND_STEP1)
344 nodes_or(pol->v.nodes, pol->v.nodes, tmp);
345 else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
346 pol->v.nodes = tmp;
347 else
348 BUG();
349
37012946 350 if (!node_isset(current->il_next, tmp)) {
0edaf86c 351 current->il_next = next_node_in(current->il_next, tmp);
37012946
DR
352 if (current->il_next >= MAX_NUMNODES)
353 current->il_next = numa_node_id();
354 }
355}
356
357static void mpol_rebind_preferred(struct mempolicy *pol,
708c1bbc
MX
358 const nodemask_t *nodes,
359 enum mpol_rebind_step step)
37012946
DR
360{
361 nodemask_t tmp;
362
37012946
DR
363 if (pol->flags & MPOL_F_STATIC_NODES) {
364 int node = first_node(pol->w.user_nodemask);
365
fc36b8d3 366 if (node_isset(node, *nodes)) {
37012946 367 pol->v.preferred_node = node;
fc36b8d3
LS
368 pol->flags &= ~MPOL_F_LOCAL;
369 } else
370 pol->flags |= MPOL_F_LOCAL;
37012946
DR
371 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
372 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
373 pol->v.preferred_node = first_node(tmp);
fc36b8d3 374 } else if (!(pol->flags & MPOL_F_LOCAL)) {
37012946
DR
375 pol->v.preferred_node = node_remap(pol->v.preferred_node,
376 pol->w.cpuset_mems_allowed,
377 *nodes);
378 pol->w.cpuset_mems_allowed = *nodes;
379 }
1da177e4
LT
380}
381
708c1bbc
MX
382/*
383 * mpol_rebind_policy - Migrate a policy to a different set of nodes
384 *
385 * If the read-side task has no lock to protect task->mempolicy, the
386 * write-side task will rebind task->mempolicy in two steps. The first
387 * step sets all the newly allowed nodes, and the second step clears all
388 * the disallowed nodes. This way we never end up with no node left to
389 * allocate pages from.
390 * If we do have a lock to protect task->mempolicy on the read side, we
391 * rebind directly.
392 *
393 * step:
394 * MPOL_REBIND_ONCE - do the rebind work at once
395 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
396 * MPOL_REBIND_STEP2 - clear all the disallowed nodes
397 */
398static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
399 enum mpol_rebind_step step)
1d0d2680 400{
1d0d2680
DR
401 if (!pol)
402 return;
89c522c7 403 if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
1d0d2680
DR
404 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
405 return;
708c1bbc
MX
406
407 if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
408 return;
409
410 if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
411 BUG();
412
413 if (step == MPOL_REBIND_STEP1)
414 pol->flags |= MPOL_F_REBINDING;
415 else if (step == MPOL_REBIND_STEP2)
416 pol->flags &= ~MPOL_F_REBINDING;
417 else if (step >= MPOL_REBIND_NSTEP)
418 BUG();
419
420 mpol_ops[pol->mode].rebind(pol, newmask, step);
1d0d2680
DR
421}
422
423/*
424 * Wrapper for mpol_rebind_policy() that just requires task
425 * pointer, and updates task mempolicy.
58568d2a
MX
426 *
427 * Called with task's alloc_lock held.
1d0d2680
DR
428 */
429
708c1bbc
MX
430void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
431 enum mpol_rebind_step step)
1d0d2680 432{
708c1bbc 433 mpol_rebind_policy(tsk->mempolicy, new, step);
1d0d2680
DR
434}
435
436/*
437 * Rebind each vma in mm to new nodemask.
438 *
439 * Call holding a reference to mm. Takes mm->mmap_sem during call.
440 */
441
442void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
443{
444 struct vm_area_struct *vma;
445
446 down_write(&mm->mmap_sem);
447 for (vma = mm->mmap; vma; vma = vma->vm_next)
708c1bbc 448 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
1d0d2680
DR
449 up_write(&mm->mmap_sem);
450}
451
37012946
DR
452static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
453 [MPOL_DEFAULT] = {
454 .rebind = mpol_rebind_default,
455 },
456 [MPOL_INTERLEAVE] = {
457 .create = mpol_new_interleave,
458 .rebind = mpol_rebind_nodemask,
459 },
460 [MPOL_PREFERRED] = {
461 .create = mpol_new_preferred,
462 .rebind = mpol_rebind_preferred,
463 },
464 [MPOL_BIND] = {
465 .create = mpol_new_bind,
466 .rebind = mpol_rebind_nodemask,
467 },
468};
469
fc301289
CL
470static void migrate_page_add(struct page *page, struct list_head *pagelist,
471 unsigned long flags);
1a75a6c8 472
6f4576e3
NH
473struct queue_pages {
474 struct list_head *pagelist;
475 unsigned long flags;
476 nodemask_t *nmask;
477 struct vm_area_struct *prev;
478};
479
98094945
NH
480/*
481 * Scan through the pages, checking whether each satisfies the given
482 * conditions, and move those that do to the pagelist.
483 */
6f4576e3
NH
484static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
485 unsigned long end, struct mm_walk *walk)
1da177e4 486{
6f4576e3
NH
487 struct vm_area_struct *vma = walk->vma;
488 struct page *page;
489 struct queue_pages *qp = walk->private;
490 unsigned long flags = qp->flags;
248db92d 491 int nid, ret;
91612e0d 492 pte_t *pte;
705e87c0 493 spinlock_t *ptl;
941150a3 494
248db92d
KS
495 if (pmd_trans_huge(*pmd)) {
496 ptl = pmd_lock(walk->mm, pmd);
497 if (pmd_trans_huge(*pmd)) {
498 page = pmd_page(*pmd);
499 if (is_huge_zero_page(page)) {
500 spin_unlock(ptl);
fd60775a 501 __split_huge_pmd(vma, pmd, addr, false, NULL);
248db92d
KS
502 } else {
503 get_page(page);
504 spin_unlock(ptl);
505 lock_page(page);
506 ret = split_huge_page(page);
507 unlock_page(page);
508 put_page(page);
509 if (ret)
510 return 0;
511 }
512 } else {
513 spin_unlock(ptl);
514 }
515 }
91612e0d 516
337d9abf
NH
517 if (pmd_trans_unstable(pmd))
518 return 0;
248db92d 519retry:
6f4576e3
NH
520 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
521 for (; addr != end; pte++, addr += PAGE_SIZE) {
91612e0d 522 if (!pte_present(*pte))
1da177e4 523 continue;
6aab341e
LT
524 page = vm_normal_page(vma, addr, *pte);
525 if (!page)
1da177e4 526 continue;
053837fc 527 /*
62b61f61
HD
528 * vm_normal_page() filters out zero pages, but there might
529 * still be PageReserved pages to skip, perhaps in a VDSO.
053837fc 530 */
b79bc0a0 531 if (PageReserved(page))
f4598c8b 532 continue;
6aab341e 533 nid = page_to_nid(page);
6f4576e3 534 if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
38e35860 535 continue;
800d8c63 536 if (PageTransCompound(page)) {
248db92d
KS
537 get_page(page);
538 pte_unmap_unlock(pte, ptl);
539 lock_page(page);
540 ret = split_huge_page(page);
541 unlock_page(page);
542 put_page(page);
543 /* Failed to split -- skip. */
544 if (ret) {
545 pte = pte_offset_map_lock(walk->mm, pmd,
546 addr, &ptl);
547 continue;
548 }
549 goto retry;
550 }
38e35860 551
77bf45e7 552 migrate_page_add(page, qp->pagelist, flags);
6f4576e3
NH
553 }
554 pte_unmap_unlock(pte - 1, ptl);
555 cond_resched();
556 return 0;
91612e0d
HD
557}
558
6f4576e3
NH
559static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
560 unsigned long addr, unsigned long end,
561 struct mm_walk *walk)
e2d8cf40
NH
562{
563#ifdef CONFIG_HUGETLB_PAGE
6f4576e3
NH
564 struct queue_pages *qp = walk->private;
565 unsigned long flags = qp->flags;
e2d8cf40
NH
566 int nid;
567 struct page *page;
cb900f41 568 spinlock_t *ptl;
d4c54919 569 pte_t entry;
e2d8cf40 570
6f4576e3
NH
571 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
572 entry = huge_ptep_get(pte);
d4c54919
NH
573 if (!pte_present(entry))
574 goto unlock;
575 page = pte_page(entry);
e2d8cf40 576 nid = page_to_nid(page);
6f4576e3 577 if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
e2d8cf40
NH
578 goto unlock;
579 /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
580 if (flags & (MPOL_MF_MOVE_ALL) ||
581 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
6f4576e3 582 isolate_huge_page(page, qp->pagelist);
e2d8cf40 583unlock:
cb900f41 584 spin_unlock(ptl);
e2d8cf40
NH
585#else
586 BUG();
587#endif
91612e0d 588 return 0;
1da177e4
LT
589}
590
5877231f 591#ifdef CONFIG_NUMA_BALANCING
b24f53a0 592/*
4b10e7d5
MG
593 * This is used to mark a range of virtual addresses to be inaccessible.
594 * These are later cleared by a NUMA hinting fault. Depending on these
595 * faults, pages may be migrated for better NUMA placement.
596 *
597 * This is assuming that NUMA faults are handled using PROT_NONE. If
598 * an architecture makes a different choice, it will need further
599 * changes to the core.
b24f53a0 600 */
4b10e7d5
MG
601unsigned long change_prot_numa(struct vm_area_struct *vma,
602 unsigned long addr, unsigned long end)
b24f53a0 603{
4b10e7d5 604 int nr_updated;
b24f53a0 605
4d942466 606 nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
03c5a6e1
MG
607 if (nr_updated)
608 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
b24f53a0 609
4b10e7d5 610 return nr_updated;
b24f53a0
LS
611}
612#else
613static unsigned long change_prot_numa(struct vm_area_struct *vma,
614 unsigned long addr, unsigned long end)
615{
616 return 0;
617}
5877231f 618#endif /* CONFIG_NUMA_BALANCING */
b24f53a0 619
6f4576e3
NH
620static int queue_pages_test_walk(unsigned long start, unsigned long end,
621 struct mm_walk *walk)
622{
623 struct vm_area_struct *vma = walk->vma;
624 struct queue_pages *qp = walk->private;
625 unsigned long endvma = vma->vm_end;
626 unsigned long flags = qp->flags;
627
77bf45e7 628 if (!vma_migratable(vma))
48684a65
NH
629 return 1;
630
6f4576e3
NH
631 if (endvma > end)
632 endvma = end;
633 if (vma->vm_start > start)
634 start = vma->vm_start;
635
636 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
637 if (!vma->vm_next && vma->vm_end < end)
638 return -EFAULT;
639 if (qp->prev && qp->prev->vm_end < vma->vm_start)
640 return -EFAULT;
641 }
642
643 qp->prev = vma;
644
6f4576e3
NH
645 if (flags & MPOL_MF_LAZY) {
646 /* Similar to task_numa_work, skip inaccessible VMAs */
4355c018
LC
647 if (!is_vm_hugetlb_page(vma) &&
648 (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
649 !(vma->vm_flags & VM_MIXEDMAP))
6f4576e3
NH
650 change_prot_numa(vma, start, endvma);
651 return 1;
652 }
653
77bf45e7
KS
654 /* queue pages from current vma */
655 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
6f4576e3
NH
656 return 0;
657 return 1;
658}
659
dc9aa5b9 660/*
98094945
NH
661 * Walk through page tables and collect pages to be migrated.
662 *
663 * If pages found in a given range are on a set of nodes (determined by
664 * @nodes and @flags), they are isolated and queued to the pagelist,
665 * which is passed via @private.
dc9aa5b9 666 */
d05f0cdc 667static int
98094945 668queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
6f4576e3
NH
669 nodemask_t *nodes, unsigned long flags,
670 struct list_head *pagelist)
1da177e4 671{
6f4576e3
NH
672 struct queue_pages qp = {
673 .pagelist = pagelist,
674 .flags = flags,
675 .nmask = nodes,
676 .prev = NULL,
677 };
678 struct mm_walk queue_pages_walk = {
679 .hugetlb_entry = queue_pages_hugetlb,
680 .pmd_entry = queue_pages_pte_range,
681 .test_walk = queue_pages_test_walk,
682 .mm = mm,
683 .private = &qp,
684 };
685
686 return walk_page_range(start, end, &queue_pages_walk);
1da177e4
LT
687}
688
869833f2
KM
689/*
690 * Apply policy to a single VMA
691 * This must be called with the mmap_sem held for writing.
692 */
693static int vma_replace_policy(struct vm_area_struct *vma,
694 struct mempolicy *pol)
8d34694c 695{
869833f2
KM
696 int err;
697 struct mempolicy *old;
698 struct mempolicy *new;
8d34694c
KM
699
700 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
701 vma->vm_start, vma->vm_end, vma->vm_pgoff,
702 vma->vm_ops, vma->vm_file,
703 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
704
869833f2
KM
705 new = mpol_dup(pol);
706 if (IS_ERR(new))
707 return PTR_ERR(new);
708
709 if (vma->vm_ops && vma->vm_ops->set_policy) {
8d34694c 710 err = vma->vm_ops->set_policy(vma, new);
869833f2
KM
711 if (err)
712 goto err_out;
8d34694c 713 }
869833f2
KM
714
715 old = vma->vm_policy;
716 vma->vm_policy = new; /* protected by mmap_sem */
717 mpol_put(old);
718
719 return 0;
720 err_out:
721 mpol_put(new);
8d34694c
KM
722 return err;
723}
724
1da177e4 725/* Step 2: apply policy to a range and do splits. */
9d8cebd4
KM
726static int mbind_range(struct mm_struct *mm, unsigned long start,
727 unsigned long end, struct mempolicy *new_pol)
1da177e4
LT
728{
729 struct vm_area_struct *next;
9d8cebd4
KM
730 struct vm_area_struct *prev;
731 struct vm_area_struct *vma;
732 int err = 0;
e26a5114 733 pgoff_t pgoff;
9d8cebd4
KM
734 unsigned long vmstart;
735 unsigned long vmend;
1da177e4 736
097d5910 737 vma = find_vma(mm, start);
9d8cebd4
KM
738 if (!vma || vma->vm_start > start)
739 return -EFAULT;
740
097d5910 741 prev = vma->vm_prev;
e26a5114
KM
742 if (start > vma->vm_start)
743 prev = vma;
744
9d8cebd4 745 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
1da177e4 746 next = vma->vm_next;
9d8cebd4
KM
747 vmstart = max(start, vma->vm_start);
748 vmend = min(end, vma->vm_end);
749
e26a5114
KM
750 if (mpol_equal(vma_policy(vma), new_pol))
751 continue;
752
753 pgoff = vma->vm_pgoff +
754 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
9d8cebd4 755 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
19a809af
AA
756 vma->anon_vma, vma->vm_file, pgoff,
757 new_pol, vma->vm_userfaultfd_ctx);
9d8cebd4
KM
758 if (prev) {
759 vma = prev;
760 next = vma->vm_next;
3964acd0
ON
761 if (mpol_equal(vma_policy(vma), new_pol))
762 continue;
763 /* vma_merge() joined vma && vma->next, case 8 */
764 goto replace;
9d8cebd4
KM
765 }
766 if (vma->vm_start != vmstart) {
767 err = split_vma(vma->vm_mm, vma, vmstart, 1);
768 if (err)
769 goto out;
770 }
771 if (vma->vm_end != vmend) {
772 err = split_vma(vma->vm_mm, vma, vmend, 0);
773 if (err)
774 goto out;
775 }
3964acd0 776 replace:
869833f2 777 err = vma_replace_policy(vma, new_pol);
8d34694c
KM
778 if (err)
779 goto out;
1da177e4 780 }
9d8cebd4
KM
781
782 out:
1da177e4
LT
783 return err;
784}
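/*
 * Editor's sketch of a typical mbind_range() outcome (assumed addresses):
 * applying a new policy to [0x3000, 0x6000) inside a VMA spanning
 * [0x1000, 0x9000), when no merge with a neighbour is possible, triggers
 * two split_vma() calls; [0x1000, 0x3000) and [0x6000, 0x9000) keep the
 * old policy while [0x3000, 0x6000) gets the new one.
 */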
785
1da177e4 786/* Set the process memory policy */
028fec41
DR
787static long do_set_mempolicy(unsigned short mode, unsigned short flags,
788 nodemask_t *nodes)
1da177e4 789{
58568d2a 790 struct mempolicy *new, *old;
4bfc4495 791 NODEMASK_SCRATCH(scratch);
58568d2a 792 int ret;
1da177e4 793
4bfc4495
KH
794 if (!scratch)
795 return -ENOMEM;
f4e53d91 796
4bfc4495
KH
797 new = mpol_new(mode, flags, nodes);
798 if (IS_ERR(new)) {
799 ret = PTR_ERR(new);
800 goto out;
801 }
2c7c3a7d 802
58568d2a 803 task_lock(current);
4bfc4495 804 ret = mpol_set_nodemask(new, nodes, scratch);
58568d2a
MX
805 if (ret) {
806 task_unlock(current);
58568d2a 807 mpol_put(new);
4bfc4495 808 goto out;
58568d2a
MX
809 }
810 old = current->mempolicy;
1da177e4 811 current->mempolicy = new;
45c4745a 812 if (new && new->mode == MPOL_INTERLEAVE &&
f5b087b5 813 nodes_weight(new->v.nodes))
dfcd3c0d 814 current->il_next = first_node(new->v.nodes);
58568d2a 815 task_unlock(current);
58568d2a 816 mpol_put(old);
4bfc4495
KH
817 ret = 0;
818out:
819 NODEMASK_SCRATCH_FREE(scratch);
820 return ret;
1da177e4
LT
821}
822
bea904d5
LS
823/*
824 * Return nodemask for policy for get_mempolicy() query
58568d2a
MX
825 *
826 * Called with task's alloc_lock held
bea904d5
LS
827 */
828static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
1da177e4 829{
dfcd3c0d 830 nodes_clear(*nodes);
bea904d5
LS
831 if (p == &default_policy)
832 return;
833
45c4745a 834 switch (p->mode) {
19770b32
MG
835 case MPOL_BIND:
836 /* Fall through */
1da177e4 837 case MPOL_INTERLEAVE:
dfcd3c0d 838 *nodes = p->v.nodes;
1da177e4
LT
839 break;
840 case MPOL_PREFERRED:
fc36b8d3 841 if (!(p->flags & MPOL_F_LOCAL))
dfcd3c0d 842 node_set(p->v.preferred_node, *nodes);
53f2556b 843 /* else return empty node mask for local allocation */
1da177e4
LT
844 break;
845 default:
846 BUG();
847 }
848}
849
d4edcf0d 850static int lookup_node(unsigned long addr)
1da177e4
LT
851{
852 struct page *p;
853 int err;
854
768ae309 855 err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
1da177e4
LT
856 if (err >= 0) {
857 err = page_to_nid(p);
858 put_page(p);
859 }
860 return err;
861}
862
1da177e4 863/* Retrieve NUMA policy */
dbcb0f19
AB
864static long do_get_mempolicy(int *policy, nodemask_t *nmask,
865 unsigned long addr, unsigned long flags)
1da177e4 866{
8bccd85f 867 int err;
1da177e4
LT
868 struct mm_struct *mm = current->mm;
869 struct vm_area_struct *vma = NULL;
870 struct mempolicy *pol = current->mempolicy;
871
754af6f5
LS
872 if (flags &
873 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
1da177e4 874 return -EINVAL;
754af6f5
LS
875
876 if (flags & MPOL_F_MEMS_ALLOWED) {
877 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
878 return -EINVAL;
879 *policy = 0; /* just so it's initialized */
58568d2a 880 task_lock(current);
754af6f5 881 *nmask = cpuset_current_mems_allowed;
58568d2a 882 task_unlock(current);
754af6f5
LS
883 return 0;
884 }
885
1da177e4 886 if (flags & MPOL_F_ADDR) {
bea904d5
LS
887 /*
888 * Do NOT fall back to task policy if the
889 * vma/shared policy at addr is NULL. We
890 * want to return MPOL_DEFAULT in this case.
891 */
1da177e4
LT
892 down_read(&mm->mmap_sem);
893 vma = find_vma_intersection(mm, addr, addr+1);
894 if (!vma) {
895 up_read(&mm->mmap_sem);
896 return -EFAULT;
897 }
898 if (vma->vm_ops && vma->vm_ops->get_policy)
899 pol = vma->vm_ops->get_policy(vma, addr);
900 else
901 pol = vma->vm_policy;
902 } else if (addr)
903 return -EINVAL;
904
905 if (!pol)
bea904d5 906 pol = &default_policy; /* indicates default behavior */
1da177e4
LT
907
908 if (flags & MPOL_F_NODE) {
909 if (flags & MPOL_F_ADDR) {
d4edcf0d 910 err = lookup_node(addr);
1da177e4
LT
911 if (err < 0)
912 goto out;
8bccd85f 913 *policy = err;
1da177e4 914 } else if (pol == current->mempolicy &&
45c4745a 915 pol->mode == MPOL_INTERLEAVE) {
8bccd85f 916 *policy = current->il_next;
1da177e4
LT
917 } else {
918 err = -EINVAL;
919 goto out;
920 }
bea904d5
LS
921 } else {
922 *policy = pol == &default_policy ? MPOL_DEFAULT :
923 pol->mode;
d79df630
DR
924 /*
925 * Internal mempolicy flags must be masked off before exposing
926 * the policy to userspace.
927 */
928 *policy |= (pol->flags & MPOL_MODE_FLAGS);
bea904d5 929 }
1da177e4
LT
930
931 if (vma) {
932 up_read(&current->mm->mmap_sem);
933 vma = NULL;
934 }
935
1da177e4 936 err = 0;
58568d2a 937 if (nmask) {
c6b6ef8b
LS
938 if (mpol_store_user_nodemask(pol)) {
939 *nmask = pol->w.user_nodemask;
940 } else {
941 task_lock(current);
942 get_policy_nodemask(pol, nmask);
943 task_unlock(current);
944 }
58568d2a 945 }
1da177e4
LT
946
947 out:
52cd3b07 948 mpol_cond_put(pol);
1da177e4
LT
949 if (vma)
950 up_read(&current->mm->mmap_sem);
951 return err;
952}
953
b20a3503 954#ifdef CONFIG_MIGRATION
6ce3c4c0
CL
955/*
956 * page migration
957 */
fc301289
CL
958static void migrate_page_add(struct page *page, struct list_head *pagelist,
959 unsigned long flags)
6ce3c4c0
CL
960{
961 /*
fc301289 962 * Avoid migrating a page that is shared with others.
6ce3c4c0 963 */
62695a84
NP
964 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
965 if (!isolate_lru_page(page)) {
966 list_add_tail(&page->lru, pagelist);
599d0c95 967 inc_node_page_state(page, NR_ISOLATED_ANON +
6d9c285a 968 page_is_file_cache(page));
62695a84
NP
969 }
970 }
7e2ab150 971}
6ce3c4c0 972
742755a1 973static struct page *new_node_page(struct page *page, unsigned long node, int **x)
95a402c3 974{
e2d8cf40
NH
975 if (PageHuge(page))
976 return alloc_huge_page_node(page_hstate(compound_head(page)),
977 node);
978 else
96db800f 979 return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
b360edb4 980 __GFP_THISNODE, 0);
95a402c3
CL
981}
982
7e2ab150
CL
983/*
984 * Migrate pages from one node to a target node.
985 * Returns error or the number of pages not migrated.
986 */
dbcb0f19
AB
987static int migrate_to_node(struct mm_struct *mm, int source, int dest,
988 int flags)
7e2ab150
CL
989{
990 nodemask_t nmask;
991 LIST_HEAD(pagelist);
992 int err = 0;
993
994 nodes_clear(nmask);
995 node_set(source, nmask);
6ce3c4c0 996
08270807
MK
997 /*
998 * This does not "check" the range but isolates all pages that
999 * need migration. Between passing in the full user address
1000 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1001 */
1002 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
98094945 1003 queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
7e2ab150
CL
1004 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1005
cf608ac1 1006 if (!list_empty(&pagelist)) {
68711a74 1007 err = migrate_pages(&pagelist, new_node_page, NULL, dest,
9c620e2b 1008 MIGRATE_SYNC, MR_SYSCALL);
cf608ac1 1009 if (err)
e2d8cf40 1010 putback_movable_pages(&pagelist);
cf608ac1 1011 }
95a402c3 1012
7e2ab150 1013 return err;
6ce3c4c0
CL
1014}
1015
39743889 1016/*
7e2ab150
CL
1017 * Move pages between the two nodesets so as to preserve the physical
1018 * layout as much as possible.
39743889
CL
1019 *
1020 * Returns the number of pages that could not be moved.
1021 */
0ce72d4f
AM
1022int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1023 const nodemask_t *to, int flags)
39743889 1024{
7e2ab150 1025 int busy = 0;
0aedadf9 1026 int err;
7e2ab150 1027 nodemask_t tmp;
39743889 1028
0aedadf9
CL
1029 err = migrate_prep();
1030 if (err)
1031 return err;
1032
53f2556b 1033 down_read(&mm->mmap_sem);
39743889 1034
da0aa138
KM
1035 /*
1036 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1037 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1038 * bit in 'tmp', and return that <source, dest> pair for migration.
1039 * The pair of nodemasks 'to' and 'from' define the map.
1040 *
1041 * If no pair of bits is found that way, fallback to picking some
1042 * pair of 'source' and 'dest' bits that are not the same. If the
1043 * 'source' and 'dest' bits are the same, this represents a node
1044 * that will be migrating to itself, so no pages need move.
1045 *
1046 * If no bits are left in 'tmp', or if all remaining bits left
1047 * in 'tmp' correspond to the same bit in 'to', return false
1048 * (nothing left to migrate).
1049 *
1050 * This lets us pick a pair of nodes to migrate between, such that
1051 * if possible the dest node is not already occupied by some other
1052 * source node, minimizing the risk of overloading the memory on a
1053 * node that would happen if we migrated incoming memory to a node
1054 * before migrating outgoing memory from that same node.
1055 *
1056 * A single scan of tmp is sufficient. As we go, we remember the
1057 * most recent <s, d> pair that moved (s != d). If we find a pair
1058 * that not only moved, but what's better, moved to an empty slot
1059 * (d is not set in tmp), then we break out then, with that pair.
1060 * Otherwise when we finish scanning tmp, we at least have the
da0aa138
KM
1061 * most recent <s, d> pair that moved. If we get all the way through
1062 * the scan of tmp without finding any node that moved, much less
1063 * moved to an empty node, then there is nothing left worth migrating.
1064 */
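	/*
	 * Editor's worked example (assumed masks): with *from = {0,1} and
	 * *to = {1,2}, the scan below first finds s=1 -> d=2, an empty slot,
	 * so node 1 is migrated to node 2; tmp then shrinks to {0} and the
	 * next pass moves node 0 -> node 1.  Migrating into the empty node
	 * first is what keeps incoming pages from piling onto a node that
	 * still has pages waiting to move out.
	 */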
d4984711 1065
0ce72d4f 1066 tmp = *from;
7e2ab150
CL
1067 while (!nodes_empty(tmp)) {
1068 int s,d;
b76ac7e7 1069 int source = NUMA_NO_NODE;
7e2ab150
CL
1070 int dest = 0;
1071
1072 for_each_node_mask(s, tmp) {
4a5b18cc
LW
1073
1074 /*
1075 * do_migrate_pages() tries to maintain the relative
1076 * node relationship of the pages established between
1077 * threads and memory areas.
1078 *
1079 * However if the number of source nodes is not equal to
1080 * the number of destination nodes we can not preserve
1081 * this node relative relationship. In that case, skip
1082 * copying memory from a node that is in the destination
1083 * mask.
1084 *
1085 * Example: [2,3,4] -> [3,4,5] moves everything.
1086 * [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1087 */
1088
0ce72d4f
AM
1089 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1090 (node_isset(s, *to)))
4a5b18cc
LW
1091 continue;
1092
0ce72d4f 1093 d = node_remap(s, *from, *to);
7e2ab150
CL
1094 if (s == d)
1095 continue;
1096
1097 source = s; /* Node moved. Memorize */
1098 dest = d;
1099
1100 /* dest not in remaining from nodes? */
1101 if (!node_isset(dest, tmp))
1102 break;
1103 }
b76ac7e7 1104 if (source == NUMA_NO_NODE)
7e2ab150
CL
1105 break;
1106
1107 node_clear(source, tmp);
1108 err = migrate_to_node(mm, source, dest, flags);
1109 if (err > 0)
1110 busy += err;
1111 if (err < 0)
1112 break;
39743889
CL
1113 }
1114 up_read(&mm->mmap_sem);
7e2ab150
CL
1115 if (err < 0)
1116 return err;
1117 return busy;
b20a3503
CL
1118
1119}
1120
3ad33b24
LS
1121/*
1122 * Allocate a new page for page migration based on vma policy.
1123 * Start by assuming the page is mapped by the same vma that contains @start.
3ad33b24
LS
1124 * Search forward from there, if not. N.B., this assumes that the
1125 * list of pages handed to migrate_pages()--which is how we get here--
1126 * is in virtual address order.
1127 */
d05f0cdc 1128static struct page *new_page(struct page *page, unsigned long start, int **x)
95a402c3 1129{
d05f0cdc 1130 struct vm_area_struct *vma;
3ad33b24 1131 unsigned long uninitialized_var(address);
95a402c3 1132
d05f0cdc 1133 vma = find_vma(current->mm, start);
3ad33b24
LS
1134 while (vma) {
1135 address = page_address_in_vma(page, vma);
1136 if (address != -EFAULT)
1137 break;
1138 vma = vma->vm_next;
1139 }
11c731e8
WL
1140
1141 if (PageHuge(page)) {
cc81717e
MH
1142 BUG_ON(!vma);
1143 return alloc_huge_page_noerr(vma, address, 1);
11c731e8 1144 }
0bf598d8 1145 /*
11c731e8 1146 * if !vma, alloc_page_vma() will use task or system default policy
0bf598d8 1147 */
3ad33b24 1148 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
95a402c3 1149}
b20a3503
CL
1150#else
1151
1152static void migrate_page_add(struct page *page, struct list_head *pagelist,
1153 unsigned long flags)
1154{
39743889
CL
1155}
1156
0ce72d4f
AM
1157int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1158 const nodemask_t *to, int flags)
b20a3503
CL
1159{
1160 return -ENOSYS;
1161}
95a402c3 1162
d05f0cdc 1163static struct page *new_page(struct page *page, unsigned long start, int **x)
95a402c3
CL
1164{
1165 return NULL;
1166}
b20a3503
CL
1167#endif
1168
dbcb0f19 1169static long do_mbind(unsigned long start, unsigned long len,
028fec41
DR
1170 unsigned short mode, unsigned short mode_flags,
1171 nodemask_t *nmask, unsigned long flags)
6ce3c4c0 1172{
6ce3c4c0
CL
1173 struct mm_struct *mm = current->mm;
1174 struct mempolicy *new;
1175 unsigned long end;
1176 int err;
1177 LIST_HEAD(pagelist);
1178
b24f53a0 1179 if (flags & ~(unsigned long)MPOL_MF_VALID)
6ce3c4c0 1180 return -EINVAL;
74c00241 1181 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
6ce3c4c0
CL
1182 return -EPERM;
1183
1184 if (start & ~PAGE_MASK)
1185 return -EINVAL;
1186
1187 if (mode == MPOL_DEFAULT)
1188 flags &= ~MPOL_MF_STRICT;
1189
1190 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1191 end = start + len;
1192
1193 if (end < start)
1194 return -EINVAL;
1195 if (end == start)
1196 return 0;
1197
028fec41 1198 new = mpol_new(mode, mode_flags, nmask);
6ce3c4c0
CL
1199 if (IS_ERR(new))
1200 return PTR_ERR(new);
1201
b24f53a0
LS
1202 if (flags & MPOL_MF_LAZY)
1203 new->flags |= MPOL_F_MOF;
1204
6ce3c4c0
CL
1205 /*
1206 * If we are using the default policy then operation
1207 * on discontinuous address spaces is okay after all
1208 */
1209 if (!new)
1210 flags |= MPOL_MF_DISCONTIG_OK;
1211
028fec41
DR
1212 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1213 start, start + len, mode, mode_flags,
00ef2d2f 1214 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
6ce3c4c0 1215
0aedadf9
CL
1216 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1217
1218 err = migrate_prep();
1219 if (err)
b05ca738 1220 goto mpol_out;
0aedadf9 1221 }
4bfc4495
KH
1222 {
1223 NODEMASK_SCRATCH(scratch);
1224 if (scratch) {
1225 down_write(&mm->mmap_sem);
1226 task_lock(current);
1227 err = mpol_set_nodemask(new, nmask, scratch);
1228 task_unlock(current);
1229 if (err)
1230 up_write(&mm->mmap_sem);
1231 } else
1232 err = -ENOMEM;
1233 NODEMASK_SCRATCH_FREE(scratch);
1234 }
b05ca738
KM
1235 if (err)
1236 goto mpol_out;
1237
d05f0cdc 1238 err = queue_pages_range(mm, start, end, nmask,
6ce3c4c0 1239 flags | MPOL_MF_INVERT, &pagelist);
d05f0cdc 1240 if (!err)
9d8cebd4 1241 err = mbind_range(mm, start, end, new);
7e2ab150 1242
b24f53a0
LS
1243 if (!err) {
1244 int nr_failed = 0;
1245
cf608ac1 1246 if (!list_empty(&pagelist)) {
b24f53a0 1247 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
d05f0cdc
HD
1248 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1249 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
cf608ac1 1250 if (nr_failed)
74060e4d 1251 putback_movable_pages(&pagelist);
cf608ac1 1252 }
6ce3c4c0 1253
b24f53a0 1254 if (nr_failed && (flags & MPOL_MF_STRICT))
6ce3c4c0 1255 err = -EIO;
ab8a3e14 1256 } else
b0e5fd73 1257 putback_movable_pages(&pagelist);
b20a3503 1258
6ce3c4c0 1259 up_write(&mm->mmap_sem);
b05ca738 1260 mpol_out:
f0be3d32 1261 mpol_put(new);
6ce3c4c0
CL
1262 return err;
1263}
1264
8bccd85f
CL
1265/*
1266 * User space interface with variable sized bitmaps for nodelists.
1267 */
1268
1269/* Copy a node mask from user space. */
39743889 1270static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
8bccd85f
CL
1271 unsigned long maxnode)
1272{
1273 unsigned long k;
1274 unsigned long nlongs;
1275 unsigned long endmask;
1276
1277 --maxnode;
1278 nodes_clear(*nodes);
1279 if (maxnode == 0 || !nmask)
1280 return 0;
a9c930ba 1281 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
636f13c1 1282 return -EINVAL;
8bccd85f
CL
1283
1284 nlongs = BITS_TO_LONGS(maxnode);
1285 if ((maxnode % BITS_PER_LONG) == 0)
1286 endmask = ~0UL;
1287 else
1288 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1289
1290 /* When the user specifies more nodes than supported, just check
1291 that the unsupported part is all zero. */
1292 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1293 if (nlongs > PAGE_SIZE/sizeof(long))
1294 return -EINVAL;
1295 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1296 unsigned long t;
1297 if (get_user(t, nmask + k))
1298 return -EFAULT;
1299 if (k == nlongs - 1) {
1300 if (t & endmask)
1301 return -EINVAL;
1302 } else if (t)
1303 return -EINVAL;
1304 }
1305 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1306 endmask = ~0UL;
1307 }
1308
1309 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1310 return -EFAULT;
1311 nodes_addr(*nodes)[nlongs-1] &= endmask;
1312 return 0;
1313}
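/*
 * Editor's worked example for the maxnode convention (assumed values): a
 * caller passing maxnode = 17 is describing bits 0..15, so after the
 * "--maxnode" above nlongs = 1 and endmask = 0xffff; one long is copied
 * from userspace and masked with endmask, i.e. bits 16..63 of that word
 * are ignored.  Only when the caller claims more nodes than MAX_NUMNODES
 * does a stray high bit produce -EINVAL.
 */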
1314
1315/* Copy a kernel node mask to user space */
1316static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1317 nodemask_t *nodes)
1318{
1319 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1320 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1321
1322 if (copy > nbytes) {
1323 if (copy > PAGE_SIZE)
1324 return -EINVAL;
1325 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1326 return -EFAULT;
1327 copy = nbytes;
1328 }
1329 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1330}
1331
938bb9f5 1332SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
f7f28ca9 1333 unsigned long, mode, const unsigned long __user *, nmask,
938bb9f5 1334 unsigned long, maxnode, unsigned, flags)
8bccd85f
CL
1335{
1336 nodemask_t nodes;
1337 int err;
028fec41 1338 unsigned short mode_flags;
8bccd85f 1339
028fec41
DR
1340 mode_flags = mode & MPOL_MODE_FLAGS;
1341 mode &= ~MPOL_MODE_FLAGS;
a3b51e01
DR
1342 if (mode >= MPOL_MAX)
1343 return -EINVAL;
4c50bc01
DR
1344 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1345 (mode_flags & MPOL_F_RELATIVE_NODES))
1346 return -EINVAL;
8bccd85f
CL
1347 err = get_nodes(&nodes, nmask, maxnode);
1348 if (err)
1349 return err;
028fec41 1350 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
8bccd85f
CL
1351}
1352
1353/* Set the process memory policy */
23c8902d 1354SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
938bb9f5 1355 unsigned long, maxnode)
8bccd85f
CL
1356{
1357 int err;
1358 nodemask_t nodes;
028fec41 1359 unsigned short flags;
8bccd85f 1360
028fec41
DR
1361 flags = mode & MPOL_MODE_FLAGS;
1362 mode &= ~MPOL_MODE_FLAGS;
1363 if ((unsigned int)mode >= MPOL_MAX)
8bccd85f 1364 return -EINVAL;
4c50bc01
DR
1365 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1366 return -EINVAL;
8bccd85f
CL
1367 err = get_nodes(&nodes, nmask, maxnode);
1368 if (err)
1369 return err;
028fec41 1370 return do_set_mempolicy(mode, flags, &nodes);
8bccd85f
CL
1371}
1372
938bb9f5
HC
1373SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1374 const unsigned long __user *, old_nodes,
1375 const unsigned long __user *, new_nodes)
39743889 1376{
c69e8d9c 1377 const struct cred *cred = current_cred(), *tcred;
596d7cfa 1378 struct mm_struct *mm = NULL;
39743889 1379 struct task_struct *task;
39743889
CL
1380 nodemask_t task_nodes;
1381 int err;
596d7cfa
KM
1382 nodemask_t *old;
1383 nodemask_t *new;
1384 NODEMASK_SCRATCH(scratch);
1385
1386 if (!scratch)
1387 return -ENOMEM;
39743889 1388
596d7cfa
KM
1389 old = &scratch->mask1;
1390 new = &scratch->mask2;
1391
1392 err = get_nodes(old, old_nodes, maxnode);
39743889 1393 if (err)
596d7cfa 1394 goto out;
39743889 1395
596d7cfa 1396 err = get_nodes(new, new_nodes, maxnode);
39743889 1397 if (err)
596d7cfa 1398 goto out;
39743889
CL
1399
1400 /* Find the mm_struct */
55cfaa3c 1401 rcu_read_lock();
228ebcbe 1402 task = pid ? find_task_by_vpid(pid) : current;
39743889 1403 if (!task) {
55cfaa3c 1404 rcu_read_unlock();
596d7cfa
KM
1405 err = -ESRCH;
1406 goto out;
39743889 1407 }
3268c63e 1408 get_task_struct(task);
39743889 1409
596d7cfa 1410 err = -EINVAL;
39743889
CL
1411
1412 /*
1413 * Check if this process has the right to modify the specified
1414 * process. The right exists if the process has administrative
7f927fcc 1415 * capabilities, superuser privileges or the same
39743889
CL
1416 * userid as the target process.
1417 */
c69e8d9c 1418 tcred = __task_cred(task);
b38a86eb
EB
1419 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1420 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
74c00241 1421 !capable(CAP_SYS_NICE)) {
c69e8d9c 1422 rcu_read_unlock();
39743889 1423 err = -EPERM;
3268c63e 1424 goto out_put;
39743889 1425 }
c69e8d9c 1426 rcu_read_unlock();
39743889
CL
1427
1428 task_nodes = cpuset_mems_allowed(task);
1429 /* Is the user allowed to access the target nodes? */
596d7cfa 1430 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
39743889 1431 err = -EPERM;
3268c63e 1432 goto out_put;
39743889
CL
1433 }
1434
01f13bd6 1435 if (!nodes_subset(*new, node_states[N_MEMORY])) {
3b42d28b 1436 err = -EINVAL;
3268c63e 1437 goto out_put;
3b42d28b
CL
1438 }
1439
86c3a764
DQ
1440 err = security_task_movememory(task);
1441 if (err)
3268c63e 1442 goto out_put;
86c3a764 1443
3268c63e
CL
1444 mm = get_task_mm(task);
1445 put_task_struct(task);
f2a9ef88
SL
1446
1447 if (!mm) {
3268c63e 1448 err = -EINVAL;
f2a9ef88
SL
1449 goto out;
1450 }
1451
1452 err = do_migrate_pages(mm, old, new,
1453 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
3268c63e
CL
1454
1455 mmput(mm);
1456out:
596d7cfa
KM
1457 NODEMASK_SCRATCH_FREE(scratch);
1458
39743889 1459 return err;
3268c63e
CL
1460
1461out_put:
1462 put_task_struct(task);
1463 goto out;
1464
39743889
CL
1465}
1466
1467
8bccd85f 1468/* Retrieve NUMA policy */
938bb9f5
HC
1469SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1470 unsigned long __user *, nmask, unsigned long, maxnode,
1471 unsigned long, addr, unsigned long, flags)
8bccd85f 1472{
dbcb0f19
AB
1473 int err;
1474 int uninitialized_var(pval);
8bccd85f
CL
1475 nodemask_t nodes;
1476
1477 if (nmask != NULL && maxnode < MAX_NUMNODES)
1478 return -EINVAL;
1479
1480 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1481
1482 if (err)
1483 return err;
1484
1485 if (policy && put_user(pval, policy))
1486 return -EFAULT;
1487
1488 if (nmask)
1489 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1490
1491 return err;
1492}
1493
1da177e4
LT
1494#ifdef CONFIG_COMPAT
1495
c93e0f6c
HC
1496COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1497 compat_ulong_t __user *, nmask,
1498 compat_ulong_t, maxnode,
1499 compat_ulong_t, addr, compat_ulong_t, flags)
1da177e4
LT
1500{
1501 long err;
1502 unsigned long __user *nm = NULL;
1503 unsigned long nr_bits, alloc_size;
1504 DECLARE_BITMAP(bm, MAX_NUMNODES);
1505
1506 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1507 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1508
1509 if (nmask)
1510 nm = compat_alloc_user_space(alloc_size);
1511
1512 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1513
1514 if (!err && nmask) {
2bbff6c7
KH
1515 unsigned long copy_size;
1516 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1517 err = copy_from_user(bm, nm, copy_size);
1da177e4
LT
1518 /* ensure entire bitmap is zeroed */
1519 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1520 err |= compat_put_bitmap(nmask, bm, nr_bits);
1521 }
1522
1523 return err;
1524}
1525
c93e0f6c
HC
1526COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1527 compat_ulong_t, maxnode)
1da177e4 1528{
1da177e4
LT
1529 unsigned long __user *nm = NULL;
1530 unsigned long nr_bits, alloc_size;
1531 DECLARE_BITMAP(bm, MAX_NUMNODES);
1532
1533 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1534 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1535
1536 if (nmask) {
1e78d67f
CS
1537 if (compat_get_bitmap(bm, nmask, nr_bits))
1538 return -EFAULT;
1da177e4 1539 nm = compat_alloc_user_space(alloc_size);
1e78d67f
CS
1540 if (copy_to_user(nm, bm, alloc_size))
1541 return -EFAULT;
1da177e4
LT
1542 }
1543
1da177e4
LT
1544 return sys_set_mempolicy(mode, nm, nr_bits+1);
1545}
1546
c93e0f6c
HC
1547COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1548 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1549 compat_ulong_t, maxnode, compat_ulong_t, flags)
1da177e4 1550{
1da177e4
LT
1551 unsigned long __user *nm = NULL;
1552 unsigned long nr_bits, alloc_size;
dfcd3c0d 1553 nodemask_t bm;
1da177e4
LT
1554
1555 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1556 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1557
1558 if (nmask) {
1e78d67f
CS
1559 if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1560 return -EFAULT;
1da177e4 1561 nm = compat_alloc_user_space(alloc_size);
1e78d67f
CS
1562 if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1563 return -EFAULT;
1da177e4
LT
1564 }
1565
1da177e4
LT
1566 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1567}
1568
1569#endif
1570
74d2c3a0
ON
1571struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1572 unsigned long addr)
1da177e4 1573{
8d90274b 1574 struct mempolicy *pol = NULL;
1da177e4
LT
1575
1576 if (vma) {
480eccf9 1577 if (vma->vm_ops && vma->vm_ops->get_policy) {
8d90274b 1578 pol = vma->vm_ops->get_policy(vma, addr);
00442ad0 1579 } else if (vma->vm_policy) {
1da177e4 1580 pol = vma->vm_policy;
00442ad0
MG
1581
1582 /*
1583 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1584 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1585 * count on these policies which will be dropped by
1586 * mpol_cond_put() later
1587 */
1588 if (mpol_needs_cond_ref(pol))
1589 mpol_get(pol);
1590 }
1da177e4 1591 }
f15ca78e 1592
74d2c3a0
ON
1593 return pol;
1594}
1595
1596/*
dd6eecb9 1597 * get_vma_policy(@vma, @addr)
74d2c3a0
ON
1598 * @vma: virtual memory area whose policy is sought
1599 * @addr: address in @vma for shared policy lookup
1600 *
1601 * Returns effective policy for a VMA at specified address.
dd6eecb9 1602 * Falls back to current->mempolicy or system default policy, as necessary.
74d2c3a0
ON
1603 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1604 * count--added by the get_policy() vm_op, as appropriate--to protect against
1605 * freeing by another task. It is the caller's responsibility to free the
1606 * extra reference for shared policies.
1607 */
dd6eecb9
ON
1608static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1609 unsigned long addr)
74d2c3a0
ON
1610{
1611 struct mempolicy *pol = __get_vma_policy(vma, addr);
1612
8d90274b 1613 if (!pol)
dd6eecb9 1614 pol = get_task_policy(current);
8d90274b 1615
1da177e4
LT
1616 return pol;
1617}
1618
6b6482bb 1619bool vma_policy_mof(struct vm_area_struct *vma)
fc314724 1620{
6b6482bb 1621 struct mempolicy *pol;
fc314724 1622
6b6482bb
ON
1623 if (vma->vm_ops && vma->vm_ops->get_policy) {
1624 bool ret = false;
fc314724 1625
6b6482bb
ON
1626 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1627 if (pol && (pol->flags & MPOL_F_MOF))
1628 ret = true;
1629 mpol_cond_put(pol);
8d90274b 1630
6b6482bb 1631 return ret;
fc314724
MG
1632 }
1633
6b6482bb 1634 pol = vma->vm_policy;
8d90274b 1635 if (!pol)
6b6482bb 1636 pol = get_task_policy(current);
8d90274b 1637
fc314724
MG
1638 return pol->flags & MPOL_F_MOF;
1639}
1640
d3eb1570
LJ
1641static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1642{
1643 enum zone_type dynamic_policy_zone = policy_zone;
1644
1645 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1646
1647 /*
1648 * if policy->v.nodes has movable memory only,
1649 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1650 *
1651 * policy->v.nodes is intersected with node_states[N_MEMORY],
1652 * so if the following test fails, it implies
1653 * policy->v.nodes has movable memory only.
1654 */
1655 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1656 dynamic_policy_zone = ZONE_MOVABLE;
1657
1658 return zone >= dynamic_policy_zone;
1659}
1660
52cd3b07
LS
1661/*
1662 * Return a nodemask representing a mempolicy for filtering nodes for
1663 * page allocation
1664 */
1665static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
19770b32
MG
1666{
1667 /* Lower zones don't get a nodemask applied for MPOL_BIND */
45c4745a 1668 if (unlikely(policy->mode == MPOL_BIND) &&
d3eb1570 1669 apply_policy_zone(policy, gfp_zone(gfp)) &&
19770b32
MG
1670 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1671 return &policy->v.nodes;
1672
1673 return NULL;
1674}
1675
52cd3b07 1676/* Return a zonelist indicated by gfp for node representing a mempolicy */
2f5f9486
AK
1677static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1678 int nd)
1da177e4 1679{
6d840958
MH
1680 if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
1681 nd = policy->v.preferred_node;
1682 else {
19770b32 1683 /*
6d840958
MH
1684 * __GFP_THISNODE shouldn't even be used with the bind policy
1685 * because we might easily break the expectation to stay on the
1686 * requested node and not break the policy.
19770b32 1687 */
6d840958 1688 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1da177e4 1689 }
6d840958 1690
0e88460d 1691 return node_zonelist(nd, gfp);
1da177e4
LT
1692}
1693
1694/* Do dynamic interleaving for a process */
1695static unsigned interleave_nodes(struct mempolicy *policy)
1696{
1697 unsigned nid, next;
1698 struct task_struct *me = current;
1699
1700 nid = me->il_next;
0edaf86c 1701 next = next_node_in(nid, policy->v.nodes);
f5b087b5
DR
1702 if (next < MAX_NUMNODES)
1703 me->il_next = next;
1da177e4
LT
1704 return nid;
1705}
1706
dc85da15
CL
1707/*
1708 * Depending on the memory policy provide a node from which to allocate the
1709 * next slab entry.
1710 */
2a389610 1711unsigned int mempolicy_slab_node(void)
dc85da15 1712{
e7b691b0 1713 struct mempolicy *policy;
2a389610 1714 int node = numa_mem_id();
e7b691b0
AK
1715
1716 if (in_interrupt())
2a389610 1717 return node;
e7b691b0
AK
1718
1719 policy = current->mempolicy;
fc36b8d3 1720 if (!policy || policy->flags & MPOL_F_LOCAL)
2a389610 1721 return node;
bea904d5
LS
1722
1723 switch (policy->mode) {
1724 case MPOL_PREFERRED:
fc36b8d3
LS
1725 /*
1726 * handled MPOL_F_LOCAL above
1727 */
1728 return policy->v.preferred_node;
765c4507 1729
dc85da15
CL
1730 case MPOL_INTERLEAVE:
1731 return interleave_nodes(policy);
1732
dd1a239f 1733 case MPOL_BIND: {
c33d6c06
MG
1734 struct zoneref *z;
1735
dc85da15
CL
1736 /*
1737 * Follow bind policy behavior and start allocation at the
1738 * first node.
1739 */
19770b32 1740 struct zonelist *zonelist;
19770b32 1741 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
c9634cf0 1742 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
c33d6c06
MG
1743 z = first_zones_zonelist(zonelist, highest_zoneidx,
1744 &policy->v.nodes);
1745 return z->zone ? z->zone->node : node;
dd1a239f 1746 }
dc85da15 1747
dc85da15 1748 default:
bea904d5 1749 BUG();
dc85da15
CL
1750 }
1751}
1752
fee83b3a
AM
1753/*
1754 * Do static interleaving for a VMA with known offset @n. Returns the n'th
1755 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1756 * number of present nodes.
1757 */
1da177e4 1758static unsigned offset_il_node(struct mempolicy *pol,
fee83b3a 1759 struct vm_area_struct *vma, unsigned long n)
1da177e4 1760{
dfcd3c0d 1761 unsigned nnodes = nodes_weight(pol->v.nodes);
f5b087b5 1762 unsigned target;
fee83b3a
AM
1763 int i;
1764 int nid;
1da177e4 1765
f5b087b5
DR
1766 if (!nnodes)
1767 return numa_node_id();
fee83b3a
AM
1768 target = (unsigned int)n % nnodes;
1769 nid = first_node(pol->v.nodes);
1770 for (i = 0; i < target; i++)
dfcd3c0d 1771 nid = next_node(nid, pol->v.nodes);
1da177e4
LT
1772 return nid;
1773}
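/*
 * Illustrative sketch, not part of the original file: static interleaving
 * above reduces to "pick the (n % nnodes)'th set node".  A plain array
 * stands in for the kernel nodemask; the helper name is hypothetical.
 */
static int example_offset_il_node(const int *allowed, int nnodes, unsigned long n)
{
	if (!nnodes)
		return 0;		/* stand-in for numa_node_id() */
	return allowed[n % nnodes];	/* e.g. allowed = {0, 2, 3}, n = 4 -> node 2 */
}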
1774
5da7ca86
CL
1775/* Determine a node number for interleave */
1776static inline unsigned interleave_nid(struct mempolicy *pol,
1777 struct vm_area_struct *vma, unsigned long addr, int shift)
1778{
1779 if (vma) {
1780 unsigned long off;
1781
3b98b087
NA
1782 /*
1783 * for small pages, there is no difference between
1784 * shift and PAGE_SHIFT, so the bit-shift is safe.
1785 * for huge pages, since vm_pgoff is in units of small
1786 * pages, we need to shift off the always 0 bits to get
1787 * a useful offset.
1788 */
1789 BUG_ON(shift < PAGE_SHIFT);
1790 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
5da7ca86
CL
1791 off += (addr - vma->vm_start) >> shift;
1792 return offset_il_node(pol, vma, off);
1793 } else
1794 return interleave_nodes(pol);
1795}
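/*
 * Illustrative sketch, not part of the original file: for a 2MB huge page
 * (shift = 21 with 4KB base pages, PAGE_SHIFT = 12), interleave_nid()
 * derives the interleave index from vm_pgoff (kept in base-page units)
 * plus the offset of @addr inside the VMA, both scaled to huge-page units.
 */
static unsigned long example_hugepage_interleave_offset(unsigned long vm_pgoff,
							unsigned long addr,
							unsigned long vm_start)
{
	const unsigned int shift = 21, page_shift = 12;

	return (vm_pgoff >> (shift - page_shift)) + ((addr - vm_start) >> shift);
}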
1796
00ac59ad 1797#ifdef CONFIG_HUGETLBFS
480eccf9
LS
1798/*
1799 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
b46e14ac
FF
1800 * @vma: virtual memory area whose policy is sought
1801 * @addr: address in @vma for shared policy lookup and interleave policy
1802 * @gfp_flags: for requested zone
1803 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1804 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
480eccf9 1805 *
52cd3b07
LS
1806 * Returns a zonelist suitable for a huge page allocation and a pointer
1807 * to the struct mempolicy for conditional unref after allocation.
1808 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
1809 * @nodemask for filtering the zonelist.
c0ff7453 1810 *
d26914d1 1811 * Must be protected by read_mems_allowed_begin()
480eccf9 1812 */
396faf03 1813struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
19770b32
MG
1814 gfp_t gfp_flags, struct mempolicy **mpol,
1815 nodemask_t **nodemask)
5da7ca86 1816{
480eccf9 1817 struct zonelist *zl;
5da7ca86 1818
dd6eecb9 1819 *mpol = get_vma_policy(vma, addr);
19770b32 1820 *nodemask = NULL; /* assume !MPOL_BIND */
5da7ca86 1821
52cd3b07
LS
1822 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1823 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
a5516438 1824 huge_page_shift(hstate_vma(vma))), gfp_flags);
52cd3b07 1825 } else {
2f5f9486 1826 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
52cd3b07
LS
1827 if ((*mpol)->mode == MPOL_BIND)
1828 *nodemask = &(*mpol)->v.nodes;
480eccf9
LS
1829 }
1830 return zl;
5da7ca86 1831}
06808b08
LS
1832
1833/*
1834 * init_nodemask_of_mempolicy
1835 *
1836 * If the current task's mempolicy is "default" [NULL], return 'false'
1837 * to indicate default policy. Otherwise, extract the policy nodemask
1838 * for 'bind' or 'interleave' policy into the argument nodemask, or
1839 * initialize the argument nodemask to contain the single node for
1840 * 'preferred' or 'local' policy and return 'true' to indicate presence
1841 * of non-default mempolicy.
1842 *
1843 * We don't bother with reference counting the mempolicy [mpol_get/put]
1844 * because the current task is examining its own mempolicy and a task's
1845 * mempolicy is only ever changed by the task itself.
1846 *
1847 * N.B., it is the caller's responsibility to free a returned nodemask.
1848 */
1849bool init_nodemask_of_mempolicy(nodemask_t *mask)
1850{
1851 struct mempolicy *mempolicy;
1852 int nid;
1853
1854 if (!(mask && current->mempolicy))
1855 return false;
1856
c0ff7453 1857 task_lock(current);
06808b08
LS
1858 mempolicy = current->mempolicy;
1859 switch (mempolicy->mode) {
1860 case MPOL_PREFERRED:
1861 if (mempolicy->flags & MPOL_F_LOCAL)
1862 nid = numa_node_id();
1863 else
1864 nid = mempolicy->v.preferred_node;
1865 init_nodemask_of_node(mask, nid);
1866 break;
1867
1868 case MPOL_BIND:
1869 /* Fall through */
1870 case MPOL_INTERLEAVE:
1871 *mask = mempolicy->v.nodes;
1872 break;
1873
1874 default:
1875 BUG();
1876 }
c0ff7453 1877 task_unlock(current);
06808b08
LS
1878
1879 return true;
1880}
00ac59ad 1881#endif
5da7ca86 1882
6f48d0eb
DR
1883/*
1884 * mempolicy_nodemask_intersects
1885 *
1886 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1887 * policy. Otherwise, check for intersection between mask and the policy
1888 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1889 * policy, always return true since it may allocate elsewhere on fallback.
1890 *
1891 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1892 */
1893bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1894 const nodemask_t *mask)
1895{
1896 struct mempolicy *mempolicy;
1897 bool ret = true;
1898
1899 if (!mask)
1900 return ret;
1901 task_lock(tsk);
1902 mempolicy = tsk->mempolicy;
1903 if (!mempolicy)
1904 goto out;
1905
1906 switch (mempolicy->mode) {
1907 case MPOL_PREFERRED:
1908 /*
1909 * MPOL_PREFERRED and MPOL_F_LOCAL only express a preference for nodes
1910 * to allocate from; they may fall back to other nodes when OOM.
1911 * Thus, it's possible for tsk to have allocated memory from
1912 * nodes in mask.
1913 */
1914 break;
1915 case MPOL_BIND:
1916 case MPOL_INTERLEAVE:
1917 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1918 break;
1919 default:
1920 BUG();
1921 }
1922out:
1923 task_unlock(tsk);
1924 return ret;
1925}
1926
1da177e4
LT
1927/* Allocate a page under the interleave policy.
1928 Own path because it needs to do special accounting. */
662f3a0b
AK
1929static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1930 unsigned nid)
1da177e4
LT
1931{
1932 struct zonelist *zl;
1933 struct page *page;
1934
0e88460d 1935 zl = node_zonelist(nid, gfp);
1da177e4 1936 page = __alloc_pages(gfp, order, zl);
dd1a239f 1937 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
ca889e6c 1938 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1da177e4
LT
1939 return page;
1940}
1941
1942/**
0bbbc0b3 1943 * alloc_pages_vma - Allocate a page for a VMA.
1da177e4
LT
1944 *
1945 * @gfp:
1946 * %GFP_USER user allocation.
1947 * %GFP_KERNEL kernel allocations,
1948 * %GFP_HIGHMEM highmem/user allocations,
1949 * %GFP_FS allocation should not call back into a file system.
1950 * %GFP_ATOMIC don't sleep.
1951 *
0bbbc0b3 1952 * @order: Order of the GFP allocation.
1da177e4
LT
1953 * @vma: Pointer to VMA or NULL if not available.
1954 * @addr: Virtual Address of the allocation. Must be inside the VMA.
be97a41b
VB
1955 * @node: Which node to prefer for allocation (modulo policy).
1956 * @hugepage: for hugepages try only the preferred node if possible
1da177e4
LT
1957 *
1958 * This function allocates a page from the kernel page pool and applies
1959 * a NUMA policy associated with the VMA or the current process.
1960 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
1961 * mm_struct of the VMA to prevent it from going away. Should be used for
be97a41b
VB
1962 * all allocations for pages that will be mapped into user space. Returns
1963 * NULL when no page can be allocated.
1da177e4
LT
1964 */
1965struct page *
0bbbc0b3 1966alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
be97a41b 1967 unsigned long addr, int node, bool hugepage)
1da177e4 1968{
cc9a6c87 1969 struct mempolicy *pol;
c0ff7453 1970 struct page *page;
cc9a6c87 1971 unsigned int cpuset_mems_cookie;
be97a41b
VB
1972 struct zonelist *zl;
1973 nodemask_t *nmask;
cc9a6c87
MG
1974
1975retry_cpuset:
dd6eecb9 1976 pol = get_vma_policy(vma, addr);
d26914d1 1977 cpuset_mems_cookie = read_mems_allowed_begin();
1da177e4 1978
0867a57c
VB
1979 if (pol->mode == MPOL_INTERLEAVE) {
1980 unsigned nid;
1981
1982 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
1983 mpol_cond_put(pol);
1984 page = alloc_page_interleave(gfp, order, nid);
1985 goto out;
1986 }
1987
1988 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
1989 int hpage_node = node;
1990
be97a41b
VB
1991 /*
1992 * For hugepage allocation and non-interleave policy which
0867a57c
VB
1993 * allows the current node (or other explicitly preferred
1994 * node) we only try to allocate from the current/preferred
1995 * node and don't fall back to other nodes, as the cost of
1996 * remote accesses would likely offset THP benefits.
be97a41b
VB
1997 *
1998 * If the policy is interleave, or does not allow the current
1999 * node in its nodemask, we allocate the standard way.
2000 */
0867a57c
VB
2001 if (pol->mode == MPOL_PREFERRED &&
2002 !(pol->flags & MPOL_F_LOCAL))
2003 hpage_node = pol->v.preferred_node;
2004
be97a41b 2005 nmask = policy_nodemask(gfp, pol);
0867a57c 2006 if (!nmask || node_isset(hpage_node, *nmask)) {
be97a41b 2007 mpol_cond_put(pol);
96db800f 2008 page = __alloc_pages_node(hpage_node,
5265047a 2009 gfp | __GFP_THISNODE, order);
be97a41b
VB
2010 goto out;
2011 }
2012 }
2013
be97a41b
VB
2014 nmask = policy_nodemask(gfp, pol);
2015 zl = policy_zonelist(gfp, pol, node);
be97a41b 2016 page = __alloc_pages_nodemask(gfp, order, zl, nmask);
d51e9894 2017 mpol_cond_put(pol);
be97a41b 2018out:
d26914d1 2019 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
cc9a6c87 2020 goto retry_cpuset;
c0ff7453 2021 return page;
1da177e4
LT
2022}
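/*
 * Illustrative userspace sketch, not part of the original file: the VMA
 * policy that alloc_pages_vma() consults is typically installed with
 * mbind(2).  Assumes nodes 0 and 1 exist; build with -lnuma for the
 * mbind() wrapper declared in <numaif.h>.
 */
#define _GNU_SOURCE
#include <numaif.h>
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	size_t len = 4 << 20;
	unsigned long nodemask = 0x3;	/* nodes 0 and 1 */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	/* Page faults in this range now go through the interleave path above. */
	if (mbind(buf, len, MPOL_INTERLEAVE, &nodemask, 8 * sizeof(nodemask), 0))
		perror("mbind");
	return 0;
}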
2023
2024/**
2025 * alloc_pages_current - Allocate pages.
2026 *
2027 * @gfp:
2028 * %GFP_USER user allocation,
2029 * %GFP_KERNEL kernel allocation,
2030 * %GFP_HIGHMEM highmem allocation,
2031 * %GFP_FS don't call back into a file system.
2032 * %GFP_ATOMIC don't sleep.
2033 * @order: Power of two of allocation size in pages. 0 is a single page.
2034 *
2035 * Allocate a page from the kernel page pool. When not in
2036 * interrupt context, apply the current process NUMA policy.
2037 * Returns NULL when no page can be allocated.
2038 *
cf2a473c 2039 * Don't call cpuset_update_task_memory_state() unless
1da177e4
LT
2040 * 1) it's ok to take cpuset_sem (can WAIT), and
2041 * 2) allocating for current task (not interrupt).
2042 */
dd0fc66f 2043struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1da177e4 2044{
8d90274b 2045 struct mempolicy *pol = &default_policy;
c0ff7453 2046 struct page *page;
cc9a6c87 2047 unsigned int cpuset_mems_cookie;
1da177e4 2048
8d90274b
ON
2049 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2050 pol = get_task_policy(current);
52cd3b07 2051
cc9a6c87 2052retry_cpuset:
d26914d1 2053 cpuset_mems_cookie = read_mems_allowed_begin();
cc9a6c87 2054
52cd3b07
LS
2055 /*
2056 * No reference counting needed for current->mempolicy
2057 * nor system default_policy
2058 */
45c4745a 2059 if (pol->mode == MPOL_INTERLEAVE)
c0ff7453
MX
2060 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2061 else
2062 page = __alloc_pages_nodemask(gfp, order,
5c4b4be3
AK
2063 policy_zonelist(gfp, pol, numa_node_id()),
2064 policy_nodemask(gfp, pol));
cc9a6c87 2065
d26914d1 2066 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
cc9a6c87
MG
2067 goto retry_cpuset;
2068
c0ff7453 2069 return page;
1da177e4
LT
2070}
2071EXPORT_SYMBOL(alloc_pages_current);
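/*
 * Illustrative userspace sketch, not part of the original file: the process
 * policy consulted by alloc_pages_current() can be installed with
 * set_mempolicy(2).  Assumes node 0 exists; build with -lnuma for the
 * wrapper declared in <numaif.h>.
 */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long nodemask = 0x1;	/* node 0 only */

	if (set_mempolicy(MPOL_BIND, &nodemask, 8 * sizeof(nodemask)))
		perror("set_mempolicy");
	/* Subsequent allocations in this task are restricted to node 0. */
	free(malloc(1 << 20));
	return 0;
}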
2072
ef0855d3
ON
2073int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2074{
2075 struct mempolicy *pol = mpol_dup(vma_policy(src));
2076
2077 if (IS_ERR(pol))
2078 return PTR_ERR(pol);
2079 dst->vm_policy = pol;
2080 return 0;
2081}
2082
4225399a 2083/*
846a16bf 2084 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
4225399a
PJ
2085 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2086 * with the mems_allowed returned by cpuset_mems_allowed(). This
2087 * keeps mempolicies cpuset relative after its cpuset moves. See
2088 * further kernel/cpuset.c update_nodemask().
708c1bbc
MX
2089 *
2090 * current's mempolicy may be rebound by the other task (the task that changes
2091 * the cpuset's mems), so we needn't do rebind work for the current task.
4225399a 2092 */
4225399a 2093
846a16bf
LS
2094/* Slow path of a mempolicy duplicate */
2095struct mempolicy *__mpol_dup(struct mempolicy *old)
1da177e4
LT
2096{
2097 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2098
2099 if (!new)
2100 return ERR_PTR(-ENOMEM);
708c1bbc
MX
2101
2102 /* task's mempolicy is protected by alloc_lock */
2103 if (old == current->mempolicy) {
2104 task_lock(current);
2105 *new = *old;
2106 task_unlock(current);
2107 } else
2108 *new = *old;
2109
4225399a
PJ
2110 if (current_cpuset_is_being_rebound()) {
2111 nodemask_t mems = cpuset_mems_allowed(current);
708c1bbc
MX
2112 if (new->flags & MPOL_F_REBINDING)
2113 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2114 else
2115 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
4225399a 2116 }
1da177e4 2117 atomic_set(&new->refcnt, 1);
1da177e4
LT
2118 return new;
2119}
2120
2121/* Slow path of a mempolicy comparison */
fcfb4dcc 2122bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1da177e4
LT
2123{
2124 if (!a || !b)
fcfb4dcc 2125 return false;
45c4745a 2126 if (a->mode != b->mode)
fcfb4dcc 2127 return false;
19800502 2128 if (a->flags != b->flags)
fcfb4dcc 2129 return false;
19800502
BL
2130 if (mpol_store_user_nodemask(a))
2131 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
fcfb4dcc 2132 return false;
19800502 2133
45c4745a 2134 switch (a->mode) {
19770b32
MG
2135 case MPOL_BIND:
2136 /* Fall through */
1da177e4 2137 case MPOL_INTERLEAVE:
fcfb4dcc 2138 return !!nodes_equal(a->v.nodes, b->v.nodes);
1da177e4 2139 case MPOL_PREFERRED:
75719661 2140 return a->v.preferred_node == b->v.preferred_node;
1da177e4
LT
2141 default:
2142 BUG();
fcfb4dcc 2143 return false;
1da177e4
LT
2144 }
2145}
2146
1da177e4
LT
2147/*
2148 * Shared memory backing store policy support.
2149 *
2150 * Remember policies even when nobody has shared memory mapped.
2151 * The policies are kept in Red-Black tree linked from the inode.
4a8c7bb5 2152 * They are protected by the sp->lock rwlock, which should be held
1da177e4
LT
2153 * for any accesses to the tree.
2154 */
2155
4a8c7bb5
NZ
2156/*
2157 * lookup first element intersecting start-end. Caller holds sp->lock for
2158 * reading or for writing
2159 */
1da177e4
LT
2160static struct sp_node *
2161sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2162{
2163 struct rb_node *n = sp->root.rb_node;
2164
2165 while (n) {
2166 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2167
2168 if (start >= p->end)
2169 n = n->rb_right;
2170 else if (end <= p->start)
2171 n = n->rb_left;
2172 else
2173 break;
2174 }
2175 if (!n)
2176 return NULL;
2177 for (;;) {
2178 struct sp_node *w = NULL;
2179 struct rb_node *prev = rb_prev(n);
2180 if (!prev)
2181 break;
2182 w = rb_entry(prev, struct sp_node, nd);
2183 if (w->end <= start)
2184 break;
2185 n = prev;
2186 }
2187 return rb_entry(n, struct sp_node, nd);
2188}
2189
4a8c7bb5
NZ
2190/*
2191 * Insert a new shared policy into the list. Caller holds sp->lock for
2192 * writing.
2193 */
1da177e4
LT
2194static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2195{
2196 struct rb_node **p = &sp->root.rb_node;
2197 struct rb_node *parent = NULL;
2198 struct sp_node *nd;
2199
2200 while (*p) {
2201 parent = *p;
2202 nd = rb_entry(parent, struct sp_node, nd);
2203 if (new->start < nd->start)
2204 p = &(*p)->rb_left;
2205 else if (new->end > nd->end)
2206 p = &(*p)->rb_right;
2207 else
2208 BUG();
2209 }
2210 rb_link_node(&new->nd, parent, p);
2211 rb_insert_color(&new->nd, &sp->root);
140d5a49 2212 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
45c4745a 2213 new->policy ? new->policy->mode : 0);
1da177e4
LT
2214}
2215
2216/* Find shared policy intersecting idx */
2217struct mempolicy *
2218mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2219{
2220 struct mempolicy *pol = NULL;
2221 struct sp_node *sn;
2222
2223 if (!sp->root.rb_node)
2224 return NULL;
4a8c7bb5 2225 read_lock(&sp->lock);
1da177e4
LT
2226 sn = sp_lookup(sp, idx, idx+1);
2227 if (sn) {
2228 mpol_get(sn->policy);
2229 pol = sn->policy;
2230 }
4a8c7bb5 2231 read_unlock(&sp->lock);
1da177e4
LT
2232 return pol;
2233}
2234
63f74ca2
KM
2235static void sp_free(struct sp_node *n)
2236{
2237 mpol_put(n->policy);
2238 kmem_cache_free(sn_cache, n);
2239}
2240
771fb4d8
LS
2241/**
2242 * mpol_misplaced - check whether current page node is valid in policy
2243 *
b46e14ac
FF
2244 * @page: page to be checked
2245 * @vma: vm area where page mapped
2246 * @addr: virtual address where page mapped
771fb4d8
LS
2247 *
2248 * Lookup current policy node id for vma,addr and "compare to" page's
2249 * node id.
2250 *
2251 * Returns:
2252 * -1 - not misplaced, page is in the right node
2253 * node - node id where the page should be
2254 *
2255 * Policy determination "mimics" alloc_page_vma().
2256 * Called from fault path where we know the vma and faulting address.
2257 */
2258int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2259{
2260 struct mempolicy *pol;
c33d6c06 2261 struct zoneref *z;
771fb4d8
LS
2262 int curnid = page_to_nid(page);
2263 unsigned long pgoff;
90572890
PZ
2264 int thiscpu = raw_smp_processor_id();
2265 int thisnid = cpu_to_node(thiscpu);
771fb4d8
LS
2266 int polnid = -1;
2267 int ret = -1;
2268
2269 BUG_ON(!vma);
2270
dd6eecb9 2271 pol = get_vma_policy(vma, addr);
771fb4d8
LS
2272 if (!(pol->flags & MPOL_F_MOF))
2273 goto out;
2274
2275 switch (pol->mode) {
2276 case MPOL_INTERLEAVE:
2277 BUG_ON(addr >= vma->vm_end);
2278 BUG_ON(addr < vma->vm_start);
2279
2280 pgoff = vma->vm_pgoff;
2281 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2282 polnid = offset_il_node(pol, vma, pgoff);
2283 break;
2284
2285 case MPOL_PREFERRED:
2286 if (pol->flags & MPOL_F_LOCAL)
2287 polnid = numa_node_id();
2288 else
2289 polnid = pol->v.preferred_node;
2290 break;
2291
2292 case MPOL_BIND:
c33d6c06 2293
771fb4d8
LS
2294 /*
2295 * allows binding to multiple nodes.
2296 * use current page if in policy nodemask,
2297 * else select nearest allowed node, if any.
2298 * If no allowed nodes, use current [!misplaced].
2299 */
2300 if (node_isset(curnid, pol->v.nodes))
2301 goto out;
c33d6c06 2302 z = first_zones_zonelist(
771fb4d8
LS
2303 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2304 gfp_zone(GFP_HIGHUSER),
c33d6c06
MG
2305 &pol->v.nodes);
2306 polnid = z->zone->node;
771fb4d8
LS
2307 break;
2308
2309 default:
2310 BUG();
2311 }
5606e387
MG
2312
2313 /* Migrate the page towards the node whose CPU is referencing it */
e42c8ff2 2314 if (pol->flags & MPOL_F_MORON) {
90572890 2315 polnid = thisnid;
5606e387 2316
10f39042 2317 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
de1c9ce6 2318 goto out;
e42c8ff2
MG
2319 }
2320
771fb4d8
LS
2321 if (curnid != polnid)
2322 ret = polnid;
2323out:
2324 mpol_cond_put(pol);
2325
2326 return ret;
2327}
2328
c11600e4
DR
2329/*
2330 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2331 * dropped after task->mempolicy is set to NULL so that any allocation done as
2332 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2333 * policy.
2334 */
2335void mpol_put_task_policy(struct task_struct *task)
2336{
2337 struct mempolicy *pol;
2338
2339 task_lock(task);
2340 pol = task->mempolicy;
2341 task->mempolicy = NULL;
2342 task_unlock(task);
2343 mpol_put(pol);
2344}
2345
1da177e4
LT
2346static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2347{
140d5a49 2348 pr_debug("deleting %lx-%lx\n", n->start, n->end);
1da177e4 2349 rb_erase(&n->nd, &sp->root);
63f74ca2 2350 sp_free(n);
1da177e4
LT
2351}
2352
42288fe3
MG
2353static void sp_node_init(struct sp_node *node, unsigned long start,
2354 unsigned long end, struct mempolicy *pol)
2355{
2356 node->start = start;
2357 node->end = end;
2358 node->policy = pol;
2359}
2360
dbcb0f19
AB
2361static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2362 struct mempolicy *pol)
1da177e4 2363{
869833f2
KM
2364 struct sp_node *n;
2365 struct mempolicy *newpol;
1da177e4 2366
869833f2 2367 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1da177e4
LT
2368 if (!n)
2369 return NULL;
869833f2
KM
2370
2371 newpol = mpol_dup(pol);
2372 if (IS_ERR(newpol)) {
2373 kmem_cache_free(sn_cache, n);
2374 return NULL;
2375 }
2376 newpol->flags |= MPOL_F_SHARED;
42288fe3 2377 sp_node_init(n, start, end, newpol);
869833f2 2378
1da177e4
LT
2379 return n;
2380}
2381
2382/* Replace a policy range. */
2383static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2384 unsigned long end, struct sp_node *new)
2385{
b22d127a 2386 struct sp_node *n;
42288fe3
MG
2387 struct sp_node *n_new = NULL;
2388 struct mempolicy *mpol_new = NULL;
b22d127a 2389 int ret = 0;
1da177e4 2390
42288fe3 2391restart:
4a8c7bb5 2392 write_lock(&sp->lock);
1da177e4
LT
2393 n = sp_lookup(sp, start, end);
2394 /* Take care of old policies in the same range. */
2395 while (n && n->start < end) {
2396 struct rb_node *next = rb_next(&n->nd);
2397 if (n->start >= start) {
2398 if (n->end <= end)
2399 sp_delete(sp, n);
2400 else
2401 n->start = end;
2402 } else {
2403 /* Old policy spanning whole new range. */
2404 if (n->end > end) {
42288fe3
MG
2405 if (!n_new)
2406 goto alloc_new;
2407
2408 *mpol_new = *n->policy;
2409 atomic_set(&mpol_new->refcnt, 1);
7880639c 2410 sp_node_init(n_new, end, n->end, mpol_new);
1da177e4 2411 n->end = start;
5ca39575 2412 sp_insert(sp, n_new);
42288fe3
MG
2413 n_new = NULL;
2414 mpol_new = NULL;
1da177e4
LT
2415 break;
2416 } else
2417 n->end = start;
2418 }
2419 if (!next)
2420 break;
2421 n = rb_entry(next, struct sp_node, nd);
2422 }
2423 if (new)
2424 sp_insert(sp, new);
4a8c7bb5 2425 write_unlock(&sp->lock);
42288fe3
MG
2426 ret = 0;
2427
2428err_out:
2429 if (mpol_new)
2430 mpol_put(mpol_new);
2431 if (n_new)
2432 kmem_cache_free(sn_cache, n_new);
2433
b22d127a 2434 return ret;
42288fe3
MG
2435
2436alloc_new:
4a8c7bb5 2437 write_unlock(&sp->lock);
42288fe3
MG
2438 ret = -ENOMEM;
2439 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2440 if (!n_new)
2441 goto err_out;
2442 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2443 if (!mpol_new)
2444 goto err_out;
2445 goto restart;
1da177e4
LT
2446}
2447
71fe804b
LS
2448/**
2449 * mpol_shared_policy_init - initialize shared policy for inode
2450 * @sp: pointer to inode shared policy
2451 * @mpol: struct mempolicy to install
2452 *
2453 * Install non-NULL @mpol in inode's shared policy rb-tree.
2454 * On entry, the current task has a reference on a non-NULL @mpol.
2455 * This must be released on exit.
4bfc4495 2456 * This is called from get_inode() calls, so we can use GFP_KERNEL.
71fe804b
LS
2457 */
2458void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2459{
58568d2a
MX
2460 int ret;
2461
71fe804b 2462 sp->root = RB_ROOT; /* empty tree == default mempolicy */
4a8c7bb5 2463 rwlock_init(&sp->lock);
71fe804b
LS
2464
2465 if (mpol) {
2466 struct vm_area_struct pvma;
2467 struct mempolicy *new;
4bfc4495 2468 NODEMASK_SCRATCH(scratch);
71fe804b 2469
4bfc4495 2470 if (!scratch)
5c0c1654 2471 goto put_mpol;
71fe804b
LS
2472 /* contextualize the tmpfs mount point mempolicy */
2473 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
15d77835 2474 if (IS_ERR(new))
0cae3457 2475 goto free_scratch; /* no valid nodemask intersection */
58568d2a
MX
2476
2477 task_lock(current);
4bfc4495 2478 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
58568d2a 2479 task_unlock(current);
15d77835 2480 if (ret)
5c0c1654 2481 goto put_new;
71fe804b
LS
2482
2483 /* Create pseudo-vma that contains just the policy */
2484 memset(&pvma, 0, sizeof(struct vm_area_struct));
2485 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2486 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
15d77835 2487
5c0c1654 2488put_new:
71fe804b 2489 mpol_put(new); /* drop initial ref */
0cae3457 2490free_scratch:
4bfc4495 2491 NODEMASK_SCRATCH_FREE(scratch);
5c0c1654
LS
2492put_mpol:
2493 mpol_put(mpol); /* drop our incoming ref on sb mpol */
7339ff83
RH
2494 }
2495}
2496
1da177e4
LT
2497int mpol_set_shared_policy(struct shared_policy *info,
2498 struct vm_area_struct *vma, struct mempolicy *npol)
2499{
2500 int err;
2501 struct sp_node *new = NULL;
2502 unsigned long sz = vma_pages(vma);
2503
028fec41 2504 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
1da177e4 2505 vma->vm_pgoff,
45c4745a 2506 sz, npol ? npol->mode : -1,
028fec41 2507 npol ? npol->flags : -1,
00ef2d2f 2508 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
1da177e4
LT
2509
2510 if (npol) {
2511 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2512 if (!new)
2513 return -ENOMEM;
2514 }
2515 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2516 if (err && new)
63f74ca2 2517 sp_free(new);
1da177e4
LT
2518 return err;
2519}
2520
2521/* Free a backing policy store on inode delete. */
2522void mpol_free_shared_policy(struct shared_policy *p)
2523{
2524 struct sp_node *n;
2525 struct rb_node *next;
2526
2527 if (!p->root.rb_node)
2528 return;
4a8c7bb5 2529 write_lock(&p->lock);
1da177e4
LT
2530 next = rb_first(&p->root);
2531 while (next) {
2532 n = rb_entry(next, struct sp_node, nd);
2533 next = rb_next(&n->nd);
63f74ca2 2534 sp_delete(p, n);
1da177e4 2535 }
4a8c7bb5 2536 write_unlock(&p->lock);
1da177e4
LT
2537}
2538
1a687c2e 2539#ifdef CONFIG_NUMA_BALANCING
c297663c 2540static int __initdata numabalancing_override;
1a687c2e
MG
2541
2542static void __init check_numabalancing_enable(void)
2543{
2544 bool numabalancing_default = false;
2545
2546 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2547 numabalancing_default = true;
2548
c297663c
MG
2549 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2550 if (numabalancing_override)
2551 set_numabalancing_state(numabalancing_override == 1);
2552
b0dc2b9b 2553 if (num_online_nodes() > 1 && !numabalancing_override) {
756a025f 2554 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
c297663c 2555 numabalancing_default ? "Enabling" : "Disabling");
1a687c2e
MG
2556 set_numabalancing_state(numabalancing_default);
2557 }
2558}
2559
2560static int __init setup_numabalancing(char *str)
2561{
2562 int ret = 0;
2563 if (!str)
2564 goto out;
1a687c2e
MG
2565
2566 if (!strcmp(str, "enable")) {
c297663c 2567 numabalancing_override = 1;
1a687c2e
MG
2568 ret = 1;
2569 } else if (!strcmp(str, "disable")) {
c297663c 2570 numabalancing_override = -1;
1a687c2e
MG
2571 ret = 1;
2572 }
2573out:
2574 if (!ret)
4a404bea 2575 pr_warn("Unable to parse numa_balancing=\n");
1a687c2e
MG
2576
2577 return ret;
2578}
2579__setup("numa_balancing=", setup_numabalancing);
2580#else
2581static inline void __init check_numabalancing_enable(void)
2582{
2583}
2584#endif /* CONFIG_NUMA_BALANCING */
2585
1da177e4
LT
2586/* assumes fs == KERNEL_DS */
2587void __init numa_policy_init(void)
2588{
b71636e2
PM
2589 nodemask_t interleave_nodes;
2590 unsigned long largest = 0;
2591 int nid, prefer = 0;
2592
1da177e4
LT
2593 policy_cache = kmem_cache_create("numa_policy",
2594 sizeof(struct mempolicy),
20c2df83 2595 0, SLAB_PANIC, NULL);
1da177e4
LT
2596
2597 sn_cache = kmem_cache_create("shared_policy_node",
2598 sizeof(struct sp_node),
20c2df83 2599 0, SLAB_PANIC, NULL);
1da177e4 2600
5606e387
MG
2601 for_each_node(nid) {
2602 preferred_node_policy[nid] = (struct mempolicy) {
2603 .refcnt = ATOMIC_INIT(1),
2604 .mode = MPOL_PREFERRED,
2605 .flags = MPOL_F_MOF | MPOL_F_MORON,
2606 .v = { .preferred_node = nid, },
2607 };
2608 }
2609
b71636e2
PM
2610 /*
2611 * Set interleaving policy for system init. Interleaving is only
2612 * enabled across suitably sized nodes (default is >= 16MB),
2613 * falling back to the largest node if they're all smaller.
2614 */
2615 nodes_clear(interleave_nodes);
01f13bd6 2616 for_each_node_state(nid, N_MEMORY) {
b71636e2
PM
2617 unsigned long total_pages = node_present_pages(nid);
2618
2619 /* Preserve the largest node */
2620 if (largest < total_pages) {
2621 largest = total_pages;
2622 prefer = nid;
2623 }
2624
2625 /* Interleave this node? */
2626 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2627 node_set(nid, interleave_nodes);
2628 }
2629
2630 /* All too small, use the largest */
2631 if (unlikely(nodes_empty(interleave_nodes)))
2632 node_set(prefer, interleave_nodes);
1da177e4 2633
028fec41 2634 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
b1de0d13 2635 pr_err("%s: interleaving failed\n", __func__);
1a687c2e
MG
2636
2637 check_numabalancing_enable();
1da177e4
LT
2638}
2639
8bccd85f 2640/* Reset policy of current process to default */
1da177e4
LT
2641void numa_default_policy(void)
2642{
028fec41 2643 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
1da177e4 2644}
68860ec1 2645
095f1fc4
LS
2646/*
2647 * Parse and format mempolicy from/to strings
2648 */
2649
1a75a6c8 2650/*
f2a07f40 2651 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
1a75a6c8 2652 */
345ace9c
LS
2653static const char * const policy_modes[] =
2654{
2655 [MPOL_DEFAULT] = "default",
2656 [MPOL_PREFERRED] = "prefer",
2657 [MPOL_BIND] = "bind",
2658 [MPOL_INTERLEAVE] = "interleave",
d3a71033 2659 [MPOL_LOCAL] = "local",
345ace9c 2660};
1a75a6c8 2661
095f1fc4
LS
2662
2663#ifdef CONFIG_TMPFS
2664/**
f2a07f40 2665 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
095f1fc4 2666 * @str: string containing mempolicy to parse
71fe804b 2667 * @mpol: pointer to struct mempolicy pointer, returned on success.
095f1fc4
LS
2668 *
2669 * Format of input:
2670 * <mode>[=<flags>][:<nodelist>]
2671 *
71fe804b 2672 * On success, returns 0, else 1
095f1fc4 2673 */
a7a88b23 2674int mpol_parse_str(char *str, struct mempolicy **mpol)
095f1fc4 2675{
71fe804b 2676 struct mempolicy *new = NULL;
b4652e84 2677 unsigned short mode;
f2a07f40 2678 unsigned short mode_flags;
71fe804b 2679 nodemask_t nodes;
095f1fc4
LS
2680 char *nodelist = strchr(str, ':');
2681 char *flags = strchr(str, '=');
095f1fc4
LS
2682 int err = 1;
2683
2684 if (nodelist) {
2685 /* NUL-terminate mode or flags string */
2686 *nodelist++ = '\0';
71fe804b 2687 if (nodelist_parse(nodelist, nodes))
095f1fc4 2688 goto out;
01f13bd6 2689 if (!nodes_subset(nodes, node_states[N_MEMORY]))
095f1fc4 2690 goto out;
71fe804b
LS
2691 } else
2692 nodes_clear(nodes);
2693
095f1fc4
LS
2694 if (flags)
2695 *flags++ = '\0'; /* terminate mode string */
2696
479e2802 2697 for (mode = 0; mode < MPOL_MAX; mode++) {
345ace9c 2698 if (!strcmp(str, policy_modes[mode])) {
095f1fc4
LS
2699 break;
2700 }
2701 }
a720094d 2702 if (mode >= MPOL_MAX)
095f1fc4
LS
2703 goto out;
2704
71fe804b 2705 switch (mode) {
095f1fc4 2706 case MPOL_PREFERRED:
71fe804b
LS
2707 /*
2708 * Insist on a nodelist of one node only
2709 */
095f1fc4
LS
2710 if (nodelist) {
2711 char *rest = nodelist;
2712 while (isdigit(*rest))
2713 rest++;
926f2ae0
KM
2714 if (*rest)
2715 goto out;
095f1fc4
LS
2716 }
2717 break;
095f1fc4
LS
2718 case MPOL_INTERLEAVE:
2719 /*
2720 * Default to online nodes with memory if no nodelist
2721 */
2722 if (!nodelist)
01f13bd6 2723 nodes = node_states[N_MEMORY];
3f226aa1 2724 break;
71fe804b 2725 case MPOL_LOCAL:
3f226aa1 2726 /*
71fe804b 2727 * Don't allow a nodelist; mpol_new() checks flags
3f226aa1 2728 */
71fe804b 2729 if (nodelist)
3f226aa1 2730 goto out;
71fe804b 2731 mode = MPOL_PREFERRED;
3f226aa1 2732 break;
413b43de
RT
2733 case MPOL_DEFAULT:
2734 /*
2735 * Insist on an empty nodelist
2736 */
2737 if (!nodelist)
2738 err = 0;
2739 goto out;
d69b2e63
KM
2740 case MPOL_BIND:
2741 /*
2742 * Insist on a nodelist
2743 */
2744 if (!nodelist)
2745 goto out;
095f1fc4
LS
2746 }
2747
71fe804b 2748 mode_flags = 0;
095f1fc4
LS
2749 if (flags) {
2750 /*
2751 * Currently, we only support two mutually exclusive
2752 * mode flags.
2753 */
2754 if (!strcmp(flags, "static"))
71fe804b 2755 mode_flags |= MPOL_F_STATIC_NODES;
095f1fc4 2756 else if (!strcmp(flags, "relative"))
71fe804b 2757 mode_flags |= MPOL_F_RELATIVE_NODES;
095f1fc4 2758 else
926f2ae0 2759 goto out;
095f1fc4 2760 }
71fe804b
LS
2761
2762 new = mpol_new(mode, mode_flags, &nodes);
2763 if (IS_ERR(new))
926f2ae0
KM
2764 goto out;
2765
f2a07f40
HD
2766 /*
2767 * Save nodes for mpol_to_str() to show the tmpfs mount options
2768 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2769 */
2770 if (mode != MPOL_PREFERRED)
2771 new->v.nodes = nodes;
2772 else if (nodelist)
2773 new->v.preferred_node = first_node(nodes);
2774 else
2775 new->flags |= MPOL_F_LOCAL;
2776
2777 /*
2778 * Save nodes for contextualization: this will be used to "clone"
2779 * the mempolicy in a specific context [cpuset] at a later time.
2780 */
2781 new->w.user_nodemask = nodes;
2782
926f2ae0 2783 err = 0;
71fe804b 2784
095f1fc4
LS
2785out:
2786 /* Restore string for error message */
2787 if (nodelist)
2788 *--nodelist = ':';
2789 if (flags)
2790 *--flags = '=';
71fe804b
LS
2791 if (!err)
2792 *mpol = new;
095f1fc4
LS
2793 return err;
2794}
2795#endif /* CONFIG_TMPFS */
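/*
 * Illustrative userspace sketch, not part of the original file: strings in
 * the <mode>[=<flags>][:<nodelist>] form parsed above reach mpol_parse_str()
 * via the tmpfs "mpol=" mount option, e.g. "interleave:0-3",
 * "bind=static:0,2", "prefer:1" or "local".  The mount point and node ids
 * are hypothetical; mounting requires CAP_SYS_ADMIN.
 */
#include <sys/mount.h>
#include <stdio.h>

int main(void)
{
	if (mount("tmpfs", "/mnt/numa-tmpfs", "tmpfs", 0,
		  "size=1G,mpol=interleave:0-3"))
		perror("mount");
	return 0;
}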
2796
71fe804b
LS
2797/**
2798 * mpol_to_str - format a mempolicy structure for printing
2799 * @buffer: to contain formatted mempolicy string
2800 * @maxlen: length of @buffer
2801 * @pol: pointer to mempolicy to be formatted
71fe804b 2802 *
948927ee
DR
2803 * Convert @pol into a string. If @buffer is too short, truncate the string.
2804 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2805 * longest flag, "relative", and to display at least a few node ids.
1a75a6c8 2806 */
948927ee 2807void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1a75a6c8
CL
2808{
2809 char *p = buffer;
948927ee
DR
2810 nodemask_t nodes = NODE_MASK_NONE;
2811 unsigned short mode = MPOL_DEFAULT;
2812 unsigned short flags = 0;
2291990a 2813
8790c71a 2814 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
bea904d5 2815 mode = pol->mode;
948927ee
DR
2816 flags = pol->flags;
2817 }
bea904d5 2818
1a75a6c8
CL
2819 switch (mode) {
2820 case MPOL_DEFAULT:
1a75a6c8 2821 break;
1a75a6c8 2822 case MPOL_PREFERRED:
fc36b8d3 2823 if (flags & MPOL_F_LOCAL)
f2a07f40 2824 mode = MPOL_LOCAL;
53f2556b 2825 else
fc36b8d3 2826 node_set(pol->v.preferred_node, nodes);
1a75a6c8 2827 break;
1a75a6c8 2828 case MPOL_BIND:
1a75a6c8 2829 case MPOL_INTERLEAVE:
f2a07f40 2830 nodes = pol->v.nodes;
1a75a6c8 2831 break;
1a75a6c8 2832 default:
948927ee
DR
2833 WARN_ON_ONCE(1);
2834 snprintf(p, maxlen, "unknown");
2835 return;
1a75a6c8
CL
2836 }
2837
b7a9f420 2838 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
1a75a6c8 2839
fc36b8d3 2840 if (flags & MPOL_MODE_FLAGS) {
948927ee 2841 p += snprintf(p, buffer + maxlen - p, "=");
f5b087b5 2842
2291990a
LS
2843 /*
2844 * Currently, the only defined flags are mutually exclusive
2845 */
f5b087b5 2846 if (flags & MPOL_F_STATIC_NODES)
2291990a
LS
2847 p += snprintf(p, buffer + maxlen - p, "static");
2848 else if (flags & MPOL_F_RELATIVE_NODES)
2849 p += snprintf(p, buffer + maxlen - p, "relative");
f5b087b5
DR
2850 }
2851
9e763e0f
TH
2852 if (!nodes_empty(nodes))
2853 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
2854 nodemask_pr_args(&nodes));
1a75a6c8 2855}
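/*
 * Illustrative examples, not part of the original file: strings produced by
 * mpol_to_str() follow the same <mode>[=<flags>][:<nodelist>] layout, e.g.
 * (node ids hypothetical):
 *
 *	default
 *	prefer:3
 *	bind:0-1
 *	interleave=static:0,2
 *	local
 */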