// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
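
/*
 * Illustrative userspace sketch (not part of this file): how the policies
 * above are typically requested through the raw syscalls declared in
 * libnuma's <numaif.h>; assumes a machine with nodes 0 and 1 online, and
 * omits error handling.  Note that get_nodes() below consumes maxnode - 1
 * bits, hence the "+ 1" on the mask width.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	unsigned long nodes = 0x3;	// nodemask with nodes 0 and 1 set
 *
 *	// Process policy: interleave future allocations over nodes 0-1.
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes) + 1);
 *
 *	// VMA policy: bind one anonymous mapping to node 0 only.
 *	unsigned long node0 = 0x1;
 *	void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mbind(p, 1 << 20, MPOL_BIND, &node0, 8 * sizeof(node0) + 1, 0);
 */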

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
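
/*
 * A sketch of the remap semantics above: a user-supplied relative mask
 * orig = {0,2} applied against rel = {4,5,6} first folds modulo
 * nodes_weight(rel) == 3 (still {0,2}), then maps position i onto the
 * i-th set bit of rel, yielding ret = {4,6}.
 */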

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization.  You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->v.nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_sem. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	unsigned long start;
	unsigned long end;
	struct vm_area_struct *first;
};

/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the invert of qp->nmask.
 */
static inline bool queue_pages_required(struct page *page,
					struct queue_pages *qp)
{
	int nid = page_to_nid(page);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}
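
/*
 * For instance: with qp->nmask = {0,1}, a page on node 2 is not selected
 * unless MPOL_MF_INVERT is set, in which case the test is inverted and
 * the node-2 page is exactly the one picked for queueing.
 */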

/*
 * queue_pages_pmd() has four possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 2 - THP was split.
 * -EIO - the PMD is a migration entry, or only MPOL_MF_STRICT was specified
 *        and an existing page was already on a node that does not follow
 *        the policy.
 */
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	int ret = 0;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		ret = -EIO;
		goto unlock;
	}
	page = pmd_page(*pmd);
	if (is_huge_zero_page(page)) {
		spin_unlock(ptl);
		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
		ret = 2;
		goto out;
	}
	if (!queue_pages_required(page, qp))
		goto unlock;

	flags = qp->flags;
	/* go to thp migration */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		if (!vma_migratable(walk->vma) ||
		    migrate_page_add(page, qp->pagelist, flags)) {
			ret = 1;
			goto unlock;
		}
	} else
		ret = -EIO;
unlock:
	spin_unlock(ptl);
out:
	return ret;
}

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 *
 * queue_pages_pte_range() has three possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 *        on a node that does not follow the policy.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int ret;
	bool has_unmovable = false;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
		if (ret != 2)
			return ret;
	}
	/* THP was split, fall through to pte walk */

	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		if (!queue_pages_required(page, qp))
			continue;
		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
			/* MPOL_MF_STRICT must be specified if we get here */
			if (!vma_migratable(vma)) {
				has_unmovable = true;
				break;
			}

			/*
			 * Do not abort immediately since there may be
			 * temporary off LRU pages in the range.  Still
			 * need migrate other LRU pages.
			 */
			if (migrate_page_add(page, qp->pagelist, flags))
				has_unmovable = true;
		} else
			break;
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (has_unmovable)
		return 1;

	return addr != end ? -EIO : 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
	int ret = 0;
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = (qp->flags & MPOL_MF_VALID);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	if (!queue_pages_required(page, qp))
		goto unlock;

	if (flags == MPOL_MF_STRICT) {
		/*
		 * STRICT alone means only detecting misplaced pages; there is
		 * no need to further check other vmas.
		 */
		ret = -EIO;
		goto unlock;
	}

	if (!vma_migratable(walk->vma)) {
		/*
		 * Must be STRICT with MOVE*, otherwise .test_walk() would
		 * have stopped walking the current vma.
		 * Detect the misplaced page, but allow migrating pages which
		 * have already been queued.
		 */
		ret = 1;
		goto unlock;
	}

	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
		if (!isolate_huge_page(page, qp->pagelist) &&
		    (flags & MPOL_MF_STRICT))
			/*
			 * Failed to isolate the page, but allow migrating
			 * pages which have already been queued.
			 */
			ret = 1;
	}
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return ret;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	/* range check first */
	VM_BUG_ON_VMA((vma->vm_start > start) || (vma->vm_end < end), vma);

	if (!qp->first) {
		qp->first = vma;
		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
			(qp->start < vma->vm_start))
			/* hole at head side of range */
			return -EFAULT;
	}
	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
		((vma->vm_end < qp->end) &&
		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
		/* hole at middle or tail of range */
		return -EFAULT;

	/*
	 * Need to check MPOL_MF_STRICT to return -EIO if possible,
	 * regardless of vma_migratable
	 */
	if (!vma_migratable(vma) &&
	    !(flags & MPOL_MF_STRICT))
		return 1;

	if (endvma > end)
		endvma = end;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & MPOL_MF_VALID)
		return 0;
	return 1;
}

static const struct mm_walk_ops queue_pages_walk_ops = {
	.hugetlb_entry		= queue_pages_hugetlb,
	.pmd_entry		= queue_pages_pte_range,
	.test_walk		= queue_pages_test_walk,
};

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which
 * is passed via @private.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 0 - queue pages successfully or no misplaced page.
 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
 *         memory range specified by nodemask and maxnode points outside
 *         your accessible address space (-EFAULT)
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	int err;
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.start = start,
		.end = end,
		.first = NULL,
	};

	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);

	if (!qp.first)
		/* whole range in hole */
		err = -EFAULT;

	return err;
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	VM_BUG_ON(!vma);

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}
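
/*
 * A sketch of the splitting behaviour above: applying a new policy to the
 * middle pages of a single 3-page VMA first fails vma_merge() (the policy
 * differs), then splits at vmstart and again at vmend, leaving three VMAs
 * of which only the middle one gets the new policy via
 * vma_replace_policy().
 */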

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE)
		current->il_prev = MAX_NUMNODES-1;
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	int locked = 1;
	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			/*
			 * Take a refcount on the mpol, lookup_node()
			 * will drop the mmap_sem, so after calling
			 * lookup_node() only "pol" remains valid, "vma"
			 * is stale.
			 */
			pol_refcount = pol;
			vma = NULL;
			mpol_get(pol);
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->v.nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&mm->mmap_sem);
	if (pol_refcount)
		mpol_put(pol_refcount);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration, thp tail pages can be passed.
 */
static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	struct page *head = compound_head(page);
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
		if (!isolate_lru_page(head)) {
			list_add_tail(&head->lru, pagelist);
			mod_node_page_state(page_pgdat(head),
				NR_ISOLATED_ANON + page_is_file_lru(head),
				hpage_nr_pages(head));
		} else if (flags & MPOL_MF_STRICT) {
			/*
			 * Non-movable page may reach here.  And, there may be
			 * temporary off LRU pages or non-LRU movable pages.
			 * Treat them as unmovable pages since they can't be
			 * isolated, so they can't be moved at the moment.  It
			 * should return -EIO for this case too.
			 */
			return -EIO;
		}
	}

	return 0;
}

/* page allocation callback for NUMA node migration */
struct page *alloc_new_node_page(struct page *page, unsigned long node)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else if (PageTransHuge(page)) {
		struct page *thp;

		thp = alloc_pages_node(node,
			(GFP_TRANSHUGE | __GFP_THISNODE),
			HPAGE_PMD_ORDER);
		if (!thp)
			return NULL;
		prep_transhuge_page(thp);
		return thp;
	} else
		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
						    __GFP_THISNODE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source from that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship.  In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_page(struct page *page, unsigned long start)
{
	struct vm_area_struct *vma;
	unsigned long uninitialized_var(address);

	vma = find_vma(current->mm, start);
	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	if (PageHuge(page)) {
		return alloc_huge_page_vma(page_hstate(compound_head(page)),
				vma, address);
	} else if (PageTransHuge(page)) {
		struct page *thp;

		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
					 HPAGE_PMD_ORDER);
		if (!thp)
			return NULL;
		prep_transhuge_page(thp);
		return thp;
	}
	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
			vma, address);
}
#else

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	return -EIO;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	return -ENOSYS;
}

static struct page *new_page(struct page *page, unsigned long start)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	int ret;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)MPOL_MF_VALID)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (flags & MPOL_MF_LAZY)
		new->flags |= MPOL_F_MOF;

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			goto mpol_out;
	}
	{
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			down_write(&mm->mmap_sem);
			task_lock(current);
			err = mpol_set_nodemask(new, nmask, scratch);
			task_unlock(current);
			if (err)
				up_write(&mm->mmap_sem);
		} else
			err = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
	}
	if (err)
		goto mpol_out;

	ret = queue_pages_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	if (ret < 0) {
		err = ret;
		goto up_out;
	}

	err = mbind_range(mm, start, end, new);

	if (!err) {
		int nr_failed = 0;

		if (!list_empty(&pagelist)) {
			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
			nr_failed = migrate_pages(&pagelist, new_page, NULL,
				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
			if (nr_failed)
				putback_movable_pages(&pagelist);
		}

		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
			err = -EIO;
	} else {
up_out:
		if (!list_empty(&pagelist))
			putback_movable_pages(&pagelist);
	}

	up_write(&mm->mmap_sem);
mpol_out:
	mpol_put(new);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long t;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/*
	 * When the user specified more nodes than supported just check
	 * if the non supported part is all zero.
	 *
	 * If maxnode has more longs than MAX_NUMNODES, check
	 * the bits in that area first. And then go through to
	 * check the rest of the bits, which are equal to or bigger than
	 * MAX_NUMNODES.  Otherwise, just check bits [MAX_NUMNODES, maxnode).
	 */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
		unsigned long valid_mask = endmask;

		valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
		if (get_user(t, nmask + nlongs - 1))
			return -EFAULT;
		if (t & valid_mask)
			return -EINVAL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}
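
/*
 * A worked example of the masking above (assuming 64-bit longs and
 * MAX_NUMNODES == 64): a caller passing maxnode == 66 yields maxnode == 65
 * after the decrement, so nlongs == 2 and endmask == 0x1.  Since nlongs
 * exceeds BITS_TO_LONGS(MAX_NUMNODES) == 1, the second user long is
 * fetched and must be zero under endmask (i.e. bit 64 clear); then only
 * the first long is copied, with endmask reset to ~0UL.
 */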

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}
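
/*
 * For instance, with nr_node_ids == 64 a get_mempolicy() caller passing
 * maxnode == 1024 asks for 128 bytes of mask: the trailing 120 bytes the
 * kernel does not track are cleared, and only the first 8 are copied.
 */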

static long kernel_mbind(unsigned long start, unsigned long len,
			 unsigned long mode, const unsigned long __user *nmask,
			 unsigned long maxnode, unsigned int flags)
{
	nodemask_t nodes;
	int err;
	unsigned short mode_flags;

	start = untagged_addr(start);
	mode_flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if (mode >= MPOL_MAX)
		return -EINVAL;
	if ((mode_flags & MPOL_F_STATIC_NODES) &&
	    (mode_flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}

SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
		unsigned long, mode, const unsigned long __user *, nmask,
		unsigned long, maxnode, unsigned int, flags)
{
	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
}

/* Set the process memory policy */
static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
				 unsigned long maxnode)
{
	int err;
	nodemask_t nodes;
	unsigned short flags;

	flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if ((unsigned int)mode >= MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, flags, &nodes);
}

SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
		unsigned long, maxnode)
{
	return kernel_set_mempolicy(mode, nmask, maxnode);
}

static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
				const unsigned long __user *old_nodes,
				const unsigned long __user *new_nodes)
{
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	nodemask_t task_nodes;
	int err;
	nodemask_t *old;
	nodemask_t *new;
	NODEMASK_SCRATCH(scratch);

	if (!scratch)
		return -ENOMEM;

	old = &scratch->mask1;
	new = &scratch->mask2;

	err = get_nodes(old, old_nodes, maxnode);
	if (err)
		goto out;

	err = get_nodes(new, new_nodes, maxnode);
	if (err)
		goto out;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		err = -ESRCH;
		goto out;
	}
	get_task_struct(task);

	err = -EINVAL;

	/*
	 * Check if this process has the right to modify the specified process.
	 * Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out_put;
	}
	rcu_read_unlock();

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out_put;
	}

	task_nodes = cpuset_mems_allowed(current);
	nodes_and(*new, *new, task_nodes);
	if (nodes_empty(*new))
		goto out_put;

	err = security_task_movememory(task);
	if (err)
		goto out_put;

	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm) {
		err = -EINVAL;
		goto out;
	}

	err = do_migrate_pages(mm, old, new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);

	mmput(mm);
out:
	NODEMASK_SCRATCH_FREE(scratch);

	return err;

out_put:
	put_task_struct(task);
	goto out;

}

SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
		const unsigned long __user *, old_nodes,
		const unsigned long __user *, new_nodes)
{
	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
}
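
/*
 * Illustrative userspace sketch (not part of this file): moving all of a
 * target process's pages from node 0 to node 1 through the syscall wrapped
 * above.  The caller needs CAP_SYS_NICE or ptrace rights on @pid, per the
 * checks in kernel_migrate_pages().
 *
 *	#include <numaif.h>
 *
 *	unsigned long old = 0x1, new = 0x2;	// node 0 -> node 1
 *	long left = migrate_pages(pid, 8 * sizeof(old) + 1, &old, &new);
 *	// left is the number of pages that could not be moved, or -1.
 */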
1596
39743889 1597
8bccd85f 1598/* Retrieve NUMA policy */
af03c4ac
DB
1599static int kernel_get_mempolicy(int __user *policy,
1600 unsigned long __user *nmask,
1601 unsigned long maxnode,
1602 unsigned long addr,
1603 unsigned long flags)
8bccd85f 1604{
dbcb0f19
AB
1605 int err;
1606 int uninitialized_var(pval);
8bccd85f
CL
1607 nodemask_t nodes;
1608
057d3389
AK
1609 addr = untagged_addr(addr);
1610
050c17f2 1611 if (nmask != NULL && maxnode < nr_node_ids)
8bccd85f
CL
1612 return -EINVAL;
1613
1614 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1615
1616 if (err)
1617 return err;
1618
1619 if (policy && put_user(pval, policy))
1620 return -EFAULT;
1621
1622 if (nmask)
1623 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1624
1625 return err;
1626}
1627
af03c4ac
DB
1628SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1629 unsigned long __user *, nmask, unsigned long, maxnode,
1630 unsigned long, addr, unsigned long, flags)
1631{
1632 return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1633}
1634
1da177e4
LT
1635#ifdef CONFIG_COMPAT
1636
c93e0f6c
HC
1637COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1638 compat_ulong_t __user *, nmask,
1639 compat_ulong_t, maxnode,
		       compat_ulong_t, addr, compat_ulong_t, flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		unsigned long copy_size;
		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
		err = copy_from_user(bm, nm, copy_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
		       compat_ulong_t, maxnode)
{
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		if (compat_get_bitmap(bm, nmask, nr_bits))
			return -EFAULT;
		nm = compat_alloc_user_space(alloc_size);
		if (copy_to_user(nm, bm, alloc_size))
			return -EFAULT;
	}

	return kernel_set_mempolicy(mode, nm, nr_bits+1);
}

COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
		       compat_ulong_t, maxnode, compat_ulong_t, flags)
{
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
			return -EFAULT;
		nm = compat_alloc_user_space(alloc_size);
		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
			return -EFAULT;
	}

	return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
}

COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
		       compat_ulong_t, maxnode,
		       const compat_ulong_t __user *, old_nodes,
		       const compat_ulong_t __user *, new_nodes)
{
	unsigned long __user *old = NULL;
	unsigned long __user *new = NULL;
	nodemask_t tmp_mask;
	unsigned long nr_bits;
	unsigned long size;

	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
	if (old_nodes) {
		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
			return -EFAULT;
		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
		if (new_nodes)
			new = old + size / sizeof(unsigned long);
		if (copy_to_user(old, nodes_addr(tmp_mask), size))
			return -EFAULT;
	}
	if (new_nodes) {
		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
			return -EFAULT;
		if (new == NULL)
			new = compat_alloc_user_space(size);
		if (copy_to_user(new, nodes_addr(tmp_mask), size))
			return -EFAULT;
	}
	return kernel_migrate_pages(pid, nr_bits + 1, old, new);
}

#endif /* CONFIG_COMPAT */
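
/*
 * Illustrative userspace sketch (not part of the kernel): these syscalls
 * take a nodemask as an array of unsigned longs plus "maxnode", the number
 * of bits the caller's mask can hold. A hypothetical 64-bit caller binding
 * a range to nodes 0 and 2 might do:
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 2);
 *	mbind(addr, len, MPOL_BIND, &mask, sizeof(mask) * 8 + 1, 0);
 *
 * The compat wrappers above exist so that a 32-bit caller's bitmap of
 * compat_ulong_t words is repacked into the native unsigned long layout
 * before reaching kernel_mbind() and friends.
 */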

bool vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		return false;

	/*
	 * DAX device mappings require predictable access latency, so avoid
	 * incurring periodic faults.
	 */
	if (vma_is_dax(vma))
		return false;

	if (is_vm_hugetlb_page(vma) &&
	    !hugepage_migration_supported(hstate_vma(vma)))
		return false;

	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
	    gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
			< policy_zone)
		return false;
	return true;
}

struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct mempolicy *pol = NULL;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy) {
			pol = vma->vm_ops->get_policy(vma, addr);
		} else if (vma->vm_policy) {
			pol = vma->vm_policy;

			/*
			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
			 * count on these policies which will be dropped by
			 * mpol_cond_put() later
			 */
			if (mpol_needs_cond_ref(pol))
				mpol_get(pol);
		}
	}

	return pol;
}

/*
 * get_vma_policy(@vma, @addr)
 * @vma: virtual memory area whose policy is sought
 * @addr: address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to current->mempolicy or system default policy, as necessary.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task. It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct mempolicy *pol = __get_vma_policy(vma, addr);

	if (!pol)
		pol = get_task_policy(current);

	return pol;
}
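
/*
 * Illustrative caller pattern (a sketch, mirroring how this file's own
 * callers such as alloc_pages_vma() use it): because the returned policy
 * may carry a conditional reference, pair every lookup with mpol_cond_put():
 *
 *	struct mempolicy *pol = get_vma_policy(vma, addr);
 *	... use pol->mode, pol->v.nodes ...
 *	mpol_cond_put(pol);
 */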

bool vma_policy_mof(struct vm_area_struct *vma)
{
	struct mempolicy *pol;

	if (vma->vm_ops && vma->vm_ops->get_policy) {
		bool ret = false;

		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
		if (pol && (pol->flags & MPOL_F_MOF))
			ret = true;
		mpol_cond_put(pol);

		return ret;
	}

	pol = vma->vm_policy;
	if (!pol)
		pol = get_task_policy(current);

	return pol->flags & MPOL_F_MOF;
}

static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
{
	enum zone_type dynamic_policy_zone = policy_zone;

	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);

	/*
	 * If policy->v.nodes has movable memory only, we apply the policy
	 * only when gfp_zone(gfp) is ZONE_MOVABLE.
	 *
	 * policy->v.nodes is intersected with node_states[N_MEMORY], so if
	 * the following test fails, it implies policy->v.nodes has movable
	 * memory only.
	 */
	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
		dynamic_policy_zone = ZONE_MOVABLE;

	return zone >= dynamic_policy_zone;
}

/*
 * Return a nodemask representing a mempolicy for filtering nodes for
 * page allocation
 */
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
	/* Lower zones don't get a nodemask applied for MPOL_BIND */
	if (unlikely(policy->mode == MPOL_BIND) &&
			apply_policy_zone(policy, gfp_zone(gfp)) &&
			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
		return &policy->v.nodes;

	return NULL;
}

/* Return the node id preferred by the given mempolicy, or the given id */
static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
{
	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
		nd = policy->v.preferred_node;
	else {
		/*
		 * __GFP_THISNODE shouldn't even be used with the bind policy
		 * because we might easily break the expectation to stay on the
		 * requested node and not break the policy.
		 */
		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
	}

	return nd;
}

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned next;
	struct task_struct *me = current;

	next = next_node_in(me->il_prev, policy->v.nodes);
	if (next < MAX_NUMNODES)
		me->il_prev = next;
	return next;
}
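
/*
 * Worked example (illustrative): with policy->v.nodes = {0,2,3} and
 * il_prev = 0, successive calls return 2, 3, 0, 2, ... since next_node_in()
 * wraps around the nodemask, and il_prev records the node just handed out
 * so the round-robin resumes from there on the next allocation.
 */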

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 */
unsigned int mempolicy_slab_node(void)
{
	struct mempolicy *policy;
	int node = numa_mem_id();

	if (in_interrupt())
		return node;

	policy = current->mempolicy;
	if (!policy || policy->flags & MPOL_F_LOCAL)
		return node;

	switch (policy->mode) {
	case MPOL_PREFERRED:
		/*
		 * handled MPOL_F_LOCAL above
		 */
		return policy->v.preferred_node;

	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND: {
		struct zoneref *z;

		/*
		 * Follow bind policy behavior and start allocation at the
		 * first node.
		 */
		struct zonelist *zonelist;
		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
		z = first_zones_zonelist(zonelist, highest_zoneidx,
							&policy->v.nodes);
		return z->zone ? zone_to_nid(z->zone) : node;
	}

	default:
		BUG();
	}
}

/*
 * Do static interleaving for a VMA with known offset @n. Returns the n'th
 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
 * number of present nodes.
 */
static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
{
	unsigned nnodes = nodes_weight(pol->v.nodes);
	unsigned target;
	int i;
	int nid;

	if (!nnodes)
		return numa_node_id();
	target = (unsigned int)n % nnodes;
	nid = first_node(pol->v.nodes);
	for (i = 0; i < target; i++)
		nid = next_node(nid, pol->v.nodes);
	return nid;
}
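
/*
 * Worked example (illustrative): with pol->v.nodes = {1,3,5} (nnodes = 3)
 * and n = 7, target = 7 % 3 = 1, so the walk starts at node 1 and steps
 * once, returning node 3. Unlike interleave_nodes(), the result depends
 * only on @n, so a given page offset always maps to the same node.
 */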

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
		 struct vm_area_struct *vma, unsigned long addr, int shift)
{
	if (vma) {
		unsigned long off;

		/*
		 * for small pages, there is no difference between
		 * shift and PAGE_SHIFT, so the bit-shift is safe.
		 * for huge pages, since vm_pgoff is in units of small
		 * pages, we need to shift off the always 0 bits to get
		 * a useful offset.
		 */
		BUG_ON(shift < PAGE_SHIFT);
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, off);
	} else
		return interleave_nodes(pol);
}

#ifdef CONFIG_HUGETLBFS
/*
 * huge_node(@vma, @addr, @gfp_flags, @mpol, @nodemask)
 * @vma: virtual memory area whose policy is sought
 * @addr: address in @vma for shared policy lookup and interleave policy
 * @gfp_flags: for requested zone
 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
 *
 * Returns a nid suitable for a huge page allocation and a pointer
 * to the struct mempolicy for conditional unref after allocation.
 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
 * @nodemask for filtering the zonelist.
 *
 * Must be protected by read_mems_allowed_begin()
 */
int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	int nid;

	*mpol = get_vma_policy(vma, addr);
	*nodemask = NULL;	/* assume !MPOL_BIND */

	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
		nid = interleave_nid(*mpol, vma, addr,
					huge_page_shift(hstate_vma(vma)));
	} else {
		nid = policy_node(gfp_flags, *mpol, numa_node_id());
		if ((*mpol)->mode == MPOL_BIND)
			*nodemask = &(*mpol)->v.nodes;
	}
	return nid;
}
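
/*
 * Illustrative caller sketch (hedged; modeled on the hugetlb allocation
 * path, not copied from it): the cpuset mems_allowed seqcount brackets the
 * lookup, so a concurrent cpuset change triggers a retry:
 *
 *	unsigned int cookie;
 *	do {
 *		cookie = read_mems_allowed_begin();
 *		nid = huge_node(vma, addr, gfp, &mpol, &nodemask);
 *		page = ...allocate a huge page on nid, filtered by nodemask...;
 *		mpol_cond_put(mpol);
 *	} while (!page && read_mems_allowed_retry(cookie));
 */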

/*
 * init_nodemask_of_mempolicy
 *
 * If the current task's mempolicy is "default" [NULL], return 'false'
 * to indicate default policy. Otherwise, extract the policy nodemask
 * for 'bind' or 'interleave' policy into the argument nodemask, or
 * initialize the argument nodemask to contain the single node for
 * 'preferred' or 'local' policy and return 'true' to indicate presence
 * of non-default mempolicy.
 *
 * We don't bother with reference counting the mempolicy [mpol_get/put]
 * because the current task is examining its own mempolicy and a task's
 * mempolicy is only ever changed by the task itself.
 *
 * N.B., it is the caller's responsibility to free a returned nodemask.
 */
bool init_nodemask_of_mempolicy(nodemask_t *mask)
{
	struct mempolicy *mempolicy;
	int nid;

	if (!(mask && current->mempolicy))
		return false;

	task_lock(current);
	mempolicy = current->mempolicy;
	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
		if (mempolicy->flags & MPOL_F_LOCAL)
			nid = numa_node_id();
		else
			nid = mempolicy->v.preferred_node;
		init_nodemask_of_node(mask, nid);
		break;

	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*mask = mempolicy->v.nodes;
		break;

	default:
		BUG();
	}
	task_unlock(current);

	return true;
}
#endif

/*
 * mempolicy_nodemask_intersects
 *
 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
 * policy. Otherwise, check for intersection between mask and the policy
 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
 * policy, always return true since it may allocate elsewhere on fallback.
 *
 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
 */
bool mempolicy_nodemask_intersects(struct task_struct *tsk,
					const nodemask_t *mask)
{
	struct mempolicy *mempolicy;
	bool ret = true;

	if (!mask)
		return ret;
	task_lock(tsk);
	mempolicy = tsk->mempolicy;
	if (!mempolicy)
		goto out;

	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
		/*
		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
		 * allocate from; they may fall back to other nodes when oom.
		 * Thus, it's possible for tsk to have allocated memory from
		 * nodes in mask.
		 */
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		ret = nodes_intersects(mempolicy->v.nodes, *mask);
		break;
	default:
		BUG();
	}
out:
	task_unlock(tsk);
	return ret;
}

/* Allocate a page in interleaved policy.
   Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
					unsigned nid)
{
	struct page *page;

	page = __alloc_pages(gfp, order, nid);
	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
	if (!static_branch_likely(&vm_numa_stat_key))
		return page;
	if (page && page_to_nid(page) == nid) {
		preempt_disable();
		__inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
		preempt_enable();
	}
	return page;
}

/**
 * alloc_pages_vma - Allocate a page for a VMA.
 *
 * @gfp:
 *	%GFP_USER    user allocation,
 *	%GFP_KERNEL  kernel allocation,
 *	%GFP_HIGHMEM highmem/user allocation,
 *	%GFP_FS      allocation should not call back into a file system,
 *	%GFP_ATOMIC  don't sleep.
 *
 * @order: Order of the GFP allocation.
 * @vma: Pointer to VMA or NULL if not available.
 * @addr: Virtual address of the allocation. Must be inside the VMA.
 * @node: Which node to prefer for allocation (modulo policy).
 * @hugepage: for hugepages try only the preferred node if possible
 *
 * This function allocates a page from the kernel page pool and applies
 * a NUMA policy associated with the VMA or the current process.
 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
 * mm_struct of the VMA to prevent it from going away. Should be used for
 * all allocations for pages that will be mapped into user space. Returns
 * NULL when no page can be allocated.
 */
struct page *
alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
		unsigned long addr, int node, bool hugepage)
{
	struct mempolicy *pol;
	struct page *page;
	int preferred_nid;
	nodemask_t *nmask;

	pol = get_vma_policy(vma, addr);

	if (pol->mode == MPOL_INTERLEAVE) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
		mpol_cond_put(pol);
		page = alloc_page_interleave(gfp, order, nid);
		goto out;
	}

	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
		int hpage_node = node;

		/*
		 * For hugepage allocation and non-interleave policy which
		 * allows the current node (or other explicitly preferred
		 * node) we only try to allocate from the current/preferred
		 * node and don't fall back to other nodes, as the cost of
		 * remote accesses would likely offset THP benefits.
		 *
		 * If the policy is interleave, or does not allow the current
		 * node in its nodemask, we allocate the standard way.
		 */
		if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
			hpage_node = pol->v.preferred_node;

		nmask = policy_nodemask(gfp, pol);
		if (!nmask || node_isset(hpage_node, *nmask)) {
			mpol_cond_put(pol);
			/*
			 * First, try to allocate THP only on local node, but
			 * don't reclaim unnecessarily, just compact.
			 */
			page = __alloc_pages_node(hpage_node,
				gfp | __GFP_THISNODE | __GFP_NORETRY, order);

			/*
			 * If hugepage allocations are configured to always
			 * synchronous compact or the vma has been madvised
			 * to prefer hugepage backing, retry allowing remote
			 * memory with both reclaim and compact as well.
			 */
			if (!page && (gfp & __GFP_DIRECT_RECLAIM))
				page = __alloc_pages_node(hpage_node,
								gfp, order);

			goto out;
		}
	}

	nmask = policy_nodemask(gfp, pol);
	preferred_nid = policy_node(gfp, pol, node);
	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
	mpol_cond_put(pol);
out:
	return page;
}
EXPORT_SYMBOL(alloc_pages_vma);
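
/*
 * Illustrative sketch (hedged): a typical anonymous-fault caller allocates
 * a movable highmem page at the faulting address and lets the VMA or task
 * policy pick the node, e.g.:
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, address,
 *			       numa_node_id(), false);
 */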

/**
 * alloc_pages_current - Allocate pages.
 *
 * @gfp:
 *	%GFP_USER    user allocation,
 *	%GFP_KERNEL  kernel allocation,
 *	%GFP_HIGHMEM highmem allocation,
 *	%GFP_FS      don't call back into a file system,
 *	%GFP_ATOMIC  don't sleep.
 * @order: Power of two of allocation size in pages. 0 is a single page.
 *
 * Allocate a page from the kernel page pool, applying the current process
 * NUMA policy when not in interrupt context. Returns NULL when no page
 * can be allocated.
 */
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
	struct mempolicy *pol = &default_policy;
	struct page *page;

	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
		pol = get_task_policy(current);

	/*
	 * No reference counting needed for current->mempolicy
	 * nor system default_policy
	 */
	if (pol->mode == MPOL_INTERLEAVE)
		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
	else
		page = __alloc_pages_nodemask(gfp, order,
				policy_node(gfp, pol, numa_node_id()),
				policy_nodemask(gfp, pol));

	return page;
}
EXPORT_SYMBOL(alloc_pages_current);

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	struct mempolicy *pol = mpol_dup(vma_policy(src));

	if (IS_ERR(pol))
		return PTR_ERR(pol);
	dst->vm_policy = pol;
	return 0;
}

/*
 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed(). This
 * keeps mempolicies cpuset relative after its cpuset moves. See
 * further kernel/cpuset.c update_nodemask().
 *
 * current's mempolicy may be rebound by another task (the task that changes
 * the cpuset's mems), so we needn't do rebind work for the current task.
 */

/* Slow path of a mempolicy duplicate */
struct mempolicy *__mpol_dup(struct mempolicy *old)
{
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

	if (!new)
		return ERR_PTR(-ENOMEM);

	/* task's mempolicy is protected by alloc_lock */
	if (old == current->mempolicy) {
		task_lock(current);
		*new = *old;
		task_unlock(current);
	} else
		*new = *old;

	if (current_cpuset_is_being_rebound()) {
		nodemask_t mems = cpuset_mems_allowed(current);
		mpol_rebind_policy(new, &mems);
	}
	atomic_set(&new->refcnt, 1);
	return new;
}

/* Slow path of a mempolicy comparison */
bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (!a || !b)
		return false;
	if (a->mode != b->mode)
		return false;
	if (a->flags != b->flags)
		return false;
	if (mpol_store_user_nodemask(a))
		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
			return false;

	switch (a->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		return !!nodes_equal(a->v.nodes, b->v.nodes);
	case MPOL_PREFERRED:
		/* a's ->flags is the same as b's */
		if (a->flags & MPOL_F_LOCAL)
			return true;
		return a->v.preferred_node == b->v.preferred_node;
	default:
		BUG();
		return false;
	}
}

/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in Red-Black tree linked from the inode.
 * They are protected by the sp->lock rwlock, which should be held
 * for any accesses to the tree.
 */

/*
 * lookup first element intersecting start-end. Caller holds sp->lock for
 * reading or for writing
 */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
	struct rb_node *n = sp->root.rb_node;

	while (n) {
		struct sp_node *p = rb_entry(n, struct sp_node, nd);

		if (start >= p->end)
			n = n->rb_right;
		else if (end <= p->start)
			n = n->rb_left;
		else
			break;
	}
	if (!n)
		return NULL;
	for (;;) {
		struct sp_node *w = NULL;
		struct rb_node *prev = rb_prev(n);
		if (!prev)
			break;
		w = rb_entry(prev, struct sp_node, nd);
		if (w->end <= start)
			break;
		n = prev;
	}
	return rb_entry(n, struct sp_node, nd);
}
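
/*
 * Worked example (illustrative): with nodes for ranges [2,5) and [8,11) in
 * the tree, sp_lookup(sp, 4, 9) first descends to either intersecting node;
 * the backward walk then steps to the earliest node whose end is still
 * above @start, so the node for [2,5) is returned.
 */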

/*
 * Insert a new shared policy into the list. Caller holds sp->lock for
 * writing.
 */
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
	struct rb_node **p = &sp->root.rb_node;
	struct rb_node *parent = NULL;
	struct sp_node *nd;

	while (*p) {
		parent = *p;
		nd = rb_entry(parent, struct sp_node, nd);
		if (new->start < nd->start)
			p = &(*p)->rb_left;
		else if (new->end > nd->end)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new->nd, parent, p);
	rb_insert_color(&new->nd, &sp->root);
	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
		 new->policy ? new->policy->mode : 0);
}

/* Find shared policy intersecting idx */
struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	struct mempolicy *pol = NULL;
	struct sp_node *sn;

	if (!sp->root.rb_node)
		return NULL;
	read_lock(&sp->lock);
	sn = sp_lookup(sp, idx, idx+1);
	if (sn) {
		mpol_get(sn->policy);
		pol = sn->policy;
	}
	read_unlock(&sp->lock);
	return pol;
}

static void sp_free(struct sp_node *n)
{
	mpol_put(n->policy);
	kmem_cache_free(sn_cache, n);
}

/**
 * mpol_misplaced - check whether current page node is valid in policy
 *
 * @page: page to be checked
 * @vma: vm area where page mapped
 * @addr: virtual address where page mapped
 *
 * Lookup current policy node id for vma,addr and "compare to" page's
 * node id.
 *
 * Returns:
 *	-1	- not misplaced, page is in the right node
 *	node	- node id where the page should be
 *
 * Policy determination "mimics" alloc_page_vma().
 * Called from fault path where we know the vma and faulting address.
 */
int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol;
	struct zoneref *z;
	int curnid = page_to_nid(page);
	unsigned long pgoff;
	int thiscpu = raw_smp_processor_id();
	int thisnid = cpu_to_node(thiscpu);
	int polnid = NUMA_NO_NODE;
	int ret = -1;

	pol = get_vma_policy(vma, addr);
	if (!(pol->flags & MPOL_F_MOF))
		goto out;

	switch (pol->mode) {
	case MPOL_INTERLEAVE:
		pgoff = vma->vm_pgoff;
		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
		polnid = offset_il_node(pol, pgoff);
		break;

	case MPOL_PREFERRED:
		if (pol->flags & MPOL_F_LOCAL)
			polnid = numa_node_id();
		else
			polnid = pol->v.preferred_node;
		break;

	case MPOL_BIND:

		/*
		 * allows binding to multiple nodes.
		 * use current page if in policy nodemask,
		 * else select nearest allowed node, if any.
		 * If no allowed nodes, use current [!misplaced].
		 */
		if (node_isset(curnid, pol->v.nodes))
			goto out;
		z = first_zones_zonelist(
				node_zonelist(numa_node_id(), GFP_HIGHUSER),
				gfp_zone(GFP_HIGHUSER),
				&pol->v.nodes);
		polnid = zone_to_nid(z->zone);
		break;

	default:
		BUG();
	}

	/* Migrate the page towards the node whose CPU is referencing it */
	if (pol->flags & MPOL_F_MORON) {
		polnid = thisnid;

		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
			goto out;
	}

	if (curnid != polnid)
		ret = polnid;
out:
	mpol_cond_put(pol);

	return ret;
}

/*
 * Drop the (possibly final) reference to task->mempolicy. It needs to be
 * dropped after task->mempolicy is set to NULL so that any allocation done as
 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
 * policy.
 */
void mpol_put_task_policy(struct task_struct *task)
{
	struct mempolicy *pol;

	task_lock(task);
	pol = task->mempolicy;
	task->mempolicy = NULL;
	task_unlock(task);
	mpol_put(pol);
}

static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
	pr_debug("deleting %lx-%lx\n", n->start, n->end);
	rb_erase(&n->nd, &sp->root);
	sp_free(n);
}

static void sp_node_init(struct sp_node *node, unsigned long start,
			unsigned long end, struct mempolicy *pol)
{
	node->start = start;
	node->end = end;
	node->policy = pol;
}

static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
				struct mempolicy *pol)
{
	struct sp_node *n;
	struct mempolicy *newpol;

	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
	if (!n)
		return NULL;

	newpol = mpol_dup(pol);
	if (IS_ERR(newpol)) {
		kmem_cache_free(sn_cache, n);
		return NULL;
	}
	newpol->flags |= MPOL_F_SHARED;
	sp_node_init(n, start, end, newpol);

	return n;
}

/* Replace a policy range. */
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
				 unsigned long end, struct sp_node *new)
{
	struct sp_node *n;
	struct sp_node *n_new = NULL;
	struct mempolicy *mpol_new = NULL;
	int ret = 0;

restart:
	write_lock(&sp->lock);
	n = sp_lookup(sp, start, end);
	/* Take care of old policies in the same range. */
	while (n && n->start < end) {
		struct rb_node *next = rb_next(&n->nd);
		if (n->start >= start) {
			if (n->end <= end)
				sp_delete(sp, n);
			else
				n->start = end;
		} else {
			/* Old policy spanning whole new range. */
			if (n->end > end) {
				if (!n_new)
					goto alloc_new;

				*mpol_new = *n->policy;
				atomic_set(&mpol_new->refcnt, 1);
				sp_node_init(n_new, end, n->end, mpol_new);
				n->end = start;
				sp_insert(sp, n_new);
				n_new = NULL;
				mpol_new = NULL;
				break;
			} else
				n->end = start;
		}
		if (!next)
			break;
		n = rb_entry(next, struct sp_node, nd);
	}
	if (new)
		sp_insert(sp, new);
	write_unlock(&sp->lock);
	ret = 0;

err_out:
	if (mpol_new)
		mpol_put(mpol_new);
	if (n_new)
		kmem_cache_free(sn_cache, n_new);

	return ret;

alloc_new:
	write_unlock(&sp->lock);
	ret = -ENOMEM;
	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
	if (!n_new)
		goto err_out;
	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!mpol_new)
		goto err_out;
	goto restart;
}
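
/*
 * Worked example (illustrative): if the tree holds one node [0,10) with
 * policy A and we replace [3,6) with policy B, the old node is split: its
 * end is pulled back to 3, the pre-allocated n_new covering [6,10) with a
 * copy of A is inserted, and the new [3,6) node with B goes in between.
 * The allocations happen via the alloc_new path with sp->lock dropped,
 * after which the whole operation restarts.
 */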

/**
 * mpol_shared_policy_init - initialize shared policy for inode
 * @sp: pointer to inode shared policy
 * @mpol: struct mempolicy to install
 *
 * Install non-NULL @mpol in inode's shared policy rb-tree.
 * On entry, the current task has a reference on a non-NULL @mpol.
 * This must be released on exit.
 * This is called during get_inode(), so we can use GFP_KERNEL.
 */
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
{
	int ret;

	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
	rwlock_init(&sp->lock);

	if (mpol) {
		struct vm_area_struct pvma;
		struct mempolicy *new;
		NODEMASK_SCRATCH(scratch);

		if (!scratch)
			goto put_mpol;
		/* contextualize the tmpfs mount point mempolicy */
		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
		if (IS_ERR(new))
			goto free_scratch; /* no valid nodemask intersection */

		task_lock(current);
		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
		task_unlock(current);
		if (ret)
			goto put_new;

		/* Create pseudo-vma that contains just the policy */
		vma_init(&pvma, NULL);
		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */

put_new:
		mpol_put(new);			/* drop initial ref */
free_scratch:
		NODEMASK_SCRATCH_FREE(scratch);
put_mpol:
		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
	}
}

int mpol_set_shared_policy(struct shared_policy *info,
			struct vm_area_struct *vma, struct mempolicy *npol)
{
	int err;
	struct sp_node *new = NULL;
	unsigned long sz = vma_pages(vma);

	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
		 vma->vm_pgoff,
		 sz, npol ? npol->mode : -1,
		 npol ? npol->flags : -1,
		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);

	if (npol) {
		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
		if (!new)
			return -ENOMEM;
	}
	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
	if (err && new)
		sp_free(new);
	return err;
}

/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
	struct sp_node *n;
	struct rb_node *next;

	if (!p->root.rb_node)
		return;
	write_lock(&p->lock);
	next = rb_first(&p->root);
	while (next) {
		n = rb_entry(next, struct sp_node, nd);
		next = rb_next(&n->nd);
		sp_delete(p, n);
	}
	write_unlock(&p->lock);
}

#ifdef CONFIG_NUMA_BALANCING
static int __initdata numabalancing_override;

static void __init check_numabalancing_enable(void)
{
	bool numabalancing_default = false;

	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
		numabalancing_default = true;

	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
	if (numabalancing_override)
		set_numabalancing_state(numabalancing_override == 1);

	if (num_online_nodes() > 1 && !numabalancing_override) {
		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
			numabalancing_default ? "Enabling" : "Disabling");
		set_numabalancing_state(numabalancing_default);
	}
}

static int __init setup_numabalancing(char *str)
{
	int ret = 0;
	if (!str)
		goto out;

	if (!strcmp(str, "enable")) {
		numabalancing_override = 1;
		ret = 1;
	} else if (!strcmp(str, "disable")) {
		numabalancing_override = -1;
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("Unable to parse numa_balancing=\n");

	return ret;
}
__setup("numa_balancing=", setup_numabalancing);
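
/*
 * Example (illustrative): booting with "numa_balancing=disable" on the
 * kernel command line sets numabalancing_override to -1, so
 * check_numabalancing_enable() switches automatic NUMA balancing off
 * regardless of CONFIG_NUMA_BALANCING_DEFAULT_ENABLED.
 */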
#else
static inline void __init check_numabalancing_enable(void)
{
}
#endif /* CONFIG_NUMA_BALANCING */

/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
	nodemask_t interleave_nodes;
	unsigned long largest = 0;
	int nid, prefer = 0;

	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL);

	sn_cache = kmem_cache_create("shared_policy_node",
				     sizeof(struct sp_node),
				     0, SLAB_PANIC, NULL);

	for_each_node(nid) {
		preferred_node_policy[nid] = (struct mempolicy) {
			.refcnt = ATOMIC_INIT(1),
			.mode = MPOL_PREFERRED,
			.flags = MPOL_F_MOF | MPOL_F_MORON,
			.v = { .preferred_node = nid, },
		};
	}

	/*
	 * Set interleaving policy for system init. Interleaving is only
	 * enabled across suitably sized nodes (default is >= 16MB), or
	 * fall back to the largest node if they're all smaller.
	 */
	nodes_clear(interleave_nodes);
	for_each_node_state(nid, N_MEMORY) {
		unsigned long total_pages = node_present_pages(nid);

		/* Preserve the largest node */
		if (largest < total_pages) {
			largest = total_pages;
			prefer = nid;
		}

		/* Interleave this node? */
		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
			node_set(nid, interleave_nodes);
	}

	/* All too small, use the largest */
	if (unlikely(nodes_empty(interleave_nodes)))
		node_set(prefer, interleave_nodes);

	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
		pr_err("%s: interleaving failed\n", __func__);

	check_numabalancing_enable();
}

/* Reset policy of current process to default */
void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}

/*
 * Parse and format mempolicy from/to strings
 */

/*
 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
 */
static const char * const policy_modes[] =
{
	[MPOL_DEFAULT]    = "default",
	[MPOL_PREFERRED]  = "prefer",
	[MPOL_BIND]       = "bind",
	[MPOL_INTERLEAVE] = "interleave",
	[MPOL_LOCAL]      = "local",
};

#ifdef CONFIG_TMPFS
/**
 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
 * @str: string containing mempolicy to parse
 * @mpol: pointer to struct mempolicy pointer, returned on success.
 *
 * Format of input:
 *	<mode>[=<flags>][:<nodelist>]
 *
 * On success, returns 0, else 1
 */
int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	struct mempolicy *new = NULL;
	unsigned short mode_flags;
	nodemask_t nodes;
	char *nodelist = strchr(str, ':');
	char *flags = strchr(str, '=');
	int err = 1, mode;

	if (flags)
		*flags++ = '\0';	/* terminate mode string */

	if (nodelist) {
		/* NUL-terminate mode or flags string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, nodes))
			goto out;
		if (!nodes_subset(nodes, node_states[N_MEMORY]))
			goto out;
	} else
		nodes_clear(nodes);

	mode = match_string(policy_modes, MPOL_MAX, str);
	if (mode < 0)
		goto out;

	switch (mode) {
	case MPOL_PREFERRED:
		/*
		 * Insist on a nodelist of one node only, although later
		 * we use first_node(nodes) to grab a single node, so here
		 * nodelist (or nodes) cannot be empty.
		 */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (*rest)
				goto out;
			if (nodes_empty(nodes))
				goto out;
		}
		break;
	case MPOL_INTERLEAVE:
		/*
		 * Default to online nodes with memory if no nodelist
		 */
		if (!nodelist)
			nodes = node_states[N_MEMORY];
		break;
	case MPOL_LOCAL:
		/*
		 * Don't allow a nodelist; mpol_new() checks flags
		 */
		if (nodelist)
			goto out;
		mode = MPOL_PREFERRED;
		break;
	case MPOL_DEFAULT:
		/*
		 * Insist on an empty nodelist
		 */
		if (!nodelist)
			err = 0;
		goto out;
	case MPOL_BIND:
		/*
		 * Insist on a nodelist
		 */
		if (!nodelist)
			goto out;
	}

	mode_flags = 0;
	if (flags) {
		/*
		 * Currently, we only support two mutually exclusive
		 * mode flags.
		 */
		if (!strcmp(flags, "static"))
			mode_flags |= MPOL_F_STATIC_NODES;
		else if (!strcmp(flags, "relative"))
			mode_flags |= MPOL_F_RELATIVE_NODES;
		else
			goto out;
	}

	new = mpol_new(mode, mode_flags, &nodes);
	if (IS_ERR(new))
		goto out;

	/*
	 * Save nodes for mpol_to_str() to show the tmpfs mount options
	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
	 */
	if (mode != MPOL_PREFERRED)
		new->v.nodes = nodes;
	else if (nodelist)
		new->v.preferred_node = first_node(nodes);
	else
		new->flags |= MPOL_F_LOCAL;

	/*
	 * Save nodes for contextualization: this will be used to "clone"
	 * the mempolicy in a specific context [cpuset] at a later time.
	 */
	new->w.user_nodemask = nodes;

	err = 0;

out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	if (flags)
		*--flags = '=';
	if (!err)
		*mpol = new;
	return err;
}
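
/*
 * Example inputs (illustrative): the following tmpfs mount options all
 * parse successfully with the grammar above:
 *
 *	mpol=interleave:0-3
 *	mpol=bind=static:1,3
 *	mpol=prefer:2
 *	mpol=local
 *	mpol=default
 *
 * e.g. "mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt".
 */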
#endif /* CONFIG_TMPFS */

/**
 * mpol_to_str - format a mempolicy structure for printing
 * @buffer: to contain formatted mempolicy string
 * @maxlen: length of @buffer
 * @pol: pointer to mempolicy to be formatted
 *
 * Convert @pol into a string. If @buffer is too short, truncate the string.
 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
 * longest flag, "relative", and to display at least a few node ids.
 */
void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
	char *p = buffer;
	nodemask_t nodes = NODE_MASK_NONE;
	unsigned short mode = MPOL_DEFAULT;
	unsigned short flags = 0;

	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
		mode = pol->mode;
		flags = pol->flags;
	}

	switch (mode) {
	case MPOL_DEFAULT:
		break;
	case MPOL_PREFERRED:
		if (flags & MPOL_F_LOCAL)
			mode = MPOL_LOCAL;
		else
			node_set(pol->v.preferred_node, nodes);
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		nodes = pol->v.nodes;
		break;
	default:
		WARN_ON_ONCE(1);
		snprintf(p, maxlen, "unknown");
		return;
	}

	p += snprintf(p, maxlen, "%s", policy_modes[mode]);

	if (flags & MPOL_MODE_FLAGS) {
		p += snprintf(p, buffer + maxlen - p, "=");

		/*
		 * Currently, the only defined flags are mutually exclusive
		 */
		if (flags & MPOL_F_STATIC_NODES)
			p += snprintf(p, buffer + maxlen - p, "static");
		else if (flags & MPOL_F_RELATIVE_NODES)
			p += snprintf(p, buffer + maxlen - p, "relative");
	}

	if (!nodes_empty(nodes))
		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
			       nodemask_pr_args(&nodes));
}