// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/
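/*
 * Illustrative user-space sketch (not kernel code): the policies above are
 * normally selected through the set_mempolicy()/mbind() syscalls, e.g. via
 * the raw wrappers declared in <numaif.h>.  A minimal, hedged example --
 * the buffer sizes and node numbers here are assumptions, not requirements:
 *
 *	unsigned long interleave_nodes = (1UL << 0) | (1UL << 1);
 *	// interleave all future allocations of this task over nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes,
 *		      sizeof(interleave_nodes) * 8);
 *
 *	unsigned long bind_node = 1UL << 0;
 *	void *buf = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	// restrict this one mapping to node 0, no fallback
 *	mbind(buf, 1 << 20, MPOL_BIND, &bind_node, sizeof(bind_node) * 8, 0);
 */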

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_map_to_online_node - Find closest online node
 * @node: Node id to start the search
 *
 * Lookup the next closest node by distance if @nid is not online.
 */
int numa_map_to_online_node(int node)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (node == NUMA_NO_NODE || node_online(node))
		return node;

	min_node = node;
	for_each_online_node(n) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_map_to_online_node);
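/*
 * Example (illustrative node numbers): on a system where node 2 is offline
 * and its nearest online neighbour by node_distance() is node 0,
 * numa_map_to_online_node(2) returns 0; an already-online node (or
 * NUMA_NO_NODE) is returned unchanged.
 */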

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

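/*
 * Fold a user-supplied "relative" nodemask onto the set of nodes that are
 * actually allowed.  Illustrative example (assumed node numbers): a relative
 * mask of {0,2} applied against an allowed set {4,5,6} folds to {0,2} and
 * then maps onto the 1st and 3rd allowed nodes, giving {4,6}.
 */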
static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_lock for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
								*nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->v.nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
						const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_lock during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	mmap_write_lock(mm);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	mmap_write_unlock(mm);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	unsigned long start;
	unsigned long end;
	struct vm_area_struct *first;
};

/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the invert of qp->nmask.
 */
static inline bool queue_pages_required(struct page *page,
					struct queue_pages *qp)
{
	int nid = page_to_nid(page);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}

/*
 * queue_pages_pmd() has four possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 2 - THP was split.
 * -EIO - the entry is a migration entry, or only MPOL_MF_STRICT was
 *        specified and an existing page was already on a node that does
 *        not follow the policy.
 */
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
	__releases(ptl)
{
	int ret = 0;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		ret = -EIO;
		goto unlock;
	}
	page = pmd_page(*pmd);
	if (is_huge_zero_page(page)) {
		spin_unlock(ptl);
		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
		ret = 2;
		goto out;
	}
	if (!queue_pages_required(page, qp))
		goto unlock;

	flags = qp->flags;
	/* go to thp migration */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		if (!vma_migratable(walk->vma) ||
		    migrate_page_add(page, qp->pagelist, flags)) {
			ret = 1;
			goto unlock;
		}
	} else
		ret = -EIO;
unlock:
	spin_unlock(ptl);
out:
	return ret;
}

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 *
 * queue_pages_pte_range() has three possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 *        on a node that does not follow the policy.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int ret;
	bool has_unmovable = false;
	pte_t *pte, *mapped_pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
		if (ret != 2)
			return ret;
	}
	/* THP was split, fall through to pte walk */

	if (pmd_trans_unstable(pmd))
		return 0;

	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		if (!queue_pages_required(page, qp))
			continue;
		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
			/* MPOL_MF_STRICT must be specified if we get here */
			if (!vma_migratable(vma)) {
				has_unmovable = true;
				break;
			}

			/*
			 * Do not abort immediately since there may be
			 * temporary off LRU pages in the range.  Still
			 * need migrate other LRU pages.
			 */
			if (migrate_page_add(page, qp->pagelist, flags))
				has_unmovable = true;
		} else
			break;
	}
	pte_unmap_unlock(mapped_pte, ptl);
	cond_resched();

	if (has_unmovable)
		return 1;

	return addr != end ? -EIO : 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
	int ret = 0;
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = (qp->flags & MPOL_MF_VALID);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	if (!queue_pages_required(page, qp))
		goto unlock;

	if (flags == MPOL_MF_STRICT) {
		/*
		 * STRICT alone means only detecting misplaced page and no
		 * need to further check other vma.
		 */
		ret = -EIO;
		goto unlock;
	}

	if (!vma_migratable(walk->vma)) {
		/*
		 * Must be STRICT with MOVE*, otherwise .test_walk() would
		 * have stopped walking the current vma.
		 * Detecting misplaced page but allow migrating pages which
		 * have been queued.
		 */
		ret = 1;
		goto unlock;
	}

	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
		if (!isolate_huge_page(page, qp->pagelist) &&
		    (flags & MPOL_MF_STRICT))
			/*
			 * Failed to isolate page but allow migrating pages
			 * which have been queued.
			 */
			ret = 1;
	}
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return ret;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	/* range check first */
	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);

	if (!qp->first) {
		qp->first = vma;
		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
			(qp->start < vma->vm_start))
			/* hole at head side of range */
			return -EFAULT;
	}
	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
		((vma->vm_end < qp->end) &&
		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
		/* hole at middle or tail of range */
		return -EFAULT;

	/*
	 * Need to check MPOL_MF_STRICT to return -EIO if possible
	 * regardless of vma_migratable
	 */
	if (!vma_migratable(vma) &&
	    !(flags & MPOL_MF_STRICT))
		return 1;

	if (endvma > end)
		endvma = end;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & MPOL_MF_VALID)
		return 0;
	return 1;
}

static const struct mm_walk_ops queue_pages_walk_ops = {
	.hugetlb_entry		= queue_pages_hugetlb,
	.pmd_entry		= queue_pages_pte_range,
	.test_walk		= queue_pages_test_walk,
};
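/*
 * The three callbacks above cooperate under walk_page_range():
 * queue_pages_test_walk() decides per VMA whether to descend at all,
 * queue_pages_pte_range()/queue_pages_pmd() examine individual (huge)
 * page table entries, and queue_pages_hugetlb() handles hugetlbfs
 * mappings.  queue_pages_range() below wires them up to a queue_pages
 * context.
 */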

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which is
 * passed via @private.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 0 - queue pages successfully or no misplaced page.
 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
 *         memory range specified by nodemask and maxnode points outside
 *         your accessible address space (-EFAULT)
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	int err;
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.start = start,
		.end = end,
		.first = NULL,
	};

	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);

	if (!qp.first)
		/* whole range in hole */
		err = -EFAULT;

	return err;
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_lock held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_lock */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	VM_BUG_ON(!vma);

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	if (flags & MPOL_F_NUMA_BALANCING) {
		if (new && new->mode == MPOL_BIND) {
			new->flags |= (MPOL_F_MOF | MPOL_F_MORON);
		} else {
			ret = -EINVAL;
			mpol_put(new);
			goto out;
		}
	}

	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		mpol_put(new);
		goto out;
	}
	task_lock(current);
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE)
		current->il_prev = MAX_NUMNODES-1;
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p = NULL;
	int err;

	int locked = 1;
	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
	if (err > 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	if (locked)
		mmap_read_unlock(mm);
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		mmap_read_lock(mm);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			mmap_read_unlock(mm);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			/*
			 * Take a refcount on the mpol, lookup_node()
			 * will drop the mmap_lock, so after calling
			 * lookup_node() only "pol" remains valid, "vma"
			 * is stale.
			 */
			pol_refcount = pol;
			vma = NULL;
			mpol_get(pol);
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->v.nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		mmap_read_unlock(mm);
	if (pol_refcount)
		mpol_put(pol_refcount);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration, thp tail pages can be passed.
 */
static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	struct page *head = compound_head(page);
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
		if (!isolate_lru_page(head)) {
			list_add_tail(&head->lru, pagelist);
			mod_node_page_state(page_pgdat(head),
				NR_ISOLATED_ANON + page_is_file_lru(head),
				thp_nr_pages(head));
		} else if (flags & MPOL_MF_STRICT) {
			/*
			 * Non-movable page may reach here.  And, there may be
			 * temporary off LRU pages or non-LRU movable pages.
			 * Treat them as unmovable pages since they can't be
			 * isolated, so they can't be moved at the moment.  It
			 * should return -EIO for this case too.
			 */
			return -EIO;
		}
	}

	return 0;
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;
	struct migration_target_control mtc = {
		.nid = dest,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	lru_cache_disable();

	mmap_read_lock(mm);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory from that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning from_tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship.  In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *	    [0-7] - > [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
	mmap_read_unlock(mm);

	lru_cache_enable();
	if (err < 0)
		return err;
	return busy;

}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_page(struct page *page, unsigned long start)
{
	struct vm_area_struct *vma;
	unsigned long address;

	vma = find_vma(current->mm, start);
	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	if (PageHuge(page)) {
		return alloc_huge_page_vma(page_hstate(compound_head(page)),
				vma, address);
	} else if (PageTransHuge(page)) {
		struct page *thp;

		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
					 HPAGE_PMD_ORDER);
		if (!thp)
			return NULL;
		prep_transhuge_page(thp);
		return thp;
	}
	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
			vma, address);
}
#else

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	return -EIO;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	return -ENOSYS;
}

static struct page *new_page(struct page *page, unsigned long start)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	int ret;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)MPOL_MF_VALID)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (flags & MPOL_MF_LAZY)
		new->flags |= MPOL_F_MOF;

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		lru_cache_disable();
	}
	{
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			mmap_write_lock(mm);
			err = mpol_set_nodemask(new, nmask, scratch);
			if (err)
				mmap_write_unlock(mm);
		} else
			err = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
	}
	if (err)
		goto mpol_out;

	ret = queue_pages_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	if (ret < 0) {
		err = ret;
		goto up_out;
	}

	err = mbind_range(mm, start, end, new);

	if (!err) {
		int nr_failed = 0;

		if (!list_empty(&pagelist)) {
			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
			nr_failed = migrate_pages(&pagelist, new_page, NULL,
				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
			if (nr_failed)
				putback_movable_pages(&pagelist);
		}

		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
			err = -EIO;
	} else {
up_out:
		if (!list_empty(&pagelist))
			putback_movable_pages(&pagelist);
	}

	mmap_write_unlock(mm);
mpol_out:
	mpol_put(new);
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		lru_cache_enable();
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long t;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/*
	 * When the user specified more nodes than supported just check
	 * if the non supported part is all zero.
	 *
	 * If maxnode has more longs than MAX_NUMNODES, check
	 * the bits in that area first. And then go through to
	 * check the rest of the bits which are equal to or bigger than
	 * MAX_NUMNODES. Otherwise, just check bits [MAX_NUMNODES, maxnode).
	 */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
		unsigned long valid_mask = endmask;

		valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
		if (get_user(t, nmask + nlongs - 1))
			return -EFAULT;
		if (t & valid_mask)
			return -EINVAL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}
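/*
 * Hedged user-space sketch of the bitmap ABI that get_nodes() and
 * copy_nodes_to_user() implement (the buffer size and node numbers are
 * assumptions, not requirements):
 *
 *	unsigned long mask[2] = { 0 };		// room for 128 node bits
 *	mask[0] = (1UL << 0) | (1UL << 3);	// nodes 0 and 3
 *	set_mempolicy(MPOL_BIND, mask, 128);	// maxnode = bits in 'mask'
 *
 * Bits at or above MAX_NUMNODES must be clear, otherwise the checks above
 * fail the call with -EINVAL.
 */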

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

static long kernel_mbind(unsigned long start, unsigned long len,
			 unsigned long mode, const unsigned long __user *nmask,
			 unsigned long maxnode, unsigned int flags)
{
	nodemask_t nodes;
	int err;
	unsigned short mode_flags;

	start = untagged_addr(start);
	mode_flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if (mode >= MPOL_MAX)
		return -EINVAL;
	if ((mode_flags & MPOL_F_STATIC_NODES) &&
	    (mode_flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}

SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
		unsigned long, mode, const unsigned long __user *, nmask,
		unsigned long, maxnode, unsigned int, flags)
{
	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
}

/* Set the process memory policy */
static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
				 unsigned long maxnode)
{
	int err;
	nodemask_t nodes;
	unsigned short flags;

	flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if ((unsigned int)mode >= MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, flags, &nodes);
}

SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
		unsigned long, maxnode)
{
	return kernel_set_mempolicy(mode, nmask, maxnode);
}

static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
				const unsigned long __user *old_nodes,
				const unsigned long __user *new_nodes)
{
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	nodemask_t task_nodes;
	int err;
	nodemask_t *old;
	nodemask_t *new;
	NODEMASK_SCRATCH(scratch);

	if (!scratch)
		return -ENOMEM;

	old = &scratch->mask1;
	new = &scratch->mask2;

	err = get_nodes(old, old_nodes, maxnode);
	if (err)
		goto out;

	err = get_nodes(new, new_nodes, maxnode);
	if (err)
		goto out;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		err = -ESRCH;
		goto out;
	}
	get_task_struct(task);

	err = -EINVAL;

	/*
	 * Check if this process has the right to modify the specified process.
	 * Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out_put;
	}
	rcu_read_unlock();

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out_put;
	}

	task_nodes = cpuset_mems_allowed(current);
	nodes_and(*new, *new, task_nodes);
	if (nodes_empty(*new))
		goto out_put;

	err = security_task_movememory(task);
	if (err)
		goto out_put;

	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm) {
		err = -EINVAL;
		goto out;
	}

	err = do_migrate_pages(mm, old, new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);

	mmput(mm);
out:
	NODEMASK_SCRATCH_FREE(scratch);

	return err;

out_put:
	put_task_struct(task);
	goto out;

}

SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
		const unsigned long __user *, old_nodes,
		const unsigned long __user *, new_nodes)
{
	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
}

/* Retrieve NUMA policy */
static int kernel_get_mempolicy(int __user *policy,
				unsigned long __user *nmask,
				unsigned long maxnode,
				unsigned long addr,
				unsigned long flags)
{
	int err;
	int pval;
	nodemask_t nodes;

	if (nmask != NULL && maxnode < nr_node_ids)
		return -EINVAL;

	addr = untagged_addr(addr);

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
		unsigned long __user *, nmask, unsigned long, maxnode,
		unsigned long, addr, unsigned long, flags)
{
	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
}
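/*
 * Hedged user-space sketch of the query side defined above (variable names
 * are illustrative and the system is assumed to have at most 128 possible
 * nodes):
 *
 *	int mode;
 *	unsigned long mask[2] = { 0 };
 *	// which policy and nodemask govern the page backing 'addr'?
 *	get_mempolicy(&mode, mask, 128, addr, MPOL_F_ADDR);
 *
 *	int node;
 *	// which node does the page at 'addr' currently live on?
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_ADDR | MPOL_F_NODE);
 */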
1650
1da177e4
LT
1651#ifdef CONFIG_COMPAT
1652
c93e0f6c
HC
1653COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1654 compat_ulong_t __user *, nmask,
1655 compat_ulong_t, maxnode,
1656 compat_ulong_t, addr, compat_ulong_t, flags)
1da177e4
LT
1657{
1658 long err;
1659 unsigned long __user *nm = NULL;
1660 unsigned long nr_bits, alloc_size;
1661 DECLARE_BITMAP(bm, MAX_NUMNODES);
1662
050c17f2 1663 nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
1da177e4
LT
1664 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1665
1666 if (nmask)
1667 nm = compat_alloc_user_space(alloc_size);
1668
af03c4ac 1669 err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1da177e4
LT
1670
1671 if (!err && nmask) {
2bbff6c7
KH
1672 unsigned long copy_size;
1673 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1674 err = copy_from_user(bm, nm, copy_size);
1da177e4
LT
1675 /* ensure entire bitmap is zeroed */
1676 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1677 err |= compat_put_bitmap(nmask, bm, nr_bits);
1678 }
1679
1680 return err;
1681}
1682
c93e0f6c
HC
1683COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1684 compat_ulong_t, maxnode)
1da177e4 1685{
1da177e4
LT
1686 unsigned long __user *nm = NULL;
1687 unsigned long nr_bits, alloc_size;
1688 DECLARE_BITMAP(bm, MAX_NUMNODES);
1689
1690 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1691 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1692
1693 if (nmask) {
cf01fb99
CS
1694 if (compat_get_bitmap(bm, nmask, nr_bits))
1695 return -EFAULT;
1da177e4 1696 nm = compat_alloc_user_space(alloc_size);
cf01fb99
CS
1697 if (copy_to_user(nm, bm, alloc_size))
1698 return -EFAULT;
1da177e4
LT
1699 }
1700
af03c4ac 1701 return kernel_set_mempolicy(mode, nm, nr_bits+1);
1da177e4
LT
1702}
1703
c93e0f6c
HC
1704COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1705 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1706 compat_ulong_t, maxnode, compat_ulong_t, flags)
1da177e4 1707{
1da177e4
LT
1708 unsigned long __user *nm = NULL;
1709 unsigned long nr_bits, alloc_size;
dfcd3c0d 1710 nodemask_t bm;
1da177e4
LT
1711
1712 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1713 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1714
1715 if (nmask) {
cf01fb99
CS
1716 if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1717 return -EFAULT;
1da177e4 1718 nm = compat_alloc_user_space(alloc_size);
cf01fb99
CS
1719 if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1720 return -EFAULT;
1da177e4
LT
1721 }
1722
e7dc9ad6 1723 return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
1da177e4
LT
1724}
1725
b6e9b0ba
DB
1726COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
1727 compat_ulong_t, maxnode,
1728 const compat_ulong_t __user *, old_nodes,
1729 const compat_ulong_t __user *, new_nodes)
1730{
1731 unsigned long __user *old = NULL;
1732 unsigned long __user *new = NULL;
1733 nodemask_t tmp_mask;
1734 unsigned long nr_bits;
1735 unsigned long size;
1736
1737 nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
1738 size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1739 if (old_nodes) {
1740 if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
1741 return -EFAULT;
1742 old = compat_alloc_user_space(new_nodes ? size * 2 : size);
1743 if (new_nodes)
1744 new = old + size / sizeof(unsigned long);
1745 if (copy_to_user(old, nodes_addr(tmp_mask), size))
1746 return -EFAULT;
1747 }
1748 if (new_nodes) {
1749 if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
1750 return -EFAULT;
1751 if (new == NULL)
1752 new = compat_alloc_user_space(size);
1753 if (copy_to_user(new, nodes_addr(tmp_mask), size))
1754 return -EFAULT;
1755 }
1756 return kernel_migrate_pages(pid, nr_bits + 1, old, new);
1757}
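
/*
 * Illustrative userspace sketch (not part of this file): moving another
 * task's pages from node 0 to node 1 with the migrate_pages() wrapper
 * from libnuma's <numaif.h>; acting on a foreign pid needs CAP_SYS_NICE
 * or matching credentials. The pid value is only an example.
 *
 *	#include <numaif.h>
 *
 *	unsigned long from = 1UL << 0;
 *	unsigned long to = 1UL << 1;
 *
 *	if (migrate_pages(1234, 8 * sizeof(unsigned long), &from, &to) < 0)
 *		perror("migrate_pages");
 */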
1758
1759#endif /* CONFIG_COMPAT */
1da177e4 1760
20ca87f2
LX
1761bool vma_migratable(struct vm_area_struct *vma)
1762{
1763 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1764 return false;
1765
1766 /*
1767 * DAX device mappings require predictable access latency, so avoid
1768 * incurring periodic faults.
1769 */
1770 if (vma_is_dax(vma))
1771 return false;
1772
1773 if (is_vm_hugetlb_page(vma) &&
1774 !hugepage_migration_supported(hstate_vma(vma)))
1775 return false;
1776
1777 /*
1778 * Migration allocates pages in the highest zone. If we cannot
1779 * do so then migration (at least from node to node) is not
1780 * possible.
1781 */
1782 if (vma->vm_file &&
1783 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1784 < policy_zone)
1785 return false;
1786 return true;
1787}
1788
74d2c3a0
ON
1789struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1790 unsigned long addr)
1da177e4 1791{
8d90274b 1792 struct mempolicy *pol = NULL;
1da177e4
LT
1793
1794 if (vma) {
480eccf9 1795 if (vma->vm_ops && vma->vm_ops->get_policy) {
8d90274b 1796 pol = vma->vm_ops->get_policy(vma, addr);
00442ad0 1797 } else if (vma->vm_policy) {
1da177e4 1798 pol = vma->vm_policy;
00442ad0
MG
1799
1800 /*
1801 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1802 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1803 * count on these policies which will be dropped by
1804 * mpol_cond_put() later
1805 */
1806 if (mpol_needs_cond_ref(pol))
1807 mpol_get(pol);
1808 }
1da177e4 1809 }
f15ca78e 1810
74d2c3a0
ON
1811 return pol;
1812}
1813
1814/*
dd6eecb9 1815 * get_vma_policy(@vma, @addr)
74d2c3a0
ON
1816 * @vma: virtual memory area whose policy is sought
1817 * @addr: address in @vma for shared policy lookup
1818 *
1819 * Returns the effective policy for a VMA at the specified address.
dd6eecb9 1820 * Falls back to current->mempolicy or system default policy, as necessary.
74d2c3a0
ON
1821 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1822 * count--added by the get_policy() vm_op, as appropriate--to protect against
1823 * freeing by another task. It is the caller's responsibility to free the
1824 * extra reference for shared policies.
1825 */
ac79f78d 1826static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
dd6eecb9 1827 unsigned long addr)
74d2c3a0
ON
1828{
1829 struct mempolicy *pol = __get_vma_policy(vma, addr);
1830
8d90274b 1831 if (!pol)
dd6eecb9 1832 pol = get_task_policy(current);
8d90274b 1833
1da177e4
LT
1834 return pol;
1835}
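
/*
 * Illustrative sketch of the caller pattern used later in this file
 * (e.g. alloc_pages_vma() and mpol_misplaced()): look up the effective
 * policy, consult it, then drop the conditional reference taken for
 * shared policies.
 *
 *	struct mempolicy *pol = get_vma_policy(vma, addr);
 *
 *	if (pol->mode == MPOL_INTERLEAVE)
 *		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
 *	mpol_cond_put(pol);
 */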
1836
6b6482bb 1837bool vma_policy_mof(struct vm_area_struct *vma)
fc314724 1838{
6b6482bb 1839 struct mempolicy *pol;
fc314724 1840
6b6482bb
ON
1841 if (vma->vm_ops && vma->vm_ops->get_policy) {
1842 bool ret = false;
fc314724 1843
6b6482bb
ON
1844 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1845 if (pol && (pol->flags & MPOL_F_MOF))
1846 ret = true;
1847 mpol_cond_put(pol);
8d90274b 1848
6b6482bb 1849 return ret;
fc314724
MG
1850 }
1851
6b6482bb 1852 pol = vma->vm_policy;
8d90274b 1853 if (!pol)
6b6482bb 1854 pol = get_task_policy(current);
8d90274b 1855
fc314724
MG
1856 return pol->flags & MPOL_F_MOF;
1857}
1858
d3eb1570
LJ
1859static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1860{
1861 enum zone_type dynamic_policy_zone = policy_zone;
1862
1863 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1864
1865 /*
1866 * If policy->v.nodes has movable memory only,
1867 * apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1868 *
1869 * policy->v.nodes has already been intersected with node_states[N_MEMORY],
f0953a1b 1870 * so if the following test fails, it implies
d3eb1570
LJ
1871 * policy->v.nodes has movable memory only.
1872 */
1873 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1874 dynamic_policy_zone = ZONE_MOVABLE;
1875
1876 return zone >= dynamic_policy_zone;
1877}
1878
52cd3b07
LS
1879/*
1880 * Return a nodemask representing a mempolicy for filtering nodes for
1881 * page allocation
1882 */
8ca39e68 1883nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
19770b32
MG
1884{
1885 /* Lower zones don't get a nodemask applied for MPOL_BIND */
45c4745a 1886 if (unlikely(policy->mode == MPOL_BIND) &&
d3eb1570 1887 apply_policy_zone(policy, gfp_zone(gfp)) &&
19770b32
MG
1888 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1889 return &policy->v.nodes;
1890
1891 return NULL;
1892}
1893
04ec6264 1894/* Return the node id preferred by the given mempolicy, or the given id */
f8fd5253 1895static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
1da177e4 1896{
6d840958
MH
1897 if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
1898 nd = policy->v.preferred_node;
1899 else {
19770b32 1900 /*
6d840958
MH
1901 * __GFP_THISNODE shouldn't even be used with the bind policy
1902 * because it could easily break the expectation to stay on the
1903 * requested node and thereby violate the policy.
19770b32 1904 */
6d840958 1905 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1da177e4 1906 }
6d840958 1907
04ec6264 1908 return nd;
1da177e4
LT
1909}
1910
1911/* Do dynamic interleaving for a process */
1912static unsigned interleave_nodes(struct mempolicy *policy)
1913{
45816682 1914 unsigned next;
1da177e4
LT
1915 struct task_struct *me = current;
1916
45816682 1917 next = next_node_in(me->il_prev, policy->v.nodes);
f5b087b5 1918 if (next < MAX_NUMNODES)
45816682
VB
1919 me->il_prev = next;
1920 return next;
1da177e4
LT
1921}
1922
dc85da15
CL
1923/*
1924 * Depending on the memory policy provide a node from which to allocate the
1925 * next slab entry.
1926 */
2a389610 1927unsigned int mempolicy_slab_node(void)
dc85da15 1928{
e7b691b0 1929 struct mempolicy *policy;
2a389610 1930 int node = numa_mem_id();
e7b691b0
AK
1931
1932 if (in_interrupt())
2a389610 1933 return node;
e7b691b0
AK
1934
1935 policy = current->mempolicy;
fc36b8d3 1936 if (!policy || policy->flags & MPOL_F_LOCAL)
2a389610 1937 return node;
bea904d5
LS
1938
1939 switch (policy->mode) {
1940 case MPOL_PREFERRED:
fc36b8d3
LS
1941 /*
1942 * handled MPOL_F_LOCAL above
1943 */
1944 return policy->v.preferred_node;
765c4507 1945
dc85da15
CL
1946 case MPOL_INTERLEAVE:
1947 return interleave_nodes(policy);
1948
dd1a239f 1949 case MPOL_BIND: {
c33d6c06
MG
1950 struct zoneref *z;
1951
dc85da15
CL
1952 /*
1953 * Follow bind policy behavior and start allocation at the
1954 * first node.
1955 */
19770b32 1956 struct zonelist *zonelist;
19770b32 1957 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
c9634cf0 1958 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
c33d6c06
MG
1959 z = first_zones_zonelist(zonelist, highest_zoneidx,
1960 &policy->v.nodes);
c1093b74 1961 return z->zone ? zone_to_nid(z->zone) : node;
dd1a239f 1962 }
dc85da15 1963
dc85da15 1964 default:
bea904d5 1965 BUG();
dc85da15
CL
1966 }
1967}
1968
fee83b3a
AM
1969/*
1970 * Do static interleaving for a VMA with known offset @n. Returns the n'th
1971 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1972 * number of present nodes.
1973 */
98c70baa 1974static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1da177e4 1975{
dfcd3c0d 1976 unsigned nnodes = nodes_weight(pol->v.nodes);
f5b087b5 1977 unsigned target;
fee83b3a
AM
1978 int i;
1979 int nid;
1da177e4 1980
f5b087b5
DR
1981 if (!nnodes)
1982 return numa_node_id();
fee83b3a
AM
1983 target = (unsigned int)n % nnodes;
1984 nid = first_node(pol->v.nodes);
1985 for (i = 0; i < target; i++)
dfcd3c0d 1986 nid = next_node(nid, pol->v.nodes);
1da177e4
LT
1987 return nid;
1988}
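
/*
 * Worked example (illustrative): with pol->v.nodes = {0,2,5} and n = 4,
 * nnodes = 3 and target = 4 % 3 = 1, so we start at first_node() = 0,
 * advance once, and return node 2.
 */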
1989
5da7ca86
CL
1990/* Determine a node number for interleave */
1991static inline unsigned interleave_nid(struct mempolicy *pol,
1992 struct vm_area_struct *vma, unsigned long addr, int shift)
1993{
1994 if (vma) {
1995 unsigned long off;
1996
3b98b087
NA
1997 /*
1998 * for small pages, there is no difference between
1999 * shift and PAGE_SHIFT, so the bit-shift is safe.
2000 * for huge pages, since vm_pgoff is in units of small
2001 * pages, we need to shift off the always 0 bits to get
2002 * a useful offset.
2003 */
2004 BUG_ON(shift < PAGE_SHIFT);
2005 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
5da7ca86 2006 off += (addr - vma->vm_start) >> shift;
98c70baa 2007 return offset_il_node(pol, off);
5da7ca86
CL
2008 } else
2009 return interleave_nodes(pol);
2010}
2011
00ac59ad 2012#ifdef CONFIG_HUGETLBFS
480eccf9 2013/*
04ec6264 2014 * huge_node(@vma, @addr, @gfp_flags, @mpol, @nodemask)
b46e14ac
FF
2015 * @vma: virtual memory area whose policy is sought
2016 * @addr: address in @vma for shared policy lookup and interleave policy
2017 * @gfp_flags: for requested zone
2018 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2019 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
480eccf9 2020 *
04ec6264 2021 * Returns a nid suitable for a huge page allocation and a pointer
52cd3b07
LS
2022 * to the struct mempolicy for conditional unref after allocation.
2023 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
2024 * @nodemask for filtering the zonelist.
c0ff7453 2025 *
d26914d1 2026 * Must be protected by read_mems_allowed_begin()
480eccf9 2027 */
04ec6264
VB
2028int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
2029 struct mempolicy **mpol, nodemask_t **nodemask)
5da7ca86 2030{
04ec6264 2031 int nid;
5da7ca86 2032
dd6eecb9 2033 *mpol = get_vma_policy(vma, addr);
19770b32 2034 *nodemask = NULL; /* assume !MPOL_BIND */
5da7ca86 2035
52cd3b07 2036 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
04ec6264
VB
2037 nid = interleave_nid(*mpol, vma, addr,
2038 huge_page_shift(hstate_vma(vma)));
52cd3b07 2039 } else {
04ec6264 2040 nid = policy_node(gfp_flags, *mpol, numa_node_id());
52cd3b07
LS
2041 if ((*mpol)->mode == MPOL_BIND)
2042 *nodemask = &(*mpol)->v.nodes;
480eccf9 2043 }
04ec6264 2044 return nid;
5da7ca86 2045}
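
/*
 * Illustrative sketch of the expected caller pattern (the hugetlb
 * allocation path, e.g. dequeue_huge_page_vma(); abbreviated, names of
 * the surrounding steps are only indicative):
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	unsigned int cookie;
 *	int nid;
 *
 *	cookie = read_mems_allowed_begin();
 *	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
 *	... allocate a huge page from nid/nodemask ...
 *	mpol_cond_put(mpol);
 *	... retry if read_mems_allowed_retry(cookie) ...
 */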
06808b08
LS
2046
2047/*
2048 * init_nodemask_of_mempolicy
2049 *
2050 * If the current task's mempolicy is "default" [NULL], return 'false'
2051 * to indicate default policy. Otherwise, extract the policy nodemask
2052 * for 'bind' or 'interleave' policy into the argument nodemask, or
2053 * initialize the argument nodemask to contain the single node for
2054 * 'preferred' or 'local' policy and return 'true' to indicate presence
2055 * of non-default mempolicy.
2056 *
2057 * We don't bother with reference counting the mempolicy [mpol_get/put]
2058 * because the current task is examining its own mempolicy and a task's
2059 * mempolicy is only ever changed by the task itself.
2060 *
2061 * N.B., it is the caller's responsibility to free a returned nodemask.
2062 */
2063bool init_nodemask_of_mempolicy(nodemask_t *mask)
2064{
2065 struct mempolicy *mempolicy;
2066 int nid;
2067
2068 if (!(mask && current->mempolicy))
2069 return false;
2070
c0ff7453 2071 task_lock(current);
06808b08
LS
2072 mempolicy = current->mempolicy;
2073 switch (mempolicy->mode) {
2074 case MPOL_PREFERRED:
2075 if (mempolicy->flags & MPOL_F_LOCAL)
2076 nid = numa_node_id();
2077 else
2078 nid = mempolicy->v.preferred_node;
2079 init_nodemask_of_node(mask, nid);
2080 break;
2081
2082 case MPOL_BIND:
06808b08
LS
2083 case MPOL_INTERLEAVE:
2084 *mask = mempolicy->v.nodes;
2085 break;
2086
2087 default:
2088 BUG();
2089 }
c0ff7453 2090 task_unlock(current);
06808b08
LS
2091
2092 return true;
2093}
00ac59ad 2094#endif
5da7ca86 2095
6f48d0eb
DR
2096/*
2097 * mempolicy_nodemask_intersects
2098 *
2099 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
2100 * policy. Otherwise, check for intersection between mask and the policy
f0953a1b 2101 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
6f48d0eb
DR
2102 * policy, always return true since it may allocate elsewhere on fallback.
2103 *
2104 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2105 */
2106bool mempolicy_nodemask_intersects(struct task_struct *tsk,
2107 const nodemask_t *mask)
2108{
2109 struct mempolicy *mempolicy;
2110 bool ret = true;
2111
2112 if (!mask)
2113 return ret;
2114 task_lock(tsk);
2115 mempolicy = tsk->mempolicy;
2116 if (!mempolicy)
2117 goto out;
2118
2119 switch (mempolicy->mode) {
2120 case MPOL_PREFERRED:
2121 /*
2122 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes to
2123 * allocate from; they may fall back to other nodes under OOM.
2124 * Thus, it's possible for tsk to have allocated memory from
2125 * nodes in mask.
2126 */
2127 break;
2128 case MPOL_BIND:
2129 case MPOL_INTERLEAVE:
2130 ret = nodes_intersects(mempolicy->v.nodes, *mask);
2131 break;
2132 default:
2133 BUG();
2134 }
2135out:
2136 task_unlock(tsk);
2137 return ret;
2138}
2139
1da177e4
LT
2140/* Allocate a page in interleaved policy.
2141 Own path because it needs to do special accounting. */
662f3a0b
AK
2142static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2143 unsigned nid)
1da177e4 2144{
1da177e4
LT
2145 struct page *page;
2146
84172f4b 2147 page = __alloc_pages(gfp, order, nid, NULL);
4518085e
KW
2148 /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2149 if (!static_branch_likely(&vm_numa_stat_key))
2150 return page;
de55c8b2
AR
2151 if (page && page_to_nid(page) == nid) {
2152 preempt_disable();
2153 __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
2154 preempt_enable();
2155 }
1da177e4
LT
2156 return page;
2157}
2158
2159/**
eb350739
MWO
2160 * alloc_pages_vma - Allocate a page for a VMA.
2161 * @gfp: GFP flags.
2162 * @order: Order of the GFP allocation.
2163 * @vma: Pointer to VMA or NULL if not available.
2164 * @addr: Virtual address of the allocation. Must be inside @vma.
2165 * @node: Which node to prefer for allocation (modulo policy).
2166 * @hugepage: For hugepages try only the preferred node if possible.
1da177e4 2167 *
eb350739
MWO
2168 * Allocate a page for a specific address in @vma, using the appropriate
2169 * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock
2170 * of the mm_struct of the VMA to prevent it from going away. Should be
2171 * used for all allocations for pages that will be mapped into user space.
1da177e4 2172 *
eb350739 2173 * Return: The page on success or NULL if allocation fails.
1da177e4 2174 */
eb350739 2175struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
19deb769 2176 unsigned long addr, int node, bool hugepage)
1da177e4 2177{
cc9a6c87 2178 struct mempolicy *pol;
c0ff7453 2179 struct page *page;
04ec6264 2180 int preferred_nid;
be97a41b 2181 nodemask_t *nmask;
cc9a6c87 2182
dd6eecb9 2183 pol = get_vma_policy(vma, addr);
1da177e4 2184
0867a57c
VB
2185 if (pol->mode == MPOL_INTERLEAVE) {
2186 unsigned nid;
2187
2188 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2189 mpol_cond_put(pol);
2190 page = alloc_page_interleave(gfp, order, nid);
2191 goto out;
19deb769
DR
2192 }
2193
2194 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2195 int hpage_node = node;
2196
2197 /*
2198 * For hugepage allocation and non-interleave policy which
2199 * allows the current node (or other explicitly preferred
2200 * node) we only try to allocate from the current/preferred
2201 * node and don't fall back to other nodes, as the cost of
2202 * remote accesses would likely offset THP benefits.
2203 *
2204 * If the policy is interleave, or does not allow the current
2205 * node in its nodemask, we allocate the standard way.
2206 */
2207 if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
2208 hpage_node = pol->v.preferred_node;
2209
2210 nmask = policy_nodemask(gfp, pol);
2211 if (!nmask || node_isset(hpage_node, *nmask)) {
2212 mpol_cond_put(pol);
cc638f32
VB
2213 /*
2214 * First, try to allocate THP only on local node, but
2215 * don't reclaim unnecessarily, just compact.
2216 */
19deb769 2217 page = __alloc_pages_node(hpage_node,
cc638f32 2218 gfp | __GFP_THISNODE | __GFP_NORETRY, order);
76e654cc
DR
2219
2220 /*
2221 * If hugepage allocations are configured to always
2222 * synchronous compact or the vma has been madvised
2223 * to prefer hugepage backing, retry allowing remote
cc638f32 2224 * memory with both reclaim and compact as well.
76e654cc
DR
2225 */
2226 if (!page && (gfp & __GFP_DIRECT_RECLAIM))
2227 page = __alloc_pages_node(hpage_node,
cc638f32 2228 gfp, order);
76e654cc 2229
19deb769
DR
2230 goto out;
2231 }
356ff8a9
DR
2232 }
2233
be97a41b 2234 nmask = policy_nodemask(gfp, pol);
04ec6264 2235 preferred_nid = policy_node(gfp, pol, node);
84172f4b 2236 page = __alloc_pages(gfp, order, preferred_nid, nmask);
d51e9894 2237 mpol_cond_put(pol);
be97a41b 2238out:
c0ff7453 2239 return page;
1da177e4 2240}
69262215 2241EXPORT_SYMBOL(alloc_pages_vma);
1da177e4
LT
2242
2243/**
6421ec76
MWO
2244 * alloc_pages - Allocate pages.
2245 * @gfp: GFP flags.
2246 * @order: Power of two of number of pages to allocate.
1da177e4 2247 *
6421ec76
MWO
2248 * Allocate 1 << @order contiguous pages. The physical address of the
2249 * first page is naturally aligned (eg an order-3 allocation will be aligned
2250 * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current
2251 * process is honoured when in process context.
1da177e4 2252 *
6421ec76
MWO
2253 * Context: Can be called from any context, providing the appropriate GFP
2254 * flags are used.
2255 * Return: The page on success or NULL if allocation fails.
1da177e4 2256 */
d7f946d0 2257struct page *alloc_pages(gfp_t gfp, unsigned order)
1da177e4 2258{
8d90274b 2259 struct mempolicy *pol = &default_policy;
c0ff7453 2260 struct page *page;
1da177e4 2261
8d90274b
ON
2262 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2263 pol = get_task_policy(current);
52cd3b07
LS
2264
2265 /*
2266 * No reference counting needed for current->mempolicy
2267 * nor system default_policy
2268 */
45c4745a 2269 if (pol->mode == MPOL_INTERLEAVE)
c0ff7453
MX
2270 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2271 else
84172f4b 2272 page = __alloc_pages(gfp, order,
04ec6264 2273 policy_node(gfp, pol, numa_node_id()),
5c4b4be3 2274 policy_nodemask(gfp, pol));
cc9a6c87 2275
c0ff7453 2276 return page;
1da177e4 2277}
d7f946d0 2278EXPORT_SYMBOL(alloc_pages);
1da177e4 2279
ef0855d3
ON
2280int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2281{
2282 struct mempolicy *pol = mpol_dup(vma_policy(src));
2283
2284 if (IS_ERR(pol))
2285 return PTR_ERR(pol);
2286 dst->vm_policy = pol;
2287 return 0;
2288}
2289
4225399a 2290/*
846a16bf 2291 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
4225399a
PJ
2292 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
2293 * with the mems_allowed returned by cpuset_mems_allowed(). This
2294 * keeps mempolicies cpuset relative after its cpuset moves. See
2295 * further kernel/cpuset.c update_nodemask().
708c1bbc
MX
2296 *
2297 * current's mempolicy may be rebound by another task (the task that changes
2298 * the cpuset's mems), so we needn't do rebind work for the current task.
4225399a 2299 */
4225399a 2300
846a16bf
LS
2301/* Slow path of a mempolicy duplicate */
2302struct mempolicy *__mpol_dup(struct mempolicy *old)
1da177e4
LT
2303{
2304 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2305
2306 if (!new)
2307 return ERR_PTR(-ENOMEM);
708c1bbc
MX
2308
2309 /* task's mempolicy is protected by alloc_lock */
2310 if (old == current->mempolicy) {
2311 task_lock(current);
2312 *new = *old;
2313 task_unlock(current);
2314 } else
2315 *new = *old;
2316
4225399a
PJ
2317 if (current_cpuset_is_being_rebound()) {
2318 nodemask_t mems = cpuset_mems_allowed(current);
213980c0 2319 mpol_rebind_policy(new, &mems);
4225399a 2320 }
1da177e4 2321 atomic_set(&new->refcnt, 1);
1da177e4
LT
2322 return new;
2323}
2324
2325/* Slow path of a mempolicy comparison */
fcfb4dcc 2326bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1da177e4
LT
2327{
2328 if (!a || !b)
fcfb4dcc 2329 return false;
45c4745a 2330 if (a->mode != b->mode)
fcfb4dcc 2331 return false;
19800502 2332 if (a->flags != b->flags)
fcfb4dcc 2333 return false;
19800502
BL
2334 if (mpol_store_user_nodemask(a))
2335 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
fcfb4dcc 2336 return false;
19800502 2337
45c4745a 2338 switch (a->mode) {
19770b32 2339 case MPOL_BIND:
1da177e4 2340 case MPOL_INTERLEAVE:
fcfb4dcc 2341 return !!nodes_equal(a->v.nodes, b->v.nodes);
1da177e4 2342 case MPOL_PREFERRED:
8970a63e
YX
2343 /* a's ->flags is the same as b's */
2344 if (a->flags & MPOL_F_LOCAL)
2345 return true;
75719661 2346 return a->v.preferred_node == b->v.preferred_node;
1da177e4
LT
2347 default:
2348 BUG();
fcfb4dcc 2349 return false;
1da177e4
LT
2350 }
2351}
2352
1da177e4
LT
2353/*
2354 * Shared memory backing store policy support.
2355 *
2356 * Remember policies even when nobody has shared memory mapped.
2357 * The policies are kept in a red-black tree linked from the inode.
4a8c7bb5 2358 * They are protected by the sp->lock rwlock, which should be held
1da177e4
LT
2359 * for any accesses to the tree.
2360 */
2361
4a8c7bb5
NZ
2362/*
2363 * Look up the first element intersecting start-end. Caller holds sp->lock for
2364 * reading or for writing
2365 */
1da177e4
LT
2366static struct sp_node *
2367sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2368{
2369 struct rb_node *n = sp->root.rb_node;
2370
2371 while (n) {
2372 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2373
2374 if (start >= p->end)
2375 n = n->rb_right;
2376 else if (end <= p->start)
2377 n = n->rb_left;
2378 else
2379 break;
2380 }
2381 if (!n)
2382 return NULL;
2383 for (;;) {
2384 struct sp_node *w = NULL;
2385 struct rb_node *prev = rb_prev(n);
2386 if (!prev)
2387 break;
2388 w = rb_entry(prev, struct sp_node, nd);
2389 if (w->end <= start)
2390 break;
2391 n = prev;
2392 }
2393 return rb_entry(n, struct sp_node, nd);
2394}
2395
4a8c7bb5
NZ
2396/*
2397 * Insert a new shared policy into the list. Caller holds sp->lock for
2398 * writing.
2399 */
1da177e4
LT
2400static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2401{
2402 struct rb_node **p = &sp->root.rb_node;
2403 struct rb_node *parent = NULL;
2404 struct sp_node *nd;
2405
2406 while (*p) {
2407 parent = *p;
2408 nd = rb_entry(parent, struct sp_node, nd);
2409 if (new->start < nd->start)
2410 p = &(*p)->rb_left;
2411 else if (new->end > nd->end)
2412 p = &(*p)->rb_right;
2413 else
2414 BUG();
2415 }
2416 rb_link_node(&new->nd, parent, p);
2417 rb_insert_color(&new->nd, &sp->root);
140d5a49 2418 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
45c4745a 2419 new->policy ? new->policy->mode : 0);
1da177e4
LT
2420}
2421
2422/* Find shared policy intersecting idx */
2423struct mempolicy *
2424mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2425{
2426 struct mempolicy *pol = NULL;
2427 struct sp_node *sn;
2428
2429 if (!sp->root.rb_node)
2430 return NULL;
4a8c7bb5 2431 read_lock(&sp->lock);
1da177e4
LT
2432 sn = sp_lookup(sp, idx, idx+1);
2433 if (sn) {
2434 mpol_get(sn->policy);
2435 pol = sn->policy;
2436 }
4a8c7bb5 2437 read_unlock(&sp->lock);
1da177e4
LT
2438 return pol;
2439}
2440
63f74ca2
KM
2441static void sp_free(struct sp_node *n)
2442{
2443 mpol_put(n->policy);
2444 kmem_cache_free(sn_cache, n);
2445}
2446
771fb4d8
LS
2447/**
2448 * mpol_misplaced - check whether current page node is valid in policy
2449 *
b46e14ac
FF
2450 * @page: page to be checked
2451 * @vma: vm area where page mapped
2452 * @addr: virtual address where page mapped
771fb4d8
LS
2453 *
2454 * Look up the current policy node id for vma, addr and "compare to" page's
5f076944 2455 * node id. Policy determination "mimics" alloc_page_vma().
771fb4d8 2456 * Called from fault path where we know the vma and faulting address.
5f076944
MWO
2457 *
2458 * Return: -1 if the page is in a node that is valid for this policy, or a
2459 * suitable node ID to allocate a replacement page from.
771fb4d8
LS
2460 */
2461int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2462{
2463 struct mempolicy *pol;
c33d6c06 2464 struct zoneref *z;
771fb4d8
LS
2465 int curnid = page_to_nid(page);
2466 unsigned long pgoff;
90572890
PZ
2467 int thiscpu = raw_smp_processor_id();
2468 int thisnid = cpu_to_node(thiscpu);
98fa15f3 2469 int polnid = NUMA_NO_NODE;
771fb4d8
LS
2470 int ret = -1;
2471
dd6eecb9 2472 pol = get_vma_policy(vma, addr);
771fb4d8
LS
2473 if (!(pol->flags & MPOL_F_MOF))
2474 goto out;
2475
2476 switch (pol->mode) {
2477 case MPOL_INTERLEAVE:
771fb4d8
LS
2478 pgoff = vma->vm_pgoff;
2479 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
98c70baa 2480 polnid = offset_il_node(pol, pgoff);
771fb4d8
LS
2481 break;
2482
2483 case MPOL_PREFERRED:
2484 if (pol->flags & MPOL_F_LOCAL)
2485 polnid = numa_node_id();
2486 else
2487 polnid = pol->v.preferred_node;
2488 break;
2489
2490 case MPOL_BIND:
bda420b9
HY
2491 /* Optimize placement among multiple nodes via NUMA balancing */
2492 if (pol->flags & MPOL_F_MORON) {
2493 if (node_isset(thisnid, pol->v.nodes))
2494 break;
2495 goto out;
2496 }
c33d6c06 2497
771fb4d8
LS
2498 /*
2499 * allows binding to multiple nodes.
2500 * use current page if in policy nodemask,
2501 * else select nearest allowed node, if any.
2502 * If no allowed nodes, use current [!misplaced].
2503 */
2504 if (node_isset(curnid, pol->v.nodes))
2505 goto out;
c33d6c06 2506 z = first_zones_zonelist(
771fb4d8
LS
2507 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2508 gfp_zone(GFP_HIGHUSER),
c33d6c06 2509 &pol->v.nodes);
c1093b74 2510 polnid = zone_to_nid(z->zone);
771fb4d8
LS
2511 break;
2512
2513 default:
2514 BUG();
2515 }
5606e387
MG
2516
2517 /* Migrate the page towards the node whose CPU is referencing it */
e42c8ff2 2518 if (pol->flags & MPOL_F_MORON) {
90572890 2519 polnid = thisnid;
5606e387 2520
10f39042 2521 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
de1c9ce6 2522 goto out;
e42c8ff2
MG
2523 }
2524
771fb4d8
LS
2525 if (curnid != polnid)
2526 ret = polnid;
2527out:
2528 mpol_cond_put(pol);
2529
2530 return ret;
2531}
2532
c11600e4
DR
2533/*
2534 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2535 * dropped after task->mempolicy is set to NULL so that any allocation done as
2536 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2537 * policy.
2538 */
2539void mpol_put_task_policy(struct task_struct *task)
2540{
2541 struct mempolicy *pol;
2542
2543 task_lock(task);
2544 pol = task->mempolicy;
2545 task->mempolicy = NULL;
2546 task_unlock(task);
2547 mpol_put(pol);
2548}
2549
1da177e4
LT
2550static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2551{
140d5a49 2552 pr_debug("deleting %lx-%lx\n", n->start, n->end);
1da177e4 2553 rb_erase(&n->nd, &sp->root);
63f74ca2 2554 sp_free(n);
1da177e4
LT
2555}
2556
42288fe3
MG
2557static void sp_node_init(struct sp_node *node, unsigned long start,
2558 unsigned long end, struct mempolicy *pol)
2559{
2560 node->start = start;
2561 node->end = end;
2562 node->policy = pol;
2563}
2564
dbcb0f19
AB
2565static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2566 struct mempolicy *pol)
1da177e4 2567{
869833f2
KM
2568 struct sp_node *n;
2569 struct mempolicy *newpol;
1da177e4 2570
869833f2 2571 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1da177e4
LT
2572 if (!n)
2573 return NULL;
869833f2
KM
2574
2575 newpol = mpol_dup(pol);
2576 if (IS_ERR(newpol)) {
2577 kmem_cache_free(sn_cache, n);
2578 return NULL;
2579 }
2580 newpol->flags |= MPOL_F_SHARED;
42288fe3 2581 sp_node_init(n, start, end, newpol);
869833f2 2582
1da177e4
LT
2583 return n;
2584}
2585
2586/* Replace a policy range. */
2587static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2588 unsigned long end, struct sp_node *new)
2589{
b22d127a 2590 struct sp_node *n;
42288fe3
MG
2591 struct sp_node *n_new = NULL;
2592 struct mempolicy *mpol_new = NULL;
b22d127a 2593 int ret = 0;
1da177e4 2594
42288fe3 2595restart:
4a8c7bb5 2596 write_lock(&sp->lock);
1da177e4
LT
2597 n = sp_lookup(sp, start, end);
2598 /* Take care of old policies in the same range. */
2599 while (n && n->start < end) {
2600 struct rb_node *next = rb_next(&n->nd);
2601 if (n->start >= start) {
2602 if (n->end <= end)
2603 sp_delete(sp, n);
2604 else
2605 n->start = end;
2606 } else {
2607 /* Old policy spanning whole new range. */
2608 if (n->end > end) {
42288fe3
MG
2609 if (!n_new)
2610 goto alloc_new;
2611
2612 *mpol_new = *n->policy;
2613 atomic_set(&mpol_new->refcnt, 1);
7880639c 2614 sp_node_init(n_new, end, n->end, mpol_new);
1da177e4 2615 n->end = start;
5ca39575 2616 sp_insert(sp, n_new);
42288fe3
MG
2617 n_new = NULL;
2618 mpol_new = NULL;
1da177e4
LT
2619 break;
2620 } else
2621 n->end = start;
2622 }
2623 if (!next)
2624 break;
2625 n = rb_entry(next, struct sp_node, nd);
2626 }
2627 if (new)
2628 sp_insert(sp, new);
4a8c7bb5 2629 write_unlock(&sp->lock);
42288fe3
MG
2630 ret = 0;
2631
2632err_out:
2633 if (mpol_new)
2634 mpol_put(mpol_new);
2635 if (n_new)
2636 kmem_cache_free(sn_cache, n_new);
2637
b22d127a 2638 return ret;
42288fe3
MG
2639
2640alloc_new:
4a8c7bb5 2641 write_unlock(&sp->lock);
42288fe3
MG
2642 ret = -ENOMEM;
2643 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2644 if (!n_new)
2645 goto err_out;
2646 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2647 if (!mpol_new)
2648 goto err_out;
2649 goto restart;
1da177e4
LT
2650}
2651
71fe804b
LS
2652/**
2653 * mpol_shared_policy_init - initialize shared policy for inode
2654 * @sp: pointer to inode shared policy
2655 * @mpol: struct mempolicy to install
2656 *
2657 * Install non-NULL @mpol in inode's shared policy rb-tree.
2658 * On entry, the current task has a reference on a non-NULL @mpol.
2659 * This must be released on exit.
4bfc4495 2660 * This is called at get_inode() time, so we can use GFP_KERNEL.
71fe804b
LS
2661 */
2662void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2663{
58568d2a
MX
2664 int ret;
2665
71fe804b 2666 sp->root = RB_ROOT; /* empty tree == default mempolicy */
4a8c7bb5 2667 rwlock_init(&sp->lock);
71fe804b
LS
2668
2669 if (mpol) {
2670 struct vm_area_struct pvma;
2671 struct mempolicy *new;
4bfc4495 2672 NODEMASK_SCRATCH(scratch);
71fe804b 2673
4bfc4495 2674 if (!scratch)
5c0c1654 2675 goto put_mpol;
71fe804b
LS
2676 /* contextualize the tmpfs mount point mempolicy */
2677 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
15d77835 2678 if (IS_ERR(new))
0cae3457 2679 goto free_scratch; /* no valid nodemask intersection */
58568d2a
MX
2680
2681 task_lock(current);
4bfc4495 2682 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
58568d2a 2683 task_unlock(current);
15d77835 2684 if (ret)
5c0c1654 2685 goto put_new;
71fe804b
LS
2686
2687 /* Create pseudo-vma that contains just the policy */
2c4541e2 2688 vma_init(&pvma, NULL);
71fe804b
LS
2689 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2690 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
15d77835 2691
5c0c1654 2692put_new:
71fe804b 2693 mpol_put(new); /* drop initial ref */
0cae3457 2694free_scratch:
4bfc4495 2695 NODEMASK_SCRATCH_FREE(scratch);
5c0c1654
LS
2696put_mpol:
2697 mpol_put(mpol); /* drop our incoming ref on sb mpol */
7339ff83
RH
2698 }
2699}
2700
1da177e4
LT
2701int mpol_set_shared_policy(struct shared_policy *info,
2702 struct vm_area_struct *vma, struct mempolicy *npol)
2703{
2704 int err;
2705 struct sp_node *new = NULL;
2706 unsigned long sz = vma_pages(vma);
2707
028fec41 2708 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
1da177e4 2709 vma->vm_pgoff,
45c4745a 2710 sz, npol ? npol->mode : -1,
028fec41 2711 npol ? npol->flags : -1,
00ef2d2f 2712 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
1da177e4
LT
2713
2714 if (npol) {
2715 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2716 if (!new)
2717 return -ENOMEM;
2718 }
2719 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2720 if (err && new)
63f74ca2 2721 sp_free(new);
1da177e4
LT
2722 return err;
2723}
2724
2725/* Free a backing policy store on inode delete. */
2726void mpol_free_shared_policy(struct shared_policy *p)
2727{
2728 struct sp_node *n;
2729 struct rb_node *next;
2730
2731 if (!p->root.rb_node)
2732 return;
4a8c7bb5 2733 write_lock(&p->lock);
1da177e4
LT
2734 next = rb_first(&p->root);
2735 while (next) {
2736 n = rb_entry(next, struct sp_node, nd);
2737 next = rb_next(&n->nd);
63f74ca2 2738 sp_delete(p, n);
1da177e4 2739 }
4a8c7bb5 2740 write_unlock(&p->lock);
1da177e4
LT
2741}
2742
1a687c2e 2743#ifdef CONFIG_NUMA_BALANCING
c297663c 2744static int __initdata numabalancing_override;
1a687c2e
MG
2745
2746static void __init check_numabalancing_enable(void)
2747{
2748 bool numabalancing_default = false;
2749
2750 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2751 numabalancing_default = true;
2752
c297663c
MG
2753 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2754 if (numabalancing_override)
2755 set_numabalancing_state(numabalancing_override == 1);
2756
b0dc2b9b 2757 if (num_online_nodes() > 1 && !numabalancing_override) {
756a025f 2758 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
c297663c 2759 numabalancing_default ? "Enabling" : "Disabling");
1a687c2e
MG
2760 set_numabalancing_state(numabalancing_default);
2761 }
2762}
2763
2764static int __init setup_numabalancing(char *str)
2765{
2766 int ret = 0;
2767 if (!str)
2768 goto out;
1a687c2e
MG
2769
2770 if (!strcmp(str, "enable")) {
c297663c 2771 numabalancing_override = 1;
1a687c2e
MG
2772 ret = 1;
2773 } else if (!strcmp(str, "disable")) {
c297663c 2774 numabalancing_override = -1;
1a687c2e
MG
2775 ret = 1;
2776 }
2777out:
2778 if (!ret)
4a404bea 2779 pr_warn("Unable to parse numa_balancing=\n");
1a687c2e
MG
2780
2781 return ret;
2782}
2783__setup("numa_balancing=", setup_numabalancing);
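
/*
 * Illustrative usage (sketch): automatic NUMA balancing can be turned
 * off at boot with "numa_balancing=disable" on the kernel command line,
 * or at run time via the sysctl mentioned in the message above, e.g.
 * "sysctl kernel.numa_balancing=0".
 */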
2784#else
2785static inline void __init check_numabalancing_enable(void)
2786{
2787}
2788#endif /* CONFIG_NUMA_BALANCING */
2789
1da177e4
LT
2790/* assumes fs == KERNEL_DS */
2791void __init numa_policy_init(void)
2792{
b71636e2
PM
2793 nodemask_t interleave_nodes;
2794 unsigned long largest = 0;
2795 int nid, prefer = 0;
2796
1da177e4
LT
2797 policy_cache = kmem_cache_create("numa_policy",
2798 sizeof(struct mempolicy),
20c2df83 2799 0, SLAB_PANIC, NULL);
1da177e4
LT
2800
2801 sn_cache = kmem_cache_create("shared_policy_node",
2802 sizeof(struct sp_node),
20c2df83 2803 0, SLAB_PANIC, NULL);
1da177e4 2804
5606e387
MG
2805 for_each_node(nid) {
2806 preferred_node_policy[nid] = (struct mempolicy) {
2807 .refcnt = ATOMIC_INIT(1),
2808 .mode = MPOL_PREFERRED,
2809 .flags = MPOL_F_MOF | MPOL_F_MORON,
2810 .v = { .preferred_node = nid, },
2811 };
2812 }
2813
b71636e2
PM
2814 /*
2815 * Set interleaving policy for system init. Interleaving is only
2816 * enabled across suitably sized nodes (default is >= 16MB), falling
2817 * back to the largest node if they're all smaller.
2818 */
2819 nodes_clear(interleave_nodes);
01f13bd6 2820 for_each_node_state(nid, N_MEMORY) {
b71636e2
PM
2821 unsigned long total_pages = node_present_pages(nid);
2822
2823 /* Preserve the largest node */
2824 if (largest < total_pages) {
2825 largest = total_pages;
2826 prefer = nid;
2827 }
2828
2829 /* Interleave this node? */
2830 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2831 node_set(nid, interleave_nodes);
2832 }
2833
2834 /* All too small, use the largest */
2835 if (unlikely(nodes_empty(interleave_nodes)))
2836 node_set(prefer, interleave_nodes);
1da177e4 2837
028fec41 2838 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
b1de0d13 2839 pr_err("%s: interleaving failed\n", __func__);
1a687c2e
MG
2840
2841 check_numabalancing_enable();
1da177e4
LT
2842}
2843
8bccd85f 2844/* Reset policy of current process to default */
1da177e4
LT
2845void numa_default_policy(void)
2846{
028fec41 2847 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
1da177e4 2848}
68860ec1 2849
095f1fc4
LS
2850/*
2851 * Parse and format mempolicy from/to strings
2852 */
2853
1a75a6c8 2854/*
f2a07f40 2855 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
1a75a6c8 2856 */
345ace9c
LS
2857static const char * const policy_modes[] =
2858{
2859 [MPOL_DEFAULT] = "default",
2860 [MPOL_PREFERRED] = "prefer",
2861 [MPOL_BIND] = "bind",
2862 [MPOL_INTERLEAVE] = "interleave",
d3a71033 2863 [MPOL_LOCAL] = "local",
345ace9c 2864};
1a75a6c8 2865
095f1fc4
LS
2866
2867#ifdef CONFIG_TMPFS
2868/**
f2a07f40 2869 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
095f1fc4 2870 * @str: string containing mempolicy to parse
71fe804b 2871 * @mpol: pointer to struct mempolicy pointer, returned on success.
095f1fc4
LS
2872 *
2873 * Format of input:
2874 * <mode>[=<flags>][:<nodelist>]
2875 *
71fe804b 2876 * On success, returns 0, else 1
095f1fc4 2877 */
a7a88b23 2878int mpol_parse_str(char *str, struct mempolicy **mpol)
095f1fc4 2879{
71fe804b 2880 struct mempolicy *new = NULL;
f2a07f40 2881 unsigned short mode_flags;
71fe804b 2882 nodemask_t nodes;
095f1fc4
LS
2883 char *nodelist = strchr(str, ':');
2884 char *flags = strchr(str, '=');
dedf2c73 2885 int err = 1, mode;
095f1fc4 2886
c7a91bc7
DC
2887 if (flags)
2888 *flags++ = '\0'; /* terminate mode string */
2889
095f1fc4
LS
2890 if (nodelist) {
2891 /* NUL-terminate mode or flags string */
2892 *nodelist++ = '\0';
71fe804b 2893 if (nodelist_parse(nodelist, nodes))
095f1fc4 2894 goto out;
01f13bd6 2895 if (!nodes_subset(nodes, node_states[N_MEMORY]))
095f1fc4 2896 goto out;
71fe804b
LS
2897 } else
2898 nodes_clear(nodes);
2899
dedf2c73 2900 mode = match_string(policy_modes, MPOL_MAX, str);
2901 if (mode < 0)
095f1fc4
LS
2902 goto out;
2903
71fe804b 2904 switch (mode) {
095f1fc4 2905 case MPOL_PREFERRED:
71fe804b 2906 /*
aa9f7d51
RD
2907 * Insist on a nodelist of one node only, although later
2908 * we use first_node(nodes) to grab a single node, so here
2909 * nodelist (or nodes) cannot be empty.
71fe804b 2910 */
095f1fc4
LS
2911 if (nodelist) {
2912 char *rest = nodelist;
2913 while (isdigit(*rest))
2914 rest++;
926f2ae0
KM
2915 if (*rest)
2916 goto out;
aa9f7d51
RD
2917 if (nodes_empty(nodes))
2918 goto out;
095f1fc4
LS
2919 }
2920 break;
095f1fc4
LS
2921 case MPOL_INTERLEAVE:
2922 /*
2923 * Default to online nodes with memory if no nodelist
2924 */
2925 if (!nodelist)
01f13bd6 2926 nodes = node_states[N_MEMORY];
3f226aa1 2927 break;
71fe804b 2928 case MPOL_LOCAL:
3f226aa1 2929 /*
71fe804b 2930 * Don't allow a nodelist; mpol_new() checks flags
3f226aa1 2931 */
71fe804b 2932 if (nodelist)
3f226aa1 2933 goto out;
71fe804b 2934 mode = MPOL_PREFERRED;
3f226aa1 2935 break;
413b43de
RT
2936 case MPOL_DEFAULT:
2937 /*
2938 * Insist on an empty nodelist
2939 */
2940 if (!nodelist)
2941 err = 0;
2942 goto out;
d69b2e63
KM
2943 case MPOL_BIND:
2944 /*
2945 * Insist on a nodelist
2946 */
2947 if (!nodelist)
2948 goto out;
095f1fc4
LS
2949 }
2950
71fe804b 2951 mode_flags = 0;
095f1fc4
LS
2952 if (flags) {
2953 /*
2954 * Currently, we only support two mutually exclusive
2955 * mode flags.
2956 */
2957 if (!strcmp(flags, "static"))
71fe804b 2958 mode_flags |= MPOL_F_STATIC_NODES;
095f1fc4 2959 else if (!strcmp(flags, "relative"))
71fe804b 2960 mode_flags |= MPOL_F_RELATIVE_NODES;
095f1fc4 2961 else
926f2ae0 2962 goto out;
095f1fc4 2963 }
71fe804b
LS
2964
2965 new = mpol_new(mode, mode_flags, &nodes);
2966 if (IS_ERR(new))
926f2ae0
KM
2967 goto out;
2968
f2a07f40
HD
2969 /*
2970 * Save nodes for mpol_to_str() to show the tmpfs mount options
2971 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2972 */
2973 if (mode != MPOL_PREFERRED)
2974 new->v.nodes = nodes;
2975 else if (nodelist)
2976 new->v.preferred_node = first_node(nodes);
2977 else
2978 new->flags |= MPOL_F_LOCAL;
2979
2980 /*
2981 * Save nodes for contextualization: this will be used to "clone"
2982 * the mempolicy in a specific context [cpuset] at a later time.
2983 */
2984 new->w.user_nodemask = nodes;
2985
926f2ae0 2986 err = 0;
71fe804b 2987
095f1fc4
LS
2988out:
2989 /* Restore string for error message */
2990 if (nodelist)
2991 *--nodelist = ':';
2992 if (flags)
2993 *--flags = '=';
71fe804b
LS
2994 if (!err)
2995 *mpol = new;
095f1fc4
LS
2996 return err;
2997}
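
/*
 * Illustrative inputs (sketch): this parser handles the value of the
 * tmpfs "mpol=" mount option, e.g.
 *
 *	mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt
 *	mount -t tmpfs -o mpol=prefer=static:1 tmpfs /mnt
 *	mount -t tmpfs -o mpol=local tmpfs /mnt
 */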
2998#endif /* CONFIG_TMPFS */
2999
71fe804b
LS
3000/**
3001 * mpol_to_str - format a mempolicy structure for printing
3002 * @buffer: to contain formatted mempolicy string
3003 * @maxlen: length of @buffer
3004 * @pol: pointer to mempolicy to be formatted
71fe804b 3005 *
948927ee
DR
3006 * Convert @pol into a string. If @buffer is too short, truncate the string.
3007 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3008 * longest flag, "relative", and to display at least a few node ids.
1a75a6c8 3009 */
948927ee 3010void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1a75a6c8
CL
3011{
3012 char *p = buffer;
948927ee
DR
3013 nodemask_t nodes = NODE_MASK_NONE;
3014 unsigned short mode = MPOL_DEFAULT;
3015 unsigned short flags = 0;
2291990a 3016
8790c71a 3017 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
bea904d5 3018 mode = pol->mode;
948927ee
DR
3019 flags = pol->flags;
3020 }
bea904d5 3021
1a75a6c8
CL
3022 switch (mode) {
3023 case MPOL_DEFAULT:
1a75a6c8 3024 break;
1a75a6c8 3025 case MPOL_PREFERRED:
fc36b8d3 3026 if (flags & MPOL_F_LOCAL)
f2a07f40 3027 mode = MPOL_LOCAL;
53f2556b 3028 else
fc36b8d3 3029 node_set(pol->v.preferred_node, nodes);
1a75a6c8 3030 break;
1a75a6c8 3031 case MPOL_BIND:
1a75a6c8 3032 case MPOL_INTERLEAVE:
f2a07f40 3033 nodes = pol->v.nodes;
1a75a6c8 3034 break;
1a75a6c8 3035 default:
948927ee
DR
3036 WARN_ON_ONCE(1);
3037 snprintf(p, maxlen, "unknown");
3038 return;
1a75a6c8
CL
3039 }
3040
b7a9f420 3041 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
1a75a6c8 3042
fc36b8d3 3043 if (flags & MPOL_MODE_FLAGS) {
948927ee 3044 p += snprintf(p, buffer + maxlen - p, "=");
f5b087b5 3045
2291990a
LS
3046 /*
3047 * Currently, the only defined flags are mutually exclusive
3048 */
f5b087b5 3049 if (flags & MPOL_F_STATIC_NODES)
2291990a
LS
3050 p += snprintf(p, buffer + maxlen - p, "static");
3051 else if (flags & MPOL_F_RELATIVE_NODES)
3052 p += snprintf(p, buffer + maxlen - p, "relative");
f5b087b5
DR
3053 }
3054
9e763e0f
TH
3055 if (!nodes_empty(nodes))
3056 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3057 nodemask_pr_args(&nodes));
1a75a6c8 3058}
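
/*
 * Illustrative outputs (sketch): mpol_to_str() produces strings such as
 * "interleave:0-3", "prefer=static:1" or "local"; this is the form seen
 * in /proc/mounts and in /proc/<pid>/numa_maps.
 */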