/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead.
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case NUMA_NO_NODE here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non-default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/
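/*
 * Illustrative stand-alone user-space example (not part of this file): the
 * policy modes described above are requested through set_mempolicy(2) and
 * mbind(2), shown here via the libnuma <numaif.h> wrappers.  The node
 * numbers are only an assumption for a machine with at least two nodes.
 * Build with: gcc example.c -lnuma
 */
#include <numaif.h>
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	unsigned long bind_mask = 1UL << 0;			/* node 0 only */
	unsigned long il_mask = (1UL << 0) | (1UL << 1);	/* nodes 0 and 1 */
	size_t len = 4UL << 20;
	void *buf;

	/* Process policy: all future allocations of this thread use node 0. */
	if (set_mempolicy(MPOL_BIND, &bind_mask, 8 * sizeof(bind_mask)))
		perror("set_mempolicy");

	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	/* VMA policy: interleave this mapping over nodes 0 and 1. */
	if (mbind(buf, len, MPOL_INTERLEAVE, &il_mask, 8 * sizeof(il_mask), 0))
		perror("mbind");

	return 0;
}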
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"
/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];
struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}
static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If a read-side task has no lock to protect task->mempolicy, the
	 * write-side task will rebind task->mempolicy in two steps.  The
	 * first step sets all the new nodes, and the second step clears all
	 * the disallowed nodes.  This way we never end up with no node to
	 * allocate from.
	 * If we have a lock to protect task->mempolicy on the read side, we
	 * rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do the rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the new nodes
	 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];
static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;

	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
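/*
 * Illustrative stand-alone sketch (not kernel code) of what the fold/onto
 * combination above computes for MPOL_F_RELATIVE_NODES: bit i of the user's
 * mask, taken modulo the number of allowed nodes, selects the i-th node the
 * cpuset allows.  Plain unsigned long masks stand in for nodemask_t and the
 * helper names are hypothetical; 'rel' is assumed non-empty.
 */
static unsigned long demo_fold(unsigned long orig, unsigned int sz)
{
	unsigned long dst = 0;
	unsigned int i;

	for (i = 0; i < 8 * sizeof(orig); i++)		/* bit i -> bit i % sz */
		if (orig & (1UL << i))
			dst |= 1UL << (i % sz);
	return dst;
}

static unsigned long demo_onto(unsigned long orig, unsigned long rel)
{
	unsigned long dst = 0;
	unsigned int m, n = 0;

	for (m = 0; m < 8 * sizeof(rel); m++) {		/* bit n of orig ... */
		if (rel & (1UL << m)) {			/* ... -> n-th set bit of rel */
			if (orig & (1UL << n))
				dst |= 1UL << m;
			n++;
		}
	}
	return dst;
}

static unsigned long demo_relative_nodemask(unsigned long orig, unsigned long rel)
{
	/* e.g. orig = {0,2}, rel = {4,5,6}  ->  result = {4,6} */
	return demo_onto(demo_fold(orig, __builtin_popcountl(rel)), rel);
}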
static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}
/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_sem for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}
/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}
/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}
304 static void mpol_rebind_default(struct mempolicy
*pol
, const nodemask_t
*nodes
,
305 enum mpol_rebind_step step
)
311 * MPOL_REBIND_ONCE - do rebind work at once
312 * MPOL_REBIND_STEP1 - set all the newly nodes
313 * MPOL_REBIND_STEP2 - clean all the disallowed nodes
315 static void mpol_rebind_nodemask(struct mempolicy
*pol
, const nodemask_t
*nodes
,
316 enum mpol_rebind_step step
)
320 if (pol
->flags
& MPOL_F_STATIC_NODES
)
321 nodes_and(tmp
, pol
->w
.user_nodemask
, *nodes
);
322 else if (pol
->flags
& MPOL_F_RELATIVE_NODES
)
323 mpol_relative_nodemask(&tmp
, &pol
->w
.user_nodemask
, nodes
);
326 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
329 if (step
== MPOL_REBIND_ONCE
|| step
== MPOL_REBIND_STEP1
) {
330 nodes_remap(tmp
, pol
->v
.nodes
,
331 pol
->w
.cpuset_mems_allowed
, *nodes
);
332 pol
->w
.cpuset_mems_allowed
= step
? tmp
: *nodes
;
333 } else if (step
== MPOL_REBIND_STEP2
) {
334 tmp
= pol
->w
.cpuset_mems_allowed
;
335 pol
->w
.cpuset_mems_allowed
= *nodes
;
340 if (nodes_empty(tmp
))
343 if (step
== MPOL_REBIND_STEP1
)
344 nodes_or(pol
->v
.nodes
, pol
->v
.nodes
, tmp
);
345 else if (step
== MPOL_REBIND_ONCE
|| step
== MPOL_REBIND_STEP2
)
350 if (!node_isset(current
->il_next
, tmp
)) {
351 current
->il_next
= next_node_in(current
->il_next
, tmp
);
352 if (current
->il_next
>= MAX_NUMNODES
)
353 current
->il_next
= numa_node_id();
357 static void mpol_rebind_preferred(struct mempolicy
*pol
,
358 const nodemask_t
*nodes
,
359 enum mpol_rebind_step step
)
363 if (pol
->flags
& MPOL_F_STATIC_NODES
) {
364 int node
= first_node(pol
->w
.user_nodemask
);
366 if (node_isset(node
, *nodes
)) {
367 pol
->v
.preferred_node
= node
;
368 pol
->flags
&= ~MPOL_F_LOCAL
;
370 pol
->flags
|= MPOL_F_LOCAL
;
371 } else if (pol
->flags
& MPOL_F_RELATIVE_NODES
) {
372 mpol_relative_nodemask(&tmp
, &pol
->w
.user_nodemask
, nodes
);
373 pol
->v
.preferred_node
= first_node(tmp
);
374 } else if (!(pol
->flags
& MPOL_F_LOCAL
)) {
375 pol
->v
.preferred_node
= node_remap(pol
->v
.preferred_node
,
376 pol
->w
.cpuset_mems_allowed
,
378 pol
->w
.cpuset_mems_allowed
= *nodes
;
/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If a read-side task has no lock to protect task->mempolicy, the write-side
 * task will rebind task->mempolicy in two steps.  The first step sets all
 * the new nodes, and the second step clears all the disallowed nodes.  This
 * way we never end up with no node to allocate from.
 * If we have a lock to protect task->mempolicy on the read side, we rebind
 * directly.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the new nodes
 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
398 static void mpol_rebind_policy(struct mempolicy
*pol
, const nodemask_t
*newmask
,
399 enum mpol_rebind_step step
)
403 if (!mpol_store_user_nodemask(pol
) && step
== MPOL_REBIND_ONCE
&&
404 nodes_equal(pol
->w
.cpuset_mems_allowed
, *newmask
))
407 if (step
== MPOL_REBIND_STEP1
&& (pol
->flags
& MPOL_F_REBINDING
))
410 if (step
== MPOL_REBIND_STEP2
&& !(pol
->flags
& MPOL_F_REBINDING
))
413 if (step
== MPOL_REBIND_STEP1
)
414 pol
->flags
|= MPOL_F_REBINDING
;
415 else if (step
== MPOL_REBIND_STEP2
)
416 pol
->flags
&= ~MPOL_F_REBINDING
;
417 else if (step
>= MPOL_REBIND_NSTEP
)
420 mpol_ops
[pol
->mode
].rebind(pol
, newmask
, step
);
/*
 * Wrapper for mpol_rebind_policy() that just requires the task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}
/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}
static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};
470 static void migrate_page_add(struct page
*page
, struct list_head
*pagelist
,
471 unsigned long flags
);
474 struct list_head
*pagelist
;
477 struct vm_area_struct
*prev
;
481 * Scan through pages checking if pages follow certain conditions,
482 * and move them to the pagelist if they do.
484 static int queue_pages_pte_range(pmd_t
*pmd
, unsigned long addr
,
485 unsigned long end
, struct mm_walk
*walk
)
487 struct vm_area_struct
*vma
= walk
->vma
;
489 struct queue_pages
*qp
= walk
->private;
490 unsigned long flags
= qp
->flags
;
495 if (pmd_trans_huge(*pmd
)) {
496 ptl
= pmd_lock(walk
->mm
, pmd
);
497 if (pmd_trans_huge(*pmd
)) {
498 page
= pmd_page(*pmd
);
499 if (is_huge_zero_page(page
)) {
501 __split_huge_pmd(vma
, pmd
, addr
, false, NULL
);
506 ret
= split_huge_page(page
);
517 if (pmd_trans_unstable(pmd
))
520 pte
= pte_offset_map_lock(walk
->mm
, pmd
, addr
, &ptl
);
521 for (; addr
!= end
; pte
++, addr
+= PAGE_SIZE
) {
522 if (!pte_present(*pte
))
524 page
= vm_normal_page(vma
, addr
, *pte
);
528 * vm_normal_page() filters out zero pages, but there might
529 * still be PageReserved pages to skip, perhaps in a VDSO.
531 if (PageReserved(page
))
533 nid
= page_to_nid(page
);
534 if (node_isset(nid
, *qp
->nmask
) == !!(flags
& MPOL_MF_INVERT
))
536 if (PageTransCompound(page
)) {
538 pte_unmap_unlock(pte
, ptl
);
540 ret
= split_huge_page(page
);
543 /* Failed to split -- skip. */
545 pte
= pte_offset_map_lock(walk
->mm
, pmd
,
552 migrate_page_add(page
, qp
->pagelist
, flags
);
554 pte_unmap_unlock(pte
- 1, ptl
);
559 static int queue_pages_hugetlb(pte_t
*pte
, unsigned long hmask
,
560 unsigned long addr
, unsigned long end
,
561 struct mm_walk
*walk
)
563 #ifdef CONFIG_HUGETLB_PAGE
564 struct queue_pages
*qp
= walk
->private;
565 unsigned long flags
= qp
->flags
;
571 ptl
= huge_pte_lock(hstate_vma(walk
->vma
), walk
->mm
, pte
);
572 entry
= huge_ptep_get(pte
);
573 if (!pte_present(entry
))
575 page
= pte_page(entry
);
576 nid
= page_to_nid(page
);
577 if (node_isset(nid
, *qp
->nmask
) == !!(flags
& MPOL_MF_INVERT
))
579 /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
580 if (flags
& (MPOL_MF_MOVE_ALL
) ||
581 (flags
& MPOL_MF_MOVE
&& page_mapcount(page
) == 1))
582 isolate_huge_page(page
, qp
->pagelist
);
591 #ifdef CONFIG_NUMA_BALANCING
593 * This is used to mark a range of virtual addresses to be inaccessible.
594 * These are later cleared by a NUMA hinting fault. Depending on these
595 * faults, pages may be migrated for better NUMA placement.
597 * This is assuming that NUMA faults are handled using PROT_NONE. If
598 * an architecture makes a different choice, it will need further
599 * changes to the core.
601 unsigned long change_prot_numa(struct vm_area_struct
*vma
,
602 unsigned long addr
, unsigned long end
)
606 nr_updated
= change_protection(vma
, addr
, end
, PAGE_NONE
, 0, 1);
608 count_vm_numa_events(NUMA_PTE_UPDATES
, nr_updated
);
613 static unsigned long change_prot_numa(struct vm_area_struct
*vma
,
614 unsigned long addr
, unsigned long end
)
618 #endif /* CONFIG_NUMA_BALANCING */
620 static int queue_pages_test_walk(unsigned long start
, unsigned long end
,
621 struct mm_walk
*walk
)
623 struct vm_area_struct
*vma
= walk
->vma
;
624 struct queue_pages
*qp
= walk
->private;
625 unsigned long endvma
= vma
->vm_end
;
626 unsigned long flags
= qp
->flags
;
628 if (!vma_migratable(vma
))
633 if (vma
->vm_start
> start
)
634 start
= vma
->vm_start
;
636 if (!(flags
& MPOL_MF_DISCONTIG_OK
)) {
637 if (!vma
->vm_next
&& vma
->vm_end
< end
)
639 if (qp
->prev
&& qp
->prev
->vm_end
< vma
->vm_start
)
645 if (flags
& MPOL_MF_LAZY
) {
646 /* Similar to task_numa_work, skip inaccessible VMAs */
647 if (!is_vm_hugetlb_page(vma
) &&
648 (vma
->vm_flags
& (VM_READ
| VM_EXEC
| VM_WRITE
)) &&
649 !(vma
->vm_flags
& VM_MIXEDMAP
))
650 change_prot_numa(vma
, start
, endvma
);
654 /* queue pages from current vma */
655 if (flags
& (MPOL_MF_MOVE
| MPOL_MF_MOVE_ALL
))
/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which
 * is passed via @private.
 */
668 queue_pages_range(struct mm_struct
*mm
, unsigned long start
, unsigned long end
,
669 nodemask_t
*nodes
, unsigned long flags
,
670 struct list_head
*pagelist
)
672 struct queue_pages qp
= {
673 .pagelist
= pagelist
,
678 struct mm_walk queue_pages_walk
= {
679 .hugetlb_entry
= queue_pages_hugetlb
,
680 .pmd_entry
= queue_pages_pte_range
,
681 .test_walk
= queue_pages_test_walk
,
686 return walk_page_range(start
, end
, &queue_pages_walk
);
690 * Apply policy to a single VMA
691 * This must be called with the mmap_sem held for writing.
693 static int vma_replace_policy(struct vm_area_struct
*vma
,
694 struct mempolicy
*pol
)
697 struct mempolicy
*old
;
698 struct mempolicy
*new;
700 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
701 vma
->vm_start
, vma
->vm_end
, vma
->vm_pgoff
,
702 vma
->vm_ops
, vma
->vm_file
,
703 vma
->vm_ops
? vma
->vm_ops
->set_policy
: NULL
);
709 if (vma
->vm_ops
&& vma
->vm_ops
->set_policy
) {
710 err
= vma
->vm_ops
->set_policy(vma
, new);
715 old
= vma
->vm_policy
;
716 vma
->vm_policy
= new; /* protected by mmap_sem */
725 /* Step 2: apply policy to a range and do splits. */
726 static int mbind_range(struct mm_struct
*mm
, unsigned long start
,
727 unsigned long end
, struct mempolicy
*new_pol
)
729 struct vm_area_struct
*next
;
730 struct vm_area_struct
*prev
;
731 struct vm_area_struct
*vma
;
734 unsigned long vmstart
;
737 vma
= find_vma(mm
, start
);
738 if (!vma
|| vma
->vm_start
> start
)
742 if (start
> vma
->vm_start
)
745 for (; vma
&& vma
->vm_start
< end
; prev
= vma
, vma
= next
) {
747 vmstart
= max(start
, vma
->vm_start
);
748 vmend
= min(end
, vma
->vm_end
);
750 if (mpol_equal(vma_policy(vma
), new_pol
))
753 pgoff
= vma
->vm_pgoff
+
754 ((vmstart
- vma
->vm_start
) >> PAGE_SHIFT
);
755 prev
= vma_merge(mm
, prev
, vmstart
, vmend
, vma
->vm_flags
,
756 vma
->anon_vma
, vma
->vm_file
, pgoff
,
757 new_pol
, vma
->vm_userfaultfd_ctx
);
761 if (mpol_equal(vma_policy(vma
), new_pol
))
763 /* vma_merge() joined vma && vma->next, case 8 */
766 if (vma
->vm_start
!= vmstart
) {
767 err
= split_vma(vma
->vm_mm
, vma
, vmstart
, 1);
771 if (vma
->vm_end
!= vmend
) {
772 err
= split_vma(vma
->vm_mm
, vma
, vmend
, 0);
777 err
= vma_replace_policy(vma
, new_pol
);
786 /* Set the process memory policy */
787 static long do_set_mempolicy(unsigned short mode
, unsigned short flags
,
790 struct mempolicy
*new, *old
;
791 NODEMASK_SCRATCH(scratch
);
797 new = mpol_new(mode
, flags
, nodes
);
804 ret
= mpol_set_nodemask(new, nodes
, scratch
);
806 task_unlock(current
);
810 old
= current
->mempolicy
;
811 current
->mempolicy
= new;
812 if (new && new->mode
== MPOL_INTERLEAVE
&&
813 nodes_weight(new->v
.nodes
))
814 current
->il_next
= first_node(new->v
.nodes
);
815 task_unlock(current
);
819 NODEMASK_SCRATCH_FREE(scratch
);
824 * Return nodemask for policy for get_mempolicy() query
826 * Called with task's alloc_lock held
828 static void get_policy_nodemask(struct mempolicy
*p
, nodemask_t
*nodes
)
831 if (p
== &default_policy
)
837 case MPOL_INTERLEAVE
:
841 if (!(p
->flags
& MPOL_F_LOCAL
))
842 node_set(p
->v
.preferred_node
, *nodes
);
843 /* else return empty node mask for local allocation */
850 static int lookup_node(unsigned long addr
)
855 err
= get_user_pages(addr
& PAGE_MASK
, 1, 0, &p
, NULL
);
857 err
= page_to_nid(p
);
863 /* Retrieve NUMA policy */
864 static long do_get_mempolicy(int *policy
, nodemask_t
*nmask
,
865 unsigned long addr
, unsigned long flags
)
868 struct mm_struct
*mm
= current
->mm
;
869 struct vm_area_struct
*vma
= NULL
;
870 struct mempolicy
*pol
= current
->mempolicy
;
873 ~(unsigned long)(MPOL_F_NODE
|MPOL_F_ADDR
|MPOL_F_MEMS_ALLOWED
))
876 if (flags
& MPOL_F_MEMS_ALLOWED
) {
877 if (flags
& (MPOL_F_NODE
|MPOL_F_ADDR
))
879 *policy
= 0; /* just so it's initialized */
881 *nmask
= cpuset_current_mems_allowed
;
882 task_unlock(current
);
886 if (flags
& MPOL_F_ADDR
) {
888 * Do NOT fall back to task policy if the
889 * vma/shared policy at addr is NULL. We
890 * want to return MPOL_DEFAULT in this case.
892 down_read(&mm
->mmap_sem
);
893 vma
= find_vma_intersection(mm
, addr
, addr
+1);
895 up_read(&mm
->mmap_sem
);
898 if (vma
->vm_ops
&& vma
->vm_ops
->get_policy
)
899 pol
= vma
->vm_ops
->get_policy(vma
, addr
);
901 pol
= vma
->vm_policy
;
906 pol
= &default_policy
; /* indicates default behavior */
908 if (flags
& MPOL_F_NODE
) {
909 if (flags
& MPOL_F_ADDR
) {
910 err
= lookup_node(addr
);
914 } else if (pol
== current
->mempolicy
&&
915 pol
->mode
== MPOL_INTERLEAVE
) {
916 *policy
= current
->il_next
;
922 *policy
= pol
== &default_policy
? MPOL_DEFAULT
:
925 * Internal mempolicy flags must be masked off before exposing
926 * the policy to userspace.
928 *policy
|= (pol
->flags
& MPOL_MODE_FLAGS
);
932 up_read(¤t
->mm
->mmap_sem
);
938 if (mpol_store_user_nodemask(pol
)) {
939 *nmask
= pol
->w
.user_nodemask
;
942 get_policy_nodemask(pol
, nmask
);
943 task_unlock(current
);
950 up_read(¤t
->mm
->mmap_sem
);
954 #ifdef CONFIG_MIGRATION
958 static void migrate_page_add(struct page
*page
, struct list_head
*pagelist
,
962 * Avoid migrating a page that is shared with others.
964 if ((flags
& MPOL_MF_MOVE_ALL
) || page_mapcount(page
) == 1) {
965 if (!isolate_lru_page(page
)) {
966 list_add_tail(&page
->lru
, pagelist
);
967 inc_node_page_state(page
, NR_ISOLATED_ANON
+
968 page_is_file_cache(page
));
973 static struct page
*new_node_page(struct page
*page
, unsigned long node
, int **x
)
976 return alloc_huge_page_node(page_hstate(compound_head(page
)),
979 return __alloc_pages_node(node
, GFP_HIGHUSER_MOVABLE
|
984 * Migrate pages from one node to a target node.
985 * Returns error or the number of pages not migrated.
987 static int migrate_to_node(struct mm_struct
*mm
, int source
, int dest
,
995 node_set(source
, nmask
);
998 * This does not "check" the range but isolates all pages that
999 * need migration. Between passing in the full user address
1000 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1002 VM_BUG_ON(!(flags
& (MPOL_MF_MOVE
| MPOL_MF_MOVE_ALL
)));
1003 queue_pages_range(mm
, mm
->mmap
->vm_start
, mm
->task_size
, &nmask
,
1004 flags
| MPOL_MF_DISCONTIG_OK
, &pagelist
);
1006 if (!list_empty(&pagelist
)) {
1007 err
= migrate_pages(&pagelist
, new_node_page
, NULL
, dest
,
1008 MIGRATE_SYNC
, MR_SYSCALL
);
1010 putback_movable_pages(&pagelist
);
/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
1022 int do_migrate_pages(struct mm_struct
*mm
, const nodemask_t
*from
,
1023 const nodemask_t
*to
, int flags
)
1029 err
= migrate_prep();
1033 down_read(&mm
->mmap_sem
);
	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fall back to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory sourced from that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */
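	/*
	 * Illustrative stand-alone sketch (not kernel code) of the
	 * <source, dest> pair selection described above, on single-word
	 * bitmasks with hypothetical helper names.  The corner case of
	 * skipping nodes already in the destination set when the two masks
	 * have different weights is omitted for brevity.
	 *
	 *	#include <stdio.h>
	 *
	 *	static int ord(unsigned long mask, int bit)
	 *	{
	 *		// ordinal position of 'bit' among the set bits of 'mask'
	 *		return __builtin_popcountl(mask & ((1UL << bit) - 1));
	 *	}
	 *
	 *	static int ord_to_bit(unsigned long mask, int n)
	 *	{
	 *		// n-th set bit of 'mask' (n is 0-based)
	 *		for (int b = 0; b < 64; b++)
	 *			if ((mask & (1UL << b)) && n-- == 0)
	 *				return b;
	 *		return -1;
	 *	}
	 *
	 *	static int node_remap(int s, unsigned long from, unsigned long to)
	 *	{
	 *		if (!(from & (1UL << s)))
	 *			return s;
	 *		return ord_to_bit(to, ord(from, s) % __builtin_popcountl(to));
	 *	}
	 *
	 *	int main(void)
	 *	{
	 *		unsigned long from = 0x07, to = 0x38;	// {0,1,2} -> {3,4,5}
	 *		unsigned long tmp = from;
	 *
	 *		while (tmp) {
	 *			int source = -1, dest = 0;
	 *
	 *			for (int s = 0; s < 64; s++) {
	 *				if (!(tmp & (1UL << s)))
	 *					continue;
	 *				int d = node_remap(s, from, to);
	 *				if (s == d)
	 *					continue;	// migrates to itself
	 *				source = s;
	 *				dest = d;
	 *				if (!(tmp & (1UL << dest)))
	 *					break;		// dest slot is empty
	 *			}
	 *			if (source == -1)
	 *				break;
	 *			tmp &= ~(1UL << source);
	 *			printf("migrate node %d -> node %d\n", source, dest);
	 *		}
	 *		return 0;
	 *	}
	 */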
1067 while (!nodes_empty(tmp
)) {
1069 int source
= NUMA_NO_NODE
;
1072 for_each_node_mask(s
, tmp
) {
1075 * do_migrate_pages() tries to maintain the relative
1076 * node relationship of the pages established between
1077 * threads and memory areas.
1079 * However if the number of source nodes is not equal to
1080 * the number of destination nodes we can not preserve
1081 * this node relative relationship. In that case, skip
1082 * copying memory from a node that is in the destination
1085 * Example: [2,3,4] -> [3,4,5] moves everything.
1086 * [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1089 if ((nodes_weight(*from
) != nodes_weight(*to
)) &&
1090 (node_isset(s
, *to
)))
1093 d
= node_remap(s
, *from
, *to
);
1097 source
= s
; /* Node moved. Memorize */
1100 /* dest not in remaining from nodes? */
1101 if (!node_isset(dest
, tmp
))
1104 if (source
== NUMA_NO_NODE
)
1107 node_clear(source
, tmp
);
1108 err
= migrate_to_node(mm
, source
, dest
, flags
);
1114 up_read(&mm
->mmap_sem
);
1122 * Allocate a new page for page migration based on vma policy.
1123 * Start by assuming the page is mapped by the same vma as contains @start.
1124 * Search forward from there, if not. N.B., this assumes that the
1125 * list of pages handed to migrate_pages()--which is how we get here--
1126 * is in virtual address order.
1128 static struct page
*new_page(struct page
*page
, unsigned long start
, int **x
)
1130 struct vm_area_struct
*vma
;
1131 unsigned long uninitialized_var(address
);
1133 vma
= find_vma(current
->mm
, start
);
1135 address
= page_address_in_vma(page
, vma
);
1136 if (address
!= -EFAULT
)
1141 if (PageHuge(page
)) {
1143 return alloc_huge_page_noerr(vma
, address
, 1);
1146 * if !vma, alloc_page_vma() will use task or system default policy
1148 return alloc_page_vma(GFP_HIGHUSER_MOVABLE
, vma
, address
);
1152 static void migrate_page_add(struct page
*page
, struct list_head
*pagelist
,
1153 unsigned long flags
)
1157 int do_migrate_pages(struct mm_struct
*mm
, const nodemask_t
*from
,
1158 const nodemask_t
*to
, int flags
)
1163 static struct page
*new_page(struct page
*page
, unsigned long start
, int **x
)
1169 static long do_mbind(unsigned long start
, unsigned long len
,
1170 unsigned short mode
, unsigned short mode_flags
,
1171 nodemask_t
*nmask
, unsigned long flags
)
1173 struct mm_struct
*mm
= current
->mm
;
1174 struct mempolicy
*new;
1177 LIST_HEAD(pagelist
);
1179 if (flags
& ~(unsigned long)MPOL_MF_VALID
)
1181 if ((flags
& MPOL_MF_MOVE_ALL
) && !capable(CAP_SYS_NICE
))
1184 if (start
& ~PAGE_MASK
)
1187 if (mode
== MPOL_DEFAULT
)
1188 flags
&= ~MPOL_MF_STRICT
;
1190 len
= (len
+ PAGE_SIZE
- 1) & PAGE_MASK
;
1198 new = mpol_new(mode
, mode_flags
, nmask
);
1200 return PTR_ERR(new);
1202 if (flags
& MPOL_MF_LAZY
)
1203 new->flags
|= MPOL_F_MOF
;
1206 * If we are using the default policy then operation
1207 * on discontinuous address spaces is okay after all
1210 flags
|= MPOL_MF_DISCONTIG_OK
;
1212 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1213 start
, start
+ len
, mode
, mode_flags
,
1214 nmask
? nodes_addr(*nmask
)[0] : NUMA_NO_NODE
);
1216 if (flags
& (MPOL_MF_MOVE
| MPOL_MF_MOVE_ALL
)) {
1218 err
= migrate_prep();
1223 NODEMASK_SCRATCH(scratch
);
1225 down_write(&mm
->mmap_sem
);
1227 err
= mpol_set_nodemask(new, nmask
, scratch
);
1228 task_unlock(current
);
1230 up_write(&mm
->mmap_sem
);
1233 NODEMASK_SCRATCH_FREE(scratch
);
1238 err
= queue_pages_range(mm
, start
, end
, nmask
,
1239 flags
| MPOL_MF_INVERT
, &pagelist
);
1241 err
= mbind_range(mm
, start
, end
, new);
1246 if (!list_empty(&pagelist
)) {
1247 WARN_ON_ONCE(flags
& MPOL_MF_LAZY
);
1248 nr_failed
= migrate_pages(&pagelist
, new_page
, NULL
,
1249 start
, MIGRATE_SYNC
, MR_MEMPOLICY_MBIND
);
1251 putback_movable_pages(&pagelist
);
1254 if (nr_failed
&& (flags
& MPOL_MF_STRICT
))
1257 putback_movable_pages(&pagelist
);
1259 up_write(&mm
->mmap_sem
);
1266 * User space interface with variable sized bitmaps for nodelists.
1269 /* Copy a node mask from user space. */
1270 static int get_nodes(nodemask_t
*nodes
, const unsigned long __user
*nmask
,
1271 unsigned long maxnode
)
1274 unsigned long nlongs
;
1275 unsigned long endmask
;
1278 nodes_clear(*nodes
);
1279 if (maxnode
== 0 || !nmask
)
1281 if (maxnode
> PAGE_SIZE
*BITS_PER_BYTE
)
1284 nlongs
= BITS_TO_LONGS(maxnode
);
1285 if ((maxnode
% BITS_PER_LONG
) == 0)
1288 endmask
= (1UL << (maxnode
% BITS_PER_LONG
)) - 1;
1290 /* When the user specified more nodes than supported just check
1291 if the non supported part is all zero. */
1292 if (nlongs
> BITS_TO_LONGS(MAX_NUMNODES
)) {
1293 if (nlongs
> PAGE_SIZE
/sizeof(long))
1295 for (k
= BITS_TO_LONGS(MAX_NUMNODES
); k
< nlongs
; k
++) {
1297 if (get_user(t
, nmask
+ k
))
1299 if (k
== nlongs
- 1) {
1305 nlongs
= BITS_TO_LONGS(MAX_NUMNODES
);
1309 if (copy_from_user(nodes_addr(*nodes
), nmask
, nlongs
*sizeof(unsigned long)))
1311 nodes_addr(*nodes
)[nlongs
-1] &= endmask
;
1315 /* Copy a kernel node mask to user space */
1316 static int copy_nodes_to_user(unsigned long __user
*mask
, unsigned long maxnode
,
1319 unsigned long copy
= ALIGN(maxnode
-1, 64) / 8;
1320 const int nbytes
= BITS_TO_LONGS(MAX_NUMNODES
) * sizeof(long);
1322 if (copy
> nbytes
) {
1323 if (copy
> PAGE_SIZE
)
1325 if (clear_user((char __user
*)mask
+ nbytes
, copy
- nbytes
))
1329 return copy_to_user(mask
, nodes_addr(*nodes
), copy
) ? -EFAULT
: 0;
1332 SYSCALL_DEFINE6(mbind
, unsigned long, start
, unsigned long, len
,
1333 unsigned long, mode
, const unsigned long __user
*, nmask
,
1334 unsigned long, maxnode
, unsigned, flags
)
1338 unsigned short mode_flags
;
1340 mode_flags
= mode
& MPOL_MODE_FLAGS
;
1341 mode
&= ~MPOL_MODE_FLAGS
;
1342 if (mode
>= MPOL_MAX
)
1344 if ((mode_flags
& MPOL_F_STATIC_NODES
) &&
1345 (mode_flags
& MPOL_F_RELATIVE_NODES
))
1347 err
= get_nodes(&nodes
, nmask
, maxnode
);
1350 return do_mbind(start
, len
, mode
, mode_flags
, &nodes
, flags
);
1353 /* Set the process memory policy */
1354 SYSCALL_DEFINE3(set_mempolicy
, int, mode
, const unsigned long __user
*, nmask
,
1355 unsigned long, maxnode
)
1359 unsigned short flags
;
1361 flags
= mode
& MPOL_MODE_FLAGS
;
1362 mode
&= ~MPOL_MODE_FLAGS
;
1363 if ((unsigned int)mode
>= MPOL_MAX
)
1365 if ((flags
& MPOL_F_STATIC_NODES
) && (flags
& MPOL_F_RELATIVE_NODES
))
1367 err
= get_nodes(&nodes
, nmask
, maxnode
);
1370 return do_set_mempolicy(mode
, flags
, &nodes
);
1373 SYSCALL_DEFINE4(migrate_pages
, pid_t
, pid
, unsigned long, maxnode
,
1374 const unsigned long __user
*, old_nodes
,
1375 const unsigned long __user
*, new_nodes
)
1377 const struct cred
*cred
= current_cred(), *tcred
;
1378 struct mm_struct
*mm
= NULL
;
1379 struct task_struct
*task
;
1380 nodemask_t task_nodes
;
1384 NODEMASK_SCRATCH(scratch
);
1389 old
= &scratch
->mask1
;
1390 new = &scratch
->mask2
;
1392 err
= get_nodes(old
, old_nodes
, maxnode
);
1396 err
= get_nodes(new, new_nodes
, maxnode
);
1400 /* Find the mm_struct */
1402 task
= pid
? find_task_by_vpid(pid
) : current
;
1408 get_task_struct(task
);
1413 * Check if this process has the right to modify the specified
1414 * process. The right exists if the process has administrative
1415 * capabilities, superuser privileges or the same
1416 * userid as the target process.
1418 tcred
= __task_cred(task
);
1419 if (!uid_eq(cred
->euid
, tcred
->suid
) && !uid_eq(cred
->euid
, tcred
->uid
) &&
1420 !uid_eq(cred
->uid
, tcred
->suid
) && !uid_eq(cred
->uid
, tcred
->uid
) &&
1421 !capable(CAP_SYS_NICE
)) {
1428 task_nodes
= cpuset_mems_allowed(task
);
1429 /* Is the user allowed to access the target nodes? */
1430 if (!nodes_subset(*new, task_nodes
) && !capable(CAP_SYS_NICE
)) {
1435 if (!nodes_subset(*new, node_states
[N_MEMORY
])) {
1440 err
= security_task_movememory(task
);
1444 mm
= get_task_mm(task
);
1445 put_task_struct(task
);
1452 err
= do_migrate_pages(mm
, old
, new,
1453 capable(CAP_SYS_NICE
) ? MPOL_MF_MOVE_ALL
: MPOL_MF_MOVE
);
1457 NODEMASK_SCRATCH_FREE(scratch
);
1462 put_task_struct(task
);
1468 /* Retrieve NUMA policy */
1469 SYSCALL_DEFINE5(get_mempolicy
, int __user
*, policy
,
1470 unsigned long __user
*, nmask
, unsigned long, maxnode
,
1471 unsigned long, addr
, unsigned long, flags
)
1474 int uninitialized_var(pval
);
1477 if (nmask
!= NULL
&& maxnode
< MAX_NUMNODES
)
1480 err
= do_get_mempolicy(&pval
, &nodes
, addr
, flags
);
1485 if (policy
&& put_user(pval
, policy
))
1489 err
= copy_nodes_to_user(nmask
, maxnode
, &nodes
);
1494 #ifdef CONFIG_COMPAT
1496 COMPAT_SYSCALL_DEFINE5(get_mempolicy
, int __user
*, policy
,
1497 compat_ulong_t __user
*, nmask
,
1498 compat_ulong_t
, maxnode
,
1499 compat_ulong_t
, addr
, compat_ulong_t
, flags
)
1502 unsigned long __user
*nm
= NULL
;
1503 unsigned long nr_bits
, alloc_size
;
1504 DECLARE_BITMAP(bm
, MAX_NUMNODES
);
1506 nr_bits
= min_t(unsigned long, maxnode
-1, MAX_NUMNODES
);
1507 alloc_size
= ALIGN(nr_bits
, BITS_PER_LONG
) / 8;
1510 nm
= compat_alloc_user_space(alloc_size
);
1512 err
= sys_get_mempolicy(policy
, nm
, nr_bits
+1, addr
, flags
);
1514 if (!err
&& nmask
) {
1515 unsigned long copy_size
;
1516 copy_size
= min_t(unsigned long, sizeof(bm
), alloc_size
);
1517 err
= copy_from_user(bm
, nm
, copy_size
);
1518 /* ensure entire bitmap is zeroed */
1519 err
|= clear_user(nmask
, ALIGN(maxnode
-1, 8) / 8);
1520 err
|= compat_put_bitmap(nmask
, bm
, nr_bits
);
1526 COMPAT_SYSCALL_DEFINE3(set_mempolicy
, int, mode
, compat_ulong_t __user
*, nmask
,
1527 compat_ulong_t
, maxnode
)
1529 unsigned long __user
*nm
= NULL
;
1530 unsigned long nr_bits
, alloc_size
;
1531 DECLARE_BITMAP(bm
, MAX_NUMNODES
);
1533 nr_bits
= min_t(unsigned long, maxnode
-1, MAX_NUMNODES
);
1534 alloc_size
= ALIGN(nr_bits
, BITS_PER_LONG
) / 8;
1537 if (compat_get_bitmap(bm
, nmask
, nr_bits
))
1539 nm
= compat_alloc_user_space(alloc_size
);
1540 if (copy_to_user(nm
, bm
, alloc_size
))
1544 return sys_set_mempolicy(mode
, nm
, nr_bits
+1);
1547 COMPAT_SYSCALL_DEFINE6(mbind
, compat_ulong_t
, start
, compat_ulong_t
, len
,
1548 compat_ulong_t
, mode
, compat_ulong_t __user
*, nmask
,
1549 compat_ulong_t
, maxnode
, compat_ulong_t
, flags
)
1551 unsigned long __user
*nm
= NULL
;
1552 unsigned long nr_bits
, alloc_size
;
1555 nr_bits
= min_t(unsigned long, maxnode
-1, MAX_NUMNODES
);
1556 alloc_size
= ALIGN(nr_bits
, BITS_PER_LONG
) / 8;
1559 if (compat_get_bitmap(nodes_addr(bm
), nmask
, nr_bits
))
1561 nm
= compat_alloc_user_space(alloc_size
);
1562 if (copy_to_user(nm
, nodes_addr(bm
), alloc_size
))
1566 return sys_mbind(start
, len
, mode
, nm
, nr_bits
+1, flags
);
1571 struct mempolicy
*__get_vma_policy(struct vm_area_struct
*vma
,
1574 struct mempolicy
*pol
= NULL
;
1577 if (vma
->vm_ops
&& vma
->vm_ops
->get_policy
) {
1578 pol
= vma
->vm_ops
->get_policy(vma
, addr
);
1579 } else if (vma
->vm_policy
) {
1580 pol
= vma
->vm_policy
;
1583 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1584 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1585 * count on these policies which will be dropped by
1586 * mpol_cond_put() later
1588 if (mpol_needs_cond_ref(pol
))
/*
 * get_vma_policy(@vma, @addr)
 * @vma: virtual memory area whose policy is sought
 * @addr: address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to current->mempolicy or system default policy, as necessary.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task.  It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
1608 static struct mempolicy
*get_vma_policy(struct vm_area_struct
*vma
,
1611 struct mempolicy
*pol
= __get_vma_policy(vma
, addr
);
1614 pol
= get_task_policy(current
);
1619 bool vma_policy_mof(struct vm_area_struct
*vma
)
1621 struct mempolicy
*pol
;
1623 if (vma
->vm_ops
&& vma
->vm_ops
->get_policy
) {
1626 pol
= vma
->vm_ops
->get_policy(vma
, vma
->vm_start
);
1627 if (pol
&& (pol
->flags
& MPOL_F_MOF
))
1634 pol
= vma
->vm_policy
;
1636 pol
= get_task_policy(current
);
1638 return pol
->flags
& MPOL_F_MOF
;
1641 static int apply_policy_zone(struct mempolicy
*policy
, enum zone_type zone
)
1643 enum zone_type dynamic_policy_zone
= policy_zone
;
1645 BUG_ON(dynamic_policy_zone
== ZONE_MOVABLE
);
	/*
	 * if policy->v.nodes has movable memory only,
	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
	 *
	 * policy->v.nodes is intersected with node_states[N_MEMORY],
	 * so if the following test fails, it implies
	 * policy->v.nodes has movable memory only.
	 */
1655 if (!nodes_intersects(policy
->v
.nodes
, node_states
[N_HIGH_MEMORY
]))
1656 dynamic_policy_zone
= ZONE_MOVABLE
;
1658 return zone
>= dynamic_policy_zone
;
1662 * Return a nodemask representing a mempolicy for filtering nodes for
1665 static nodemask_t
*policy_nodemask(gfp_t gfp
, struct mempolicy
*policy
)
1667 /* Lower zones don't get a nodemask applied for MPOL_BIND */
1668 if (unlikely(policy
->mode
== MPOL_BIND
) &&
1669 apply_policy_zone(policy
, gfp_zone(gfp
)) &&
1670 cpuset_nodemask_valid_mems_allowed(&policy
->v
.nodes
))
1671 return &policy
->v
.nodes
;
1676 /* Return a zonelist indicated by gfp for node representing a mempolicy */
1677 static struct zonelist
*policy_zonelist(gfp_t gfp
, struct mempolicy
*policy
,
1680 if (policy
->mode
== MPOL_PREFERRED
&& !(policy
->flags
& MPOL_F_LOCAL
))
1681 nd
= policy
->v
.preferred_node
;
1684 * __GFP_THISNODE shouldn't even be used with the bind policy
1685 * because we might easily break the expectation to stay on the
1686 * requested node and not break the policy.
1688 WARN_ON_ONCE(policy
->mode
== MPOL_BIND
&& (gfp
& __GFP_THISNODE
));
1691 return node_zonelist(nd
, gfp
);
1694 /* Do dynamic interleaving for a process */
1695 static unsigned interleave_nodes(struct mempolicy
*policy
)
1698 struct task_struct
*me
= current
;
1701 next
= next_node_in(nid
, policy
->v
.nodes
);
1702 if (next
< MAX_NUMNODES
)
1708 * Depending on the memory policy provide a node from which to allocate the
1711 unsigned int mempolicy_slab_node(void)
1713 struct mempolicy
*policy
;
1714 int node
= numa_mem_id();
1719 policy
= current
->mempolicy
;
1720 if (!policy
|| policy
->flags
& MPOL_F_LOCAL
)
1723 switch (policy
->mode
) {
1724 case MPOL_PREFERRED
:
1726 * handled MPOL_F_LOCAL above
1728 return policy
->v
.preferred_node
;
1730 case MPOL_INTERLEAVE
:
1731 return interleave_nodes(policy
);
1737 * Follow bind policy behavior and start allocation at the
1740 struct zonelist
*zonelist
;
1741 enum zone_type highest_zoneidx
= gfp_zone(GFP_KERNEL
);
1742 zonelist
= &NODE_DATA(node
)->node_zonelists
[ZONELIST_FALLBACK
];
1743 z
= first_zones_zonelist(zonelist
, highest_zoneidx
,
1745 return z
->zone
? z
->zone
->node
: node
;
1754 * Do static interleaving for a VMA with known offset @n. Returns the n'th
1755 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1756 * number of present nodes.
1758 static unsigned offset_il_node(struct mempolicy
*pol
,
1759 struct vm_area_struct
*vma
, unsigned long n
)
1761 unsigned nnodes
= nodes_weight(pol
->v
.nodes
);
1767 return numa_node_id();
1768 target
= (unsigned int)n
% nnodes
;
1769 nid
= first_node(pol
->v
.nodes
);
1770 for (i
= 0; i
< target
; i
++)
1771 nid
= next_node(nid
, pol
->v
.nodes
);
1775 /* Determine a node number for interleave */
1776 static inline unsigned interleave_nid(struct mempolicy
*pol
,
1777 struct vm_area_struct
*vma
, unsigned long addr
, int shift
)
1783 * for small pages, there is no difference between
1784 * shift and PAGE_SHIFT, so the bit-shift is safe.
1785 * for huge pages, since vm_pgoff is in units of small
1786 * pages, we need to shift off the always 0 bits to get
1789 BUG_ON(shift
< PAGE_SHIFT
);
1790 off
= vma
->vm_pgoff
>> (shift
- PAGE_SHIFT
);
1791 off
+= (addr
- vma
->vm_start
) >> shift
;
1792 return offset_il_node(pol
, vma
, off
);
1794 return interleave_nodes(pol
);
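/*
 * Illustrative stand-alone sketch (not kernel code) of the static
 * interleaving done by offset_il_node() above: the (offset mod nnodes)-th
 * node of the policy nodemask is chosen, so a given page offset in a
 * mapping always lands on the same node.  A plain unsigned long stands in
 * for nodemask_t and the helper name is hypothetical.
 */
static int interleave_node_for_offset(unsigned long nodemask, unsigned long pgoff)
{
	int nnodes = __builtin_popcountl(nodemask);
	int target, nid;

	if (!nnodes)
		return -1;		/* caller falls back to the local node */
	target = pgoff % nnodes;
	for (nid = 0; nid < 64; nid++) {
		if (!(nodemask & (1UL << nid)))
			continue;
		if (target-- == 0)
			return nid;
	}
	return -1;			/* unreachable for a non-empty mask */
}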
1797 #ifdef CONFIG_HUGETLBFS
/*
 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
 * @vma: virtual memory area whose policy is sought
 * @addr: address in @vma for shared policy lookup and interleave policy
 * @gfp_flags: for requested zone
 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
 *
 * Returns a zonelist suitable for a huge page allocation and a pointer
 * to the struct mempolicy for conditional unref after allocation.
 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
 * @nodemask for filtering the zonelist.
 *
 * Must be protected by read_mems_allowed_begin()
 */
1813 struct zonelist
*huge_zonelist(struct vm_area_struct
*vma
, unsigned long addr
,
1814 gfp_t gfp_flags
, struct mempolicy
**mpol
,
1815 nodemask_t
**nodemask
)
1817 struct zonelist
*zl
;
1819 *mpol
= get_vma_policy(vma
, addr
);
1820 *nodemask
= NULL
; /* assume !MPOL_BIND */
1822 if (unlikely((*mpol
)->mode
== MPOL_INTERLEAVE
)) {
1823 zl
= node_zonelist(interleave_nid(*mpol
, vma
, addr
,
1824 huge_page_shift(hstate_vma(vma
))), gfp_flags
);
1826 zl
= policy_zonelist(gfp_flags
, *mpol
, numa_node_id());
1827 if ((*mpol
)->mode
== MPOL_BIND
)
1828 *nodemask
= &(*mpol
)->v
.nodes
;
/*
 * init_nodemask_of_mempolicy
 *
 * If the current task's mempolicy is "default" [NULL], return 'false'
 * to indicate default policy.  Otherwise, extract the policy nodemask
 * for 'bind' or 'interleave' policy into the argument nodemask, or
 * initialize the argument nodemask to contain the single node for
 * 'preferred' or 'local' policy and return 'true' to indicate presence
 * of non-default mempolicy.
 *
 * We don't bother with reference counting the mempolicy [mpol_get/put]
 * because the current task is examining its own mempolicy and a task's
 * mempolicy is only ever changed by the task itself.
 *
 * N.B., it is the caller's responsibility to free a returned nodemask.
 */
1849 bool init_nodemask_of_mempolicy(nodemask_t
*mask
)
1851 struct mempolicy
*mempolicy
;
1854 if (!(mask
&& current
->mempolicy
))
1858 mempolicy
= current
->mempolicy
;
1859 switch (mempolicy
->mode
) {
1860 case MPOL_PREFERRED
:
1861 if (mempolicy
->flags
& MPOL_F_LOCAL
)
1862 nid
= numa_node_id();
1864 nid
= mempolicy
->v
.preferred_node
;
1865 init_nodemask_of_node(mask
, nid
);
1870 case MPOL_INTERLEAVE
:
1871 *mask
= mempolicy
->v
.nodes
;
1877 task_unlock(current
);
/*
 * mempolicy_nodemask_intersects
 *
 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
 * policy.  Otherwise, check for intersection between mask and the policy
 * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
 * policy, always return true since it may allocate elsewhere on fallback.
 *
 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
 */
1893 bool mempolicy_nodemask_intersects(struct task_struct
*tsk
,
1894 const nodemask_t
*mask
)
1896 struct mempolicy
*mempolicy
;
1902 mempolicy
= tsk
->mempolicy
;
1906 switch (mempolicy
->mode
) {
1907 case MPOL_PREFERRED
:
1909 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
1910 * allocate from, they may fallback to other nodes when oom.
1911 * Thus, it's possible for tsk to have allocated memory from
1916 case MPOL_INTERLEAVE
:
1917 ret
= nodes_intersects(mempolicy
->v
.nodes
, *mask
);
1927 /* Allocate a page in interleaved policy.
1928 Own path because it needs to do special accounting. */
1929 static struct page
*alloc_page_interleave(gfp_t gfp
, unsigned order
,
1932 struct zonelist
*zl
;
1935 zl
= node_zonelist(nid
, gfp
);
1936 page
= __alloc_pages(gfp
, order
, zl
);
1937 if (page
&& page_zone(page
) == zonelist_zone(&zl
->_zonerefs
[0]))
1938 inc_zone_page_state(page
, NUMA_INTERLEAVE_HIT
);
/**
 * alloc_pages_vma - Allocate a page for a VMA.
 *
 * @gfp:
 *	%GFP_USER    user allocation.
 *	%GFP_KERNEL  kernel allocations,
 *	%GFP_HIGHMEM highmem/user allocations,
 *	%GFP_FS      allocation should not call back into a file system.
 *	%GFP_ATOMIC  don't sleep.
 *
 * @order: Order of the GFP allocation.
 * @vma: Pointer to VMA or NULL if not available.
 * @addr: Virtual address of the allocation. Must be inside the VMA.
 * @node: Which node to prefer for allocation (modulo policy).
 * @hugepage: for hugepages try only the preferred node if possible
 *
 * This function allocates a page from the kernel page pool and applies
 * a NUMA policy associated with the VMA or the current process.
 * When VMA is not NULL the caller must hold down_read on the mmap_sem of
 * the mm_struct of the VMA to prevent it from going away.  Should be used
 * for all allocations for pages that will be mapped into user space.
 * Returns NULL when no page can be allocated.
 */
1966 alloc_pages_vma(gfp_t gfp
, int order
, struct vm_area_struct
*vma
,
1967 unsigned long addr
, int node
, bool hugepage
)
1969 struct mempolicy
*pol
;
1971 unsigned int cpuset_mems_cookie
;
1972 struct zonelist
*zl
;
1976 pol
= get_vma_policy(vma
, addr
);
1977 cpuset_mems_cookie
= read_mems_allowed_begin();
1979 if (pol
->mode
== MPOL_INTERLEAVE
) {
1982 nid
= interleave_nid(pol
, vma
, addr
, PAGE_SHIFT
+ order
);
1984 page
= alloc_page_interleave(gfp
, order
, nid
);
1988 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE
) && hugepage
)) {
1989 int hpage_node
= node
;
1992 * For hugepage allocation and non-interleave policy which
1993 * allows the current node (or other explicitly preferred
1994 * node) we only try to allocate from the current/preferred
1995 * node and don't fall back to other nodes, as the cost of
1996 * remote accesses would likely offset THP benefits.
1998 * If the policy is interleave, or does not allow the current
1999 * node in its nodemask, we allocate the standard way.
2001 if (pol
->mode
== MPOL_PREFERRED
&&
2002 !(pol
->flags
& MPOL_F_LOCAL
))
2003 hpage_node
= pol
->v
.preferred_node
;
2005 nmask
= policy_nodemask(gfp
, pol
);
2006 if (!nmask
|| node_isset(hpage_node
, *nmask
)) {
2008 page
= __alloc_pages_node(hpage_node
,
2009 gfp
| __GFP_THISNODE
, order
);
2014 nmask
= policy_nodemask(gfp
, pol
);
2015 zl
= policy_zonelist(gfp
, pol
, node
);
2016 page
= __alloc_pages_nodemask(gfp
, order
, zl
, nmask
);
2019 if (unlikely(!page
&& read_mems_allowed_retry(cpuset_mems_cookie
)))
/**
 * alloc_pages_current - Allocate pages.
 *
 * @gfp:
 *	%GFP_USER    user allocation,
 *	%GFP_KERNEL  kernel allocation,
 *	%GFP_HIGHMEM highmem allocation,
 *	%GFP_FS      don't call back into a file system.
 *	%GFP_ATOMIC  don't sleep.
 * @order: Power of two of allocation size in pages. 0 is a single page.
 *
 * Allocate a page from the kernel page pool.  When not in interrupt
 * context, apply the current process' NUMA policy.
 * Returns NULL when no page can be allocated.
 *
 * Don't call cpuset_update_task_memory_state() unless
 * 1) it's ok to take cpuset_sem (can WAIT), and
 * 2) allocating for current task (not interrupt).
 */
2043 struct page
*alloc_pages_current(gfp_t gfp
, unsigned order
)
2045 struct mempolicy
*pol
= &default_policy
;
2047 unsigned int cpuset_mems_cookie
;
2049 if (!in_interrupt() && !(gfp
& __GFP_THISNODE
))
2050 pol
= get_task_policy(current
);
2053 cpuset_mems_cookie
= read_mems_allowed_begin();
2056 * No reference counting needed for current->mempolicy
2057 * nor system default_policy
2059 if (pol
->mode
== MPOL_INTERLEAVE
)
2060 page
= alloc_page_interleave(gfp
, order
, interleave_nodes(pol
));
2062 page
= __alloc_pages_nodemask(gfp
, order
,
2063 policy_zonelist(gfp
, pol
, numa_node_id()),
2064 policy_nodemask(gfp
, pol
));
2066 if (unlikely(!page
&& read_mems_allowed_retry(cpuset_mems_cookie
)))
2071 EXPORT_SYMBOL(alloc_pages_current
);
2073 int vma_dup_policy(struct vm_area_struct
*src
, struct vm_area_struct
*dst
)
2075 struct mempolicy
*pol
= mpol_dup(vma_policy(src
));
2078 return PTR_ERR(pol
);
2079 dst
->vm_policy
= pol
;
/*
 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed().  This
 * keeps mempolicies cpuset relative after its cpuset moves.  See
 * further kernel/cpuset.c update_nodemask().
 *
 * current's mempolicy may be rebound by another task (the task that changes
 * the cpuset's mems), so we needn't do rebind work for the current task.
 */
2094 /* Slow path of a mempolicy duplicate */
2095 struct mempolicy
*__mpol_dup(struct mempolicy
*old
)
2097 struct mempolicy
*new = kmem_cache_alloc(policy_cache
, GFP_KERNEL
);
2100 return ERR_PTR(-ENOMEM
);
2102 /* task's mempolicy is protected by alloc_lock */
2103 if (old
== current
->mempolicy
) {
2106 task_unlock(current
);
2110 if (current_cpuset_is_being_rebound()) {
2111 nodemask_t mems
= cpuset_mems_allowed(current
);
2112 if (new->flags
& MPOL_F_REBINDING
)
2113 mpol_rebind_policy(new, &mems
, MPOL_REBIND_STEP2
);
2115 mpol_rebind_policy(new, &mems
, MPOL_REBIND_ONCE
);
2117 atomic_set(&new->refcnt
, 1);
2121 /* Slow path of a mempolicy comparison */
2122 bool __mpol_equal(struct mempolicy
*a
, struct mempolicy
*b
)
2126 if (a
->mode
!= b
->mode
)
2128 if (a
->flags
!= b
->flags
)
2130 if (mpol_store_user_nodemask(a
))
2131 if (!nodes_equal(a
->w
.user_nodemask
, b
->w
.user_nodemask
))
2137 case MPOL_INTERLEAVE
:
2138 return !!nodes_equal(a
->v
.nodes
, b
->v
.nodes
);
2139 case MPOL_PREFERRED
:
2140 return a
->v
.preferred_node
== b
->v
.preferred_node
;
/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in a Red-Black tree linked from the inode.
 * They are protected by the sp->lock rwlock, which should be held
 * for any accesses to the tree.
 */

/*
 * lookup first element intersecting start-end.  Caller holds sp->lock for
 * reading or for writing.
 */
2160 static struct sp_node
*
2161 sp_lookup(struct shared_policy
*sp
, unsigned long start
, unsigned long end
)
2163 struct rb_node
*n
= sp
->root
.rb_node
;
2166 struct sp_node
*p
= rb_entry(n
, struct sp_node
, nd
);
2168 if (start
>= p
->end
)
2170 else if (end
<= p
->start
)
2178 struct sp_node
*w
= NULL
;
2179 struct rb_node
*prev
= rb_prev(n
);
2182 w
= rb_entry(prev
, struct sp_node
, nd
);
2183 if (w
->end
<= start
)
2187 return rb_entry(n
, struct sp_node
, nd
);
2191 * Insert a new shared policy into the list. Caller holds sp->lock for
2194 static void sp_insert(struct shared_policy
*sp
, struct sp_node
*new)
2196 struct rb_node
**p
= &sp
->root
.rb_node
;
2197 struct rb_node
*parent
= NULL
;
2202 nd
= rb_entry(parent
, struct sp_node
, nd
);
2203 if (new->start
< nd
->start
)
2205 else if (new->end
> nd
->end
)
2206 p
= &(*p
)->rb_right
;
2210 rb_link_node(&new->nd
, parent
, p
);
2211 rb_insert_color(&new->nd
, &sp
->root
);
2212 pr_debug("inserting %lx-%lx: %d\n", new->start
, new->end
,
2213 new->policy
? new->policy
->mode
: 0);
2216 /* Find shared policy intersecting idx */
2218 mpol_shared_policy_lookup(struct shared_policy
*sp
, unsigned long idx
)
2220 struct mempolicy
*pol
= NULL
;
2223 if (!sp
->root
.rb_node
)
2225 read_lock(&sp
->lock
);
2226 sn
= sp_lookup(sp
, idx
, idx
+1);
2228 mpol_get(sn
->policy
);
2231 read_unlock(&sp
->lock
);
2235 static void sp_free(struct sp_node
*n
)
2237 mpol_put(n
->policy
);
2238 kmem_cache_free(sn_cache
, n
);
/**
 * mpol_misplaced - check whether current page node is valid in policy
 *
 * @page: page to be checked
 * @vma: vm area where page mapped
 * @addr: virtual address where page mapped
 *
 * Lookup current policy node id for vma,addr and "compare to" page's
 * node id.
 *
 * Returns:
 *	-1	- not misplaced, page is in the right node
 *	node	- node id where the page should be
 *
 * Policy determination "mimics" alloc_page_vma().
 * Called from fault path where we know the vma and faulting address.
 */
2258 int mpol_misplaced(struct page
*page
, struct vm_area_struct
*vma
, unsigned long addr
)
2260 struct mempolicy
*pol
;
2262 int curnid
= page_to_nid(page
);
2263 unsigned long pgoff
;
2264 int thiscpu
= raw_smp_processor_id();
2265 int thisnid
= cpu_to_node(thiscpu
);
2271 pol
= get_vma_policy(vma
, addr
);
2272 if (!(pol
->flags
& MPOL_F_MOF
))
2275 switch (pol
->mode
) {
2276 case MPOL_INTERLEAVE
:
2277 BUG_ON(addr
>= vma
->vm_end
);
2278 BUG_ON(addr
< vma
->vm_start
);
2280 pgoff
= vma
->vm_pgoff
;
2281 pgoff
+= (addr
- vma
->vm_start
) >> PAGE_SHIFT
;
2282 polnid
= offset_il_node(pol
, vma
, pgoff
);
2285 case MPOL_PREFERRED
:
2286 if (pol
->flags
& MPOL_F_LOCAL
)
2287 polnid
= numa_node_id();
2289 polnid
= pol
->v
.preferred_node
;
2295 * allows binding to multiple nodes.
2296 * use current page if in policy nodemask,
2297 * else select nearest allowed node, if any.
2298 * If no allowed nodes, use current [!misplaced].
2300 if (node_isset(curnid
, pol
->v
.nodes
))
2302 z
= first_zones_zonelist(
2303 node_zonelist(numa_node_id(), GFP_HIGHUSER
),
2304 gfp_zone(GFP_HIGHUSER
),
2306 polnid
= z
->zone
->node
;
2313 /* Migrate the page towards the node whose CPU is referencing it */
2314 if (pol
->flags
& MPOL_F_MORON
) {
2317 if (!should_numa_migrate_memory(current
, page
, curnid
, thiscpu
))
2321 if (curnid
!= polnid
)
/*
 * Drop the (possibly final) reference to task->mempolicy.  It needs to be
 * dropped after task->mempolicy is set to NULL so that any allocation done as
 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
 * mempolicy.
 */
2335 void mpol_put_task_policy(struct task_struct
*task
)
2337 struct mempolicy
*pol
;
2340 pol
= task
->mempolicy
;
2341 task
->mempolicy
= NULL
;
2346 static void sp_delete(struct shared_policy
*sp
, struct sp_node
*n
)
2348 pr_debug("deleting %lx-l%lx\n", n
->start
, n
->end
);
2349 rb_erase(&n
->nd
, &sp
->root
);
2353 static void sp_node_init(struct sp_node
*node
, unsigned long start
,
2354 unsigned long end
, struct mempolicy
*pol
)
2356 node
->start
= start
;
2361 static struct sp_node
*sp_alloc(unsigned long start
, unsigned long end
,
2362 struct mempolicy
*pol
)
2365 struct mempolicy
*newpol
;
2367 n
= kmem_cache_alloc(sn_cache
, GFP_KERNEL
);
2371 newpol
= mpol_dup(pol
);
2372 if (IS_ERR(newpol
)) {
2373 kmem_cache_free(sn_cache
, n
);
2376 newpol
->flags
|= MPOL_F_SHARED
;
2377 sp_node_init(n
, start
, end
, newpol
);
/* Replace a policy range. */
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
				 unsigned long end, struct sp_node *new)
{
	struct sp_node *n;
	struct sp_node *n_new = NULL;
	struct mempolicy *mpol_new = NULL;
	int ret = 0;

restart:
	write_lock(&sp->lock);
	n = sp_lookup(sp, start, end);
	/* Take care of old policies in the same range. */
	while (n && n->start < end) {
		struct rb_node *next = rb_next(&n->nd);
		if (n->start >= start) {
			if (n->end <= end)
				sp_delete(sp, n);
			else
				n->start = end;
		} else {
			/* Old policy spanning whole new range. */
			if (n->end > end) {
				if (!n_new)
					goto alloc_new;

				*mpol_new = *n->policy;
				atomic_set(&mpol_new->refcnt, 1);
				sp_node_init(n_new, end, n->end, mpol_new);
				n->end = start;
				sp_insert(sp, n_new);
				n_new = NULL;
				mpol_new = NULL;
				break;
			} else
				n->end = start;
		}
		if (!next)
			break;
		n = rb_entry(next, struct sp_node, nd);
	}
	if (new)
		sp_insert(sp, new);
	write_unlock(&sp->lock);
	ret = 0;

err_out:
	if (mpol_new)
		mpol_put(mpol_new);
	if (n_new)
		kmem_cache_free(sn_cache, n_new);

	return ret;

alloc_new:
	write_unlock(&sp->lock);
	ret = -ENOMEM;
	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
	if (!n_new)
		goto err_out;
	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!mpol_new)
		goto err_out;
	goto restart;
}
/**
 * mpol_shared_policy_init - initialize shared policy for inode
 * @sp: pointer to inode shared policy
 * @mpol: struct mempolicy to install
 *
 * Install non-NULL @mpol in inode's shared policy rb-tree.
 * On entry, the current task has a reference on a non-NULL @mpol.
 * This must be released on exit.
 * This is called at get_inode() calls and we can use GFP_KERNEL.
 */
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
{
	int ret;

	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
	rwlock_init(&sp->lock);

	if (mpol) {
		struct vm_area_struct pvma;
		struct mempolicy *new;
		NODEMASK_SCRATCH(scratch);

		if (!scratch)
			goto put_mpol;
		/* contextualize the tmpfs mount point mempolicy */
		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
		if (IS_ERR(new))
			goto free_scratch; /* no valid nodemask intersection */

		task_lock(current);
		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
		task_unlock(current);
		if (ret)
			goto put_new;

		/* Create pseudo-vma that contains just the policy */
		memset(&pvma, 0, sizeof(struct vm_area_struct));
		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */

put_new:
		mpol_put(new);			/* drop initial ref */
free_scratch:
		NODEMASK_SCRATCH_FREE(scratch);
put_mpol:
		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
	}
}
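
/*
 * Illustrative sketch (under "#if 0", not compiled): how a filesystem such
 * as tmpfs could hand its mount-option mempolicy to a fresh inode.  The
 * helper name is hypothetical; the reference handling mirrors the comment
 * above, which says mpol_shared_policy_init() consumes one reference on a
 * non-NULL @mpol.
 */
#if 0
static void example_init_inode_policy(struct shared_policy *sp,
				      struct mempolicy *sb_mpol)
{
	/* take the reference that mpol_shared_policy_init() will drop */
	if (sb_mpol)
		mpol_get(sb_mpol);
	mpol_shared_policy_init(sp, sb_mpol);
}
#endif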
int mpol_set_shared_policy(struct shared_policy *info,
			struct vm_area_struct *vma, struct mempolicy *npol)
{
	int err;
	struct sp_node *new = NULL;
	unsigned long sz = vma_pages(vma);

	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
		 vma->vm_pgoff,
		 sz, npol ? npol->mode : -1,
		 npol ? npol->flags : -1,
		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);

	if (npol) {
		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
		if (!new)
			return -ENOMEM;
	}
	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff + sz, new);
	if (err && new)
		sp_free(new);
	return err;
}
/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
	struct sp_node *n;
	struct rb_node *next;

	if (!p->root.rb_node)
		return;
	write_lock(&p->lock);
	next = rb_first(&p->root);
	while (next) {
		n = rb_entry(next, struct sp_node, nd);
		next = rb_next(&n->nd);
		sp_delete(p, n);
	}
	write_unlock(&p->lock);
}
#ifdef CONFIG_NUMA_BALANCING
static int __initdata numabalancing_override;

static void __init check_numabalancing_enable(void)
{
	bool numabalancing_default = false;

	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
		numabalancing_default = true;

	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
	if (numabalancing_override)
		set_numabalancing_state(numabalancing_override == 1);

	if (num_online_nodes() > 1 && !numabalancing_override) {
		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
			numabalancing_default ? "Enabling" : "Disabling");
		set_numabalancing_state(numabalancing_default);
	}
}

static int __init setup_numabalancing(char *str)
{
	int ret = 0;

	if (!str)
		goto out;

	if (!strcmp(str, "enable")) {
		numabalancing_override = 1;
		ret = 1;
	} else if (!strcmp(str, "disable")) {
		numabalancing_override = -1;
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("Unable to parse numa_balancing=\n");

	return ret;
}
__setup("numa_balancing=", setup_numabalancing);
#else
static inline void __init check_numabalancing_enable(void)
{
}
#endif /* CONFIG_NUMA_BALANCING */
/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
	nodemask_t interleave_nodes;
	unsigned long largest = 0;
	int nid, prefer = 0;

	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL);

	sn_cache = kmem_cache_create("shared_policy_node",
				     sizeof(struct sp_node),
				     0, SLAB_PANIC, NULL);

	for_each_node(nid) {
		preferred_node_policy[nid] = (struct mempolicy) {
			.refcnt = ATOMIC_INIT(1),
			.mode = MPOL_PREFERRED,
			.flags = MPOL_F_MOF | MPOL_F_MORON,
			.v = { .preferred_node = nid, },
		};
	}

	/*
	 * Set interleaving policy for system init. Interleaving is only
	 * enabled across suitably sized nodes (default is >= 16MB), or
	 * fall back to the largest node if they're all smaller.
	 */
	nodes_clear(interleave_nodes);
	for_each_node_state(nid, N_MEMORY) {
		unsigned long total_pages = node_present_pages(nid);

		/* Preserve the largest node */
		if (largest < total_pages) {
			largest = total_pages;
			prefer = nid;
		}

		/* Interleave this node? */
		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
			node_set(nid, interleave_nodes);
	}

	/* All too small, use the largest */
	if (unlikely(nodes_empty(interleave_nodes)))
		node_set(prefer, interleave_nodes);

	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
		pr_err("%s: interleaving failed\n", __func__);

	check_numabalancing_enable();
}

/* Reset policy of current process to default */
void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}
/*
 * Parse and format mempolicy from/to strings
 */

/*
 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
 */
static const char * const policy_modes[] =
{
	[MPOL_DEFAULT]    = "default",
	[MPOL_PREFERRED]  = "prefer",
	[MPOL_BIND]       = "bind",
	[MPOL_INTERLEAVE] = "interleave",
	[MPOL_LOCAL]      = "local",
};
#ifdef CONFIG_TMPFS
/**
 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
 * @str: string containing mempolicy to parse
 * @mpol: pointer to struct mempolicy pointer, returned on success.
 *
 * Format of input:
 *	<mode>[=<flags>][:<nodelist>]
 *
 * On success, returns 0, else 1
 */
int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	struct mempolicy *new = NULL;
	unsigned short mode;
	unsigned short mode_flags;
	nodemask_t nodes;
	char *nodelist = strchr(str, ':');
	char *flags = strchr(str, '=');
	int err = 1;

	if (nodelist) {
		/* NUL-terminate mode or flags string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, nodes))
			goto out;
		if (!nodes_subset(nodes, node_states[N_MEMORY]))
			goto out;
	} else
		nodes_clear(nodes);

	if (flags)
		*flags++ = '\0';	/* terminate mode string */

	for (mode = 0; mode < MPOL_MAX; mode++) {
		if (!strcmp(str, policy_modes[mode]))
			break;
	}
	if (mode >= MPOL_MAX)
		goto out;

	switch (mode) {
	case MPOL_PREFERRED:
		/*
		 * Insist on a nodelist of one node only
		 */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (*rest)
				goto out;
		}
		break;
	case MPOL_INTERLEAVE:
		/*
		 * Default to online nodes with memory if no nodelist
		 */
		if (!nodelist)
			nodes = node_states[N_MEMORY];
		break;
	case MPOL_LOCAL:
		/*
		 * Don't allow a nodelist;  mpol_new() checks flags
		 */
		if (nodelist)
			goto out;
		mode = MPOL_PREFERRED;
		break;
	case MPOL_DEFAULT:
		/*
		 * Insist on an empty nodelist
		 */
		if (!nodelist)
			err = 0;
		goto out;
	case MPOL_BIND:
		/*
		 * Insist on a nodelist
		 */
		if (!nodelist)
			goto out;
	}

	mode_flags = 0;
	if (flags) {
		/*
		 * Currently, we only support two mutually exclusive
		 * mode flags.
		 */
		if (!strcmp(flags, "static"))
			mode_flags |= MPOL_F_STATIC_NODES;
		else if (!strcmp(flags, "relative"))
			mode_flags |= MPOL_F_RELATIVE_NODES;
		else
			goto out;
	}

	new = mpol_new(mode, mode_flags, &nodes);
	if (IS_ERR(new))
		goto out;

	/*
	 * Save nodes for mpol_to_str() to show the tmpfs mount options
	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
	 */
	if (mode != MPOL_PREFERRED)
		new->v.nodes = nodes;
	else if (nodelist)
		new->v.preferred_node = first_node(nodes);
	else
		new->flags |= MPOL_F_LOCAL;

	/*
	 * Save nodes for contextualization: this will be used to "clone"
	 * the mempolicy in a specific context [cpuset] at a later time.
	 */
	new->w.user_nodemask = nodes;

	err = 0;

out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	if (flags)
		*--flags = '=';
	if (!err)
		*mpol = new;
	return err;
}
#endif /* CONFIG_TMPFS */
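
/*
 * Illustrative sketch (under "#if 0", not compiled): strings accepted by
 * mpol_parse_str() above, in the <mode>[=<flags>][:<nodelist>] format used
 * by the tmpfs "mpol=" mount option.  The wrapper function is hypothetical
 * and the node numbers are arbitrary; mutable buffers are used because the
 * parser temporarily edits the string in place.
 */
#if 0
static void example_parse_mpol(void)
{
	struct mempolicy *pol;
	char interleave[] = "interleave=relative:0-3";
	char bind[] = "bind:1,3";
	char local[] = "local";

	if (!mpol_parse_str(interleave, &pol))	/* 0 == success */
		mpol_put(pol);
	if (!mpol_parse_str(bind, &pol))
		mpol_put(pol);
	if (!mpol_parse_str(local, &pol))
		mpol_put(pol);
}
#endif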
/**
 * mpol_to_str - format a mempolicy structure for printing
 * @buffer: to contain formatted mempolicy string
 * @maxlen: length of @buffer
 * @pol: pointer to mempolicy to be formatted
 *
 * Convert @pol into a string.  If @buffer is too short, truncate the string.
 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
 * longest flag, "relative", and to display at least a few node ids.
 */
void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
	char *p = buffer;
	nodemask_t nodes = NODE_MASK_NONE;
	unsigned short mode = MPOL_DEFAULT;
	unsigned short flags = 0;

	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
		mode = pol->mode;
		flags = pol->flags;
	}

	switch (mode) {
	case MPOL_DEFAULT:
		break;
	case MPOL_PREFERRED:
		if (flags & MPOL_F_LOCAL)
			mode = MPOL_LOCAL;
		else
			node_set(pol->v.preferred_node, nodes);
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		nodes = pol->v.nodes;
		break;
	default:
		WARN_ON_ONCE(1);
		snprintf(p, maxlen, "unknown");
		return;
	}

	p += snprintf(p, maxlen, "%s", policy_modes[mode]);

	if (flags & MPOL_MODE_FLAGS) {
		p += snprintf(p, buffer + maxlen - p, "=");

		/*
		 * Currently, the only defined flags are mutually exclusive
		 */
		if (flags & MPOL_F_STATIC_NODES)
			p += snprintf(p, buffer + maxlen - p, "static");
		else if (flags & MPOL_F_RELATIVE_NODES)
			p += snprintf(p, buffer + maxlen - p, "relative");
	}

	if (!nodes_empty(nodes))
		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
			       nodemask_pr_args(&nodes));
}
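
/*
 * Illustrative sketch (under "#if 0", not compiled): formatting a policy
 * for display.  The 32-byte buffer follows the @maxlen recommendation
 * above; example_show_mempolicy() is a made-up name.
 */
#if 0
static void example_show_mempolicy(struct mempolicy *pol)
{
	char buf[32];

	mpol_to_str(buf, sizeof(buf), pol);
	/* typical results: "default", "prefer:2", "bind:0,2", "interleave=static:0-3" */
	pr_info("mempolicy: %s\n", buf);
}
#endif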