/* SPDX-License-Identifier: GPL-2.0 */
/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_lock.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	nodemask_t nodes;	/* interleave/bind/preferred */

	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};
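
/*
 * Illustrative sketch (not part of the header proper): a policy installed
 * from userspace via set_mempolicy(2), e.g.
 * set_mempolicy(MPOL_INTERLEAVE, &mask, maxnode), lands in the struct
 * above with mode == MPOL_INTERLEAVE, flags == 0 and pol->nodes holding
 * the user's nodemask.  In-kernel it could be inspected like this:
 *
 *	struct mempolicy *pol = get_task_policy(current);
 *
 *	if (pol->mode == MPOL_INTERLEAVE)
 *		pr_debug("interleaving over %*pbl\n",
 *			 nodemask_pr_args(&pol->nodes));
 */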

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}
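
/*
 * Usage sketch (illustrative; vma and addr are placeholders): lookups that
 * may return a reference-counted shared policy are paired with
 * mpol_cond_put(), which is a no-op for non-shared policies:
 *
 *	struct mempolicy *pol = __get_vma_policy(vma, addr);
 *
 *	if (!pol)
 *		pol = get_task_policy(current);
 *	... use pol ...
 *	mpol_cond_put(pol);
 */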

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}
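
/*
 * Lifecycle sketch (illustrative): the duplicate comes back with
 * refcnt == 1, owned by the caller, and allocation failure is reported
 * as an ERR_PTR, so the pattern is:
 *
 *	struct mempolicy *copy = mpol_dup(orig);
 *
 *	if (IS_ERR(copy))
 *		return PTR_ERR(copy);
 *	... use copy ...
 *	mpol_put(copy);
 */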

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	rwlock_t lock;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
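
/*
 * Lookup sketch (illustrative, modelled on the shmem usage): because the
 * tree is indexed in pages, a byte offset into the object is converted to
 * a page index first; if the result is a shared policy it carries a
 * reference that must be dropped with mpol_cond_put():
 *
 *	pgoff_t idx = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
 *	struct mempolicy *pol = mpol_shared_policy_lookup(sp, idx);
 *
 *	... use pol ...
 *	mpol_cond_put(pol);
 */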

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
		unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

extern int huge_node(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
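
/*
 * huge_node() usage sketch (illustrative, mirroring the hugetlb allocation
 * path; gfp_mask and order are placeholders): it returns the preferred
 * node id and fills in the policy and nodemask to allocate against, and
 * the policy reference is conditional:
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	int nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
 *
 *	page = __alloc_pages(gfp_mask, order, nid, nodemask);
 *	mpol_cond_put(mpol);
 */
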
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_in_oom_domain(struct task_struct *tsk,
				const nodemask_t *mask);
extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);

static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
{
	struct mempolicy *mpol = get_task_policy(current);

	return policy_nodemask(gfp, mpol);
}

extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);


#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
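
/*
 * Round-trip sketch (illustrative): mpol_parse_str() understands the tmpfs
 * mpol= option syntax, e.g. "interleave:0-3", and mpol_to_str() formats a
 * policy back into that form.  The parser modifies the string, so it must
 * be writable, and it returns 0 on success:
 *
 *	char str[] = "interleave:0-3";
 *	char buf[64];
 *	struct mempolicy *pol;
 *
 *	if (!mpol_parse_str(str, &pol)) {
 *		mpol_to_str(buf, sizeof(buf), pol);
 *		mpol_put(pol);
 *	}
 */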

/* Check if a vma is migratable */
extern bool vma_migratable(struct vm_area_struct *vma);

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);

extern bool numa_demotion_enabled;

static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
	return (pol->mode == MPOL_PREFERRED_MANY);
}


#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline int huge_node(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return 0;
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	return 1;	/* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1; /* no node preference */
}

static inline void mpol_put_task_policy(struct task_struct *task)
{
}

static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
{
	return NULL;
}

#define numa_demotion_enabled	false

static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
	return false;
}

#endif /* CONFIG_NUMA */
#endif