/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at the real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
        return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
        static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
        static_branch_inc_cpuslocked(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
        static_branch_dec_cpuslocked(&cpusets_enabled_key);
        static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}
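
/*
 * Note the pairing above: cpuset_inc() enables cpusets_pre_enable_key
 * before cpusets_enabled_key, and cpuset_dec() disables them in the
 * reverse order. This is what produces the begin()-before-retry()
 * rewrite ordering that the comment at the top of this block requires.
 */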

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_read_lock(void);
extern void cpuset_read_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
        if (cpusets_enabled())
                return __cpuset_node_allowed(node, gfp_mask);
        return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        if (cpusets_enabled())
                return __cpuset_zone_allowed(z, gfp_mask);
        return true;
}
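
/*
 * Example: a sketch of how an allocator's zone walk can use the fast
 * paths above, modelled on get_page_from_freelist() in mm/page_alloc.c
 * (the real caller also checks ALLOC_CPUSET; the loop shown here is
 * simplified):
 *
 *      for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
 *              if (cpusets_enabled() &&
 *                  !__cpuset_zone_allowed(zone, gfp_mask))
 *                      continue;
 *              ... try to allocate from this zone ...
 *      }
 */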

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
                                          const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()                           \
        do {                                                    \
                if (cpuset_memory_pressure_enabled)             \
                        __cpuset_memory_pressure_bump();        \
        } while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
                                       struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
                            struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
        return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
        return task_spread_slab(current);
}
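
/*
 * Example: how the spread helpers combine with the mems_allowed retry
 * loop; a sketch modelled on __page_cache_alloc() in mm/filemap.c
 * (variable declarations omitted):
 *
 *      if (cpuset_do_page_mem_spread()) {
 *              do {
 *                      cpuset_mems_cookie = read_mems_allowed_begin();
 *                      n = cpuset_mem_spread_node();
 *                      page = __alloc_pages_node(n, gfp, 0);
 *              } while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 *      }
 */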

extern bool current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be updated
 * in parallel and, depending on the new value, an operation can fail,
 * potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
        if (!static_branch_unlikely(&cpusets_pre_enable_key))
                return 0;

        return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
        if (!static_branch_unlikely(&cpusets_enabled_key))
                return false;

        return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
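
/*
 * Canonical usage of the pair above (see also the __page_cache_alloc()
 * sketch earlier in this file):
 *
 *      unsigned int cookie;
 *
 *      do {
 *              cookie = read_mems_allowed_begin();
 *              ... attempt the nodemask-dependent operation ...
 *      } while (... it failed ... && read_mems_allowed_retry(cookie));
 *
 * begin() keys off cpusets_pre_enable_key while retry() keys off
 * cpusets_enabled_key, so during a disabled -> enabled transition a
 * caller can never see a live seqcount in retry() after begin()
 * returned a constant 0.
 */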
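/*
 * Writer side of the mems_allowed seqcount: task_lock() serializes
 * writers, and interrupts are disabled across the write section because
 * readers may run in interrupt context (e.g. GFP_ATOMIC allocations)
 * and would otherwise spin forever against a writer on the same CPU.
 */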
static inline void set_mems_allowed(nodemask_t nodemask)
{
        unsigned long flags;

        task_lock(current);
        local_irq_save(flags);
        write_seqcount_begin(&current->mems_allowed_seq);
        current->mems_allowed = nodemask;
        write_seqcount_end(&current->mems_allowed_seq);
        local_irq_restore(flags);
        task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

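/*
 * With cpusets compiled out, these stubs behave as if every task were
 * allowed to use all possible CPUs and memory nodes, so callers do not
 * need conditional compilation of their own.
 */
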
static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
        partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_read_lock(void) { }
static inline void cpuset_read_unlock(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
                                       struct cpumask *mask)
{
        cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
        return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
        return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
        return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
                                                 const struct task_struct *tsk2)
{
        return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
                                              struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
        return 0;
}

static inline int cpuset_slab_spread_node(void)
{
        return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
        return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
        return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
        return false;
}

static inline void rebuild_sched_domains(void)
{
        partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
        return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
        return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */