/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
	static_branch_inc(&cpusets_pre_enable_key);
	static_branch_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec(&cpusets_enabled_key);
	static_branch_dec(&cpusets_pre_enable_key);
}
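
/*
 * Illustrative sketch (hypothetical call sites, not part of this header):
 * the cpuset core is expected to pair these helpers as the number of
 * cpusets that require the slow paths changes, for example:
 *
 *	cpuset_inc();		when such a cpuset is created
 *	...
 *	cpuset_dec();		when the last such cpuset goes away
 *
 * Note that cpuset_dec() reverses the key order used by cpuset_inc(),
 * matching the transition-ordering comment above the key declarations.
 */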

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_node_allowed(node, gfp_mask);
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}
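
/*
 * Illustrative sketch (not part of this header): an allocation path can
 * use cpuset_node_allowed() to skip nodes the calling task's cpuset does
 * not permit.  The helper below is hypothetical; the real page allocator
 * applies an equivalent per-zone test while walking its zonelists.
 *
 *	static int pick_first_allowed_node(gfp_t gfp_mask)
 *	{
 *		int nid;
 *
 *		for_each_online_node(nid)
 *			if (cpuset_node_allowed(nid, gfp_mask))
 *				return nid;
 *		return NUMA_NO_NODE;
 *	}
 */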

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
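
/*
 * Illustrative usage (an assumption about a typical caller, not part of
 * this header): a synchronous-reclaim path can note cpuset-induced memory
 * pressure before it starts reclaiming:
 *
 *	cpuset_memory_pressure_bump();
 *	... enter direct reclaim ...
 *
 * When the cpuset memory_pressure_enabled control is off, the macro costs
 * only the test of cpuset_memory_pressure_enabled.
 */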

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}
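
/*
 * Illustrative sketch (an assumption about a typical user, not part of
 * this header): a page cache allocation can honour the cpuset
 * "memory_spread_page" flag by picking the spread node explicitly;
 * __alloc_pages_node() is the ordinary per-node allocator entry point.
 *
 *	if (cpuset_do_page_mem_spread())
 *		return __alloc_pages_node(cpuset_mem_spread_node(),
 *					  gfp_mask, 0);
 */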

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and depending on the new value an operation can fail potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
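
/*
 * Illustrative sketch (not part of this header): the retry pattern the
 * comment above read_mems_allowed_begin() describes.  The helper
 * alloc_page_respecting_mems() is hypothetical and stands in for any
 * operation whose outcome depends on current->mems_allowed.
 *
 *	unsigned int cookie;
 *	struct page *page;
 *
 *	do {
 *		cookie = read_mems_allowed_begin();
 *		page = alloc_page_respecting_mems(gfp_mask);
 *	} while (!page && read_mems_allowed_retry(cookie));
 */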

static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
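
/*
 * Illustrative usage (an assumption, not mandated by this header): a
 * kernel thread that must be able to allocate from every node with
 * memory can widen its mask with
 *
 *	set_mems_allowed(node_states[N_MEMORY]);
 *
 * The irq-safe seqcount write above pairs with the
 * read_mems_allowed_begin()/read_mems_allowed_retry() readers.
 */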

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */