#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key.key) + 1;
}

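/*
 * Enable/disable the static keys in the order required by the comment
 * above: cpusets_pre_enable_key flips on before cpusets_enabled_key and
 * flips off after it.
 */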
static inline void cpuset_inc(void)
{
	static_branch_inc(&cpusets_pre_enable_key);
	static_branch_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec(&cpusets_enabled_key);
	static_branch_dec(&cpusets_pre_enable_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_node_allowed(node, gfp_mask);
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}

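/*
 * Illustrative sketch (not a specific call site): an allocation path
 * scanning candidate zones would typically use cpuset_zone_allowed() to
 * skip zones on nodes the current task's cpuset forbids, e.g.:
 *
 *	for_each_candidate_zone(zone) {
 *		if (!cpuset_zone_allowed(zone, gfp_mask))
 *			continue;
 *		...try to allocate from this zone...
 *	}
 *
 * where for_each_candidate_zone() stands in for whatever zone iterator
 * the caller already uses.  cpuset_zone_allowed() folds in the
 * cpusets_enabled() fast path, so no separate check is needed around it.
 */
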
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and depending on the new value an operation can fail potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}

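/*
 * Typical usage (an illustrative sketch, not lifted from a specific call
 * site): sample the seqcount before consulting mems_allowed and retry the
 * operation if the mask changed underneath it:
 *
 *	struct page *page;
 *	unsigned int cpuset_mems_cookie;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = try_to_allocate(gfp_mask);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 *
 * where try_to_allocate() stands in for whatever allocation step consults
 * current->mems_allowed.  While cpusets are disabled, begin() returns 0
 * and retry() returns false, so the loop runs exactly once.
 */
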
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

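/*
 * set_mems_allowed() above is the write side of mems_allowed_seq; readers
 * pair with it via read_mems_allowed_begin()/read_mems_allowed_retry().
 * Local irqs are disabled across the write section, consistent with the
 * note at the top of this file that readers may loop on the seqcount with
 * irqs disabled.
 */
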
#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

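/*
 * With CONFIG_CPUSETS disabled there is no cpuset state to track, so a CPU
 * hotplug update simply asks the scheduler to rebuild its default, single
 * sched-domain partition.
 */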
static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */