]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * kernel/cpuset.c | |
3 | * | |
4 | * Processor and Memory placement constraints for sets of tasks. | |
5 | * | |
6 | * Copyright (C) 2003 BULL SA. | |
029190c5 | 7 | * Copyright (C) 2004-2007 Silicon Graphics, Inc. |
8793d854 | 8 | * Copyright (C) 2006 Google, Inc |
1da177e4 LT |
9 | * |
10 | * Portions derived from Patrick Mochel's sysfs code. | |
11 | * sysfs is Copyright (c) 2001-3 Patrick Mochel | |
1da177e4 | 12 | * |
825a46af | 13 | * 2003-10-10 Written by Simon Derr. |
1da177e4 | 14 | * 2003-10-22 Updates by Stephen Hemminger. |
825a46af | 15 | * 2004 May-July Rework by Paul Jackson. |
8793d854 | 16 | * 2006 Rework by Paul Menage to use generic cgroups |
cf417141 MK |
17 | * 2008 Rework of the scheduler domains and CPU hotplug handling |
18 | * by Max Krasnyansky | |
1da177e4 LT |
19 | * |
20 | * This file is subject to the terms and conditions of the GNU General Public | |
21 | * License. See the file COPYING in the main directory of the Linux | |
22 | * distribution for more details. | |
23 | */ | |
24 | ||
1da177e4 LT |
25 | #include <linux/cpu.h> |
26 | #include <linux/cpumask.h> | |
27 | #include <linux/cpuset.h> | |
28 | #include <linux/err.h> | |
29 | #include <linux/errno.h> | |
30 | #include <linux/file.h> | |
31 | #include <linux/fs.h> | |
32 | #include <linux/init.h> | |
33 | #include <linux/interrupt.h> | |
34 | #include <linux/kernel.h> | |
35 | #include <linux/kmod.h> | |
36 | #include <linux/list.h> | |
68860ec1 | 37 | #include <linux/mempolicy.h> |
1da177e4 | 38 | #include <linux/mm.h> |
f481891f | 39 | #include <linux/memory.h> |
9984de1a | 40 | #include <linux/export.h> |
1da177e4 | 41 | #include <linux/mount.h> |
a1875374 | 42 | #include <linux/fs_context.h> |
1da177e4 LT |
43 | #include <linux/namei.h> |
44 | #include <linux/pagemap.h> | |
45 | #include <linux/proc_fs.h> | |
6b9c2603 | 46 | #include <linux/rcupdate.h> |
1da177e4 | 47 | #include <linux/sched.h> |
f9a25f77 | 48 | #include <linux/sched/deadline.h> |
6e84f315 | 49 | #include <linux/sched/mm.h> |
f719ff9b | 50 | #include <linux/sched/task.h> |
1da177e4 | 51 | #include <linux/seq_file.h> |
22fb52dd | 52 | #include <linux/security.h> |
1da177e4 | 53 | #include <linux/slab.h> |
1da177e4 LT |
54 | #include <linux/spinlock.h> |
55 | #include <linux/stat.h> | |
56 | #include <linux/string.h> | |
57 | #include <linux/time.h> | |
d2b43658 | 58 | #include <linux/time64.h> |
1da177e4 LT |
59 | #include <linux/backing-dev.h> |
60 | #include <linux/sort.h> | |
da99ecf1 | 61 | #include <linux/oom.h> |
edb93821 | 62 | #include <linux/sched/isolation.h> |
7c0f6ba6 | 63 | #include <linux/uaccess.h> |
60063497 | 64 | #include <linux/atomic.h> |
3d3f26a7 | 65 | #include <linux/mutex.h> |
956db3ca | 66 | #include <linux/cgroup.h> |
e44193d3 | 67 | #include <linux/wait.h> |
1da177e4 | 68 | |
89affbf5 | 69 | DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key); |
002f2906 | 70 | DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key); |
202f72d5 | 71 | |
3e0d98b9 PJ |
72 | /* See "Frequency meter" comments, below. */ |
73 | ||
74 | struct fmeter { | |
75 | int cnt; /* unprocessed events count */ | |
76 | int val; /* most recent output value */ | |
d2b43658 | 77 | time64_t time; /* clock (secs) when val computed */ |
3e0d98b9 PJ |
78 | spinlock_t lock; /* guards read or write of above */ |
79 | }; | |
80 | ||
1da177e4 | 81 | struct cpuset { |
8793d854 PM |
82 | struct cgroup_subsys_state css; |
83 | ||
1da177e4 | 84 | unsigned long flags; /* "unsigned long" so bitops work */ |
e2b9a3d7 | 85 | |
7e88291b LZ |
86 | /* |
87 | * On default hierarchy: | |
88 | * | |
89 | * The user-configured masks can only be changed by writing to | |
90 | * cpuset.cpus and cpuset.mems, and won't be limited by the | |
91 | * parent masks. | |
92 | * | |
93 | * The effective masks is the real masks that apply to the tasks | |
94 | * in the cpuset. They may be changed if the configured masks are | |
95 | * changed or hotplug happens. | |
96 | * | |
97 | * effective_mask == configured_mask & parent's effective_mask, | |
98 | * and if it ends up empty, it will inherit the parent's mask. | |
99 | * | |
100 | * | |
101 | * On legacy hierachy: | |
102 | * | |
103 | * The user-configured masks are always the same with effective masks. | |
104 | */ | |
105 | ||
e2b9a3d7 LZ |
106 | /* user-configured CPUs and Memory Nodes allow to tasks */ |
107 | cpumask_var_t cpus_allowed; | |
108 | nodemask_t mems_allowed; | |
109 | ||
110 | /* effective CPUs and Memory Nodes allow to tasks */ | |
111 | cpumask_var_t effective_cpus; | |
112 | nodemask_t effective_mems; | |
1da177e4 | 113 | |
58b74842 WL |
114 | /* |
115 | * CPUs allocated to child sub-partitions (default hierarchy only) | |
116 | * - CPUs granted by the parent = effective_cpus U subparts_cpus | |
117 | * - effective_cpus and subparts_cpus are mutually exclusive. | |
4b842da2 WL |
118 | * |
119 | * effective_cpus contains only onlined CPUs, but subparts_cpus | |
120 | * may have offlined ones. | |
58b74842 WL |
121 | */ |
122 | cpumask_var_t subparts_cpus; | |
123 | ||
33ad801d LZ |
124 | /* |
125 | * This is old Memory Nodes tasks took on. | |
126 | * | |
127 | * - top_cpuset.old_mems_allowed is initialized to mems_allowed. | |
128 | * - A new cpuset's old_mems_allowed is initialized when some | |
129 | * task is moved into it. | |
130 | * - old_mems_allowed is used in cpuset_migrate_mm() when we change | |
131 | * cpuset.mems_allowed and have tasks' nodemask updated, and | |
132 | * then old_mems_allowed is updated to mems_allowed. | |
133 | */ | |
134 | nodemask_t old_mems_allowed; | |
135 | ||
3e0d98b9 | 136 | struct fmeter fmeter; /* memory_pressure filter */ |
029190c5 | 137 | |
452477fa TH |
138 | /* |
139 | * Tasks are being attached to this cpuset. Used to prevent | |
140 | * zeroing cpus/mems_allowed between ->can_attach() and ->attach(). | |
141 | */ | |
142 | int attach_in_progress; | |
143 | ||
029190c5 PJ |
144 | /* partition number for rebuild_sched_domains() */ |
145 | int pn; | |
956db3ca | 146 | |
1d3504fc HS |
147 | /* for custom sched domain */ |
148 | int relax_domain_level; | |
58b74842 WL |
149 | |
150 | /* number of CPUs in subparts_cpus */ | |
151 | int nr_subparts_cpus; | |
152 | ||
153 | /* partition root state */ | |
154 | int partition_root_state; | |
4716909c WL |
155 | |
156 | /* | |
157 | * Default hierarchy only: | |
158 | * use_parent_ecpus - set if using parent's effective_cpus | |
159 | * child_ecpus_count - # of children with use_parent_ecpus set | |
160 | */ | |
161 | int use_parent_ecpus; | |
162 | int child_ecpus_count; | |
58b74842 WL |
163 | }; |
164 | ||
165 | /* | |
166 | * Partition root states: | |
167 | * | |
168 | * 0 - not a partition root | |
3881b861 | 169 | * |
58b74842 | 170 | * 1 - partition root |
3881b861 WL |
171 | * |
172 | * -1 - invalid partition root | |
173 | * None of the cpus in cpus_allowed can be put into the parent's | |
174 | * subparts_cpus. In this case, the cpuset is not a real partition | |
175 | * root anymore. However, the CPU_EXCLUSIVE bit will still be set | |
176 | * and the cpuset can be restored back to a partition root if the | |
177 | * parent cpuset can give more CPUs back to this child cpuset. | |
58b74842 WL |
178 | */ |
179 | #define PRS_DISABLED 0 | |
180 | #define PRS_ENABLED 1 | |
3881b861 | 181 | #define PRS_ERROR -1 |
58b74842 WL |
182 | |
183 | /* | |
184 | * Temporary cpumasks for working with partitions that are passed among | |
185 | * functions to avoid memory allocation in inner functions. | |
186 | */ | |
187 | struct tmpmasks { | |
188 | cpumask_var_t addmask, delmask; /* For partition root */ | |
189 | cpumask_var_t new_cpus; /* For update_cpumasks_hier() */ | |
1da177e4 LT |
190 | }; |
191 | ||
a7c6d554 | 192 | static inline struct cpuset *css_cs(struct cgroup_subsys_state *css) |
8793d854 | 193 | { |
a7c6d554 | 194 | return css ? container_of(css, struct cpuset, css) : NULL; |
8793d854 PM |
195 | } |
196 | ||
197 | /* Retrieve the cpuset for a task */ | |
198 | static inline struct cpuset *task_cs(struct task_struct *task) | |
199 | { | |
073219e9 | 200 | return css_cs(task_css(task, cpuset_cgrp_id)); |
8793d854 | 201 | } |
8793d854 | 202 | |
c9710d80 | 203 | static inline struct cpuset *parent_cs(struct cpuset *cs) |
c431069f | 204 | { |
5c9d535b | 205 | return css_cs(cs->css.parent); |
c431069f TH |
206 | } |
207 | ||
1da177e4 LT |
208 | /* bits in struct cpuset flags field */ |
209 | typedef enum { | |
efeb77b2 | 210 | CS_ONLINE, |
1da177e4 LT |
211 | CS_CPU_EXCLUSIVE, |
212 | CS_MEM_EXCLUSIVE, | |
78608366 | 213 | CS_MEM_HARDWALL, |
45b07ef3 | 214 | CS_MEMORY_MIGRATE, |
029190c5 | 215 | CS_SCHED_LOAD_BALANCE, |
825a46af PJ |
216 | CS_SPREAD_PAGE, |
217 | CS_SPREAD_SLAB, | |
1da177e4 LT |
218 | } cpuset_flagbits_t; |
219 | ||
220 | /* convenient tests for these bits */ | |
41c25707 | 221 | static inline bool is_cpuset_online(struct cpuset *cs) |
efeb77b2 | 222 | { |
41c25707 | 223 | return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); |
efeb77b2 TH |
224 | } |
225 | ||
1da177e4 LT |
226 | static inline int is_cpu_exclusive(const struct cpuset *cs) |
227 | { | |
7b5b9ef0 | 228 | return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); |
1da177e4 LT |
229 | } |
230 | ||
231 | static inline int is_mem_exclusive(const struct cpuset *cs) | |
232 | { | |
7b5b9ef0 | 233 | return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); |
1da177e4 LT |
234 | } |
235 | ||
78608366 PM |
236 | static inline int is_mem_hardwall(const struct cpuset *cs) |
237 | { | |
238 | return test_bit(CS_MEM_HARDWALL, &cs->flags); | |
239 | } | |
240 | ||
029190c5 PJ |
241 | static inline int is_sched_load_balance(const struct cpuset *cs) |
242 | { | |
243 | return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); | |
244 | } | |
245 | ||
45b07ef3 PJ |
246 | static inline int is_memory_migrate(const struct cpuset *cs) |
247 | { | |
7b5b9ef0 | 248 | return test_bit(CS_MEMORY_MIGRATE, &cs->flags); |
45b07ef3 PJ |
249 | } |
250 | ||
825a46af PJ |
251 | static inline int is_spread_page(const struct cpuset *cs) |
252 | { | |
253 | return test_bit(CS_SPREAD_PAGE, &cs->flags); | |
254 | } | |
255 | ||
256 | static inline int is_spread_slab(const struct cpuset *cs) | |
257 | { | |
258 | return test_bit(CS_SPREAD_SLAB, &cs->flags); | |
259 | } | |
260 | ||
58b74842 WL |
261 | static inline int is_partition_root(const struct cpuset *cs) |
262 | { | |
3881b861 | 263 | return cs->partition_root_state > 0; |
58b74842 WL |
264 | } |
265 | ||
1da177e4 | 266 | static struct cpuset top_cpuset = { |
efeb77b2 TH |
267 | .flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) | |
268 | (1 << CS_MEM_EXCLUSIVE)), | |
58b74842 | 269 | .partition_root_state = PRS_ENABLED, |
1da177e4 LT |
270 | }; |
271 | ||
ae8086ce TH |
272 | /** |
273 | * cpuset_for_each_child - traverse online children of a cpuset | |
274 | * @child_cs: loop cursor pointing to the current child | |
492eb21b | 275 | * @pos_css: used for iteration |
ae8086ce TH |
276 | * @parent_cs: target cpuset to walk children of |
277 | * | |
278 | * Walk @child_cs through the online children of @parent_cs. Must be used | |
279 | * with RCU read locked. | |
280 | */ | |
492eb21b TH |
281 | #define cpuset_for_each_child(child_cs, pos_css, parent_cs) \ |
282 | css_for_each_child((pos_css), &(parent_cs)->css) \ | |
283 | if (is_cpuset_online(((child_cs) = css_cs((pos_css))))) | |
ae8086ce | 284 | |
fc560a26 TH |
285 | /** |
286 | * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants | |
287 | * @des_cs: loop cursor pointing to the current descendant | |
492eb21b | 288 | * @pos_css: used for iteration |
fc560a26 TH |
289 | * @root_cs: target cpuset to walk ancestor of |
290 | * | |
291 | * Walk @des_cs through the online descendants of @root_cs. Must be used | |
492eb21b | 292 | * with RCU read locked. The caller may modify @pos_css by calling |
bd8815a6 TH |
293 | * css_rightmost_descendant() to skip subtree. @root_cs is included in the |
294 | * iteration and the first node to be visited. | |
fc560a26 | 295 | */ |
492eb21b TH |
296 | #define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs) \ |
297 | css_for_each_descendant_pre((pos_css), &(root_cs)->css) \ | |
298 | if (is_cpuset_online(((des_cs) = css_cs((pos_css))))) | |
fc560a26 | 299 | |
1da177e4 | 300 | /* |
8447a0fe VD |
301 | * There are two global locks guarding cpuset structures - cpuset_mutex and |
302 | * callback_lock. We also require taking task_lock() when dereferencing a | |
303 | * task's cpuset pointer. See "The task_lock() exception", at the end of this | |
304 | * comment. | |
5d21cc2d | 305 | * |
8447a0fe | 306 | * A task must hold both locks to modify cpusets. If a task holds |
5d21cc2d | 307 | * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it |
8447a0fe | 308 | * is the only task able to also acquire callback_lock and be able to |
5d21cc2d TH |
309 | * modify cpusets. It can perform various checks on the cpuset structure |
310 | * first, knowing nothing will change. It can also allocate memory while | |
311 | * just holding cpuset_mutex. While it is performing these checks, various | |
8447a0fe VD |
312 | * callback routines can briefly acquire callback_lock to query cpusets. |
313 | * Once it is ready to make the changes, it takes callback_lock, blocking | |
5d21cc2d | 314 | * everyone else. |
053199ed PJ |
315 | * |
316 | * Calls to the kernel memory allocator can not be made while holding | |
8447a0fe | 317 | * callback_lock, as that would risk double tripping on callback_lock |
053199ed PJ |
318 | * from one of the callbacks into the cpuset code from within |
319 | * __alloc_pages(). | |
320 | * | |
8447a0fe | 321 | * If a task is only holding callback_lock, then it has read-only |
053199ed PJ |
322 | * access to cpusets. |
323 | * | |
58568d2a MX |
324 | * Now, the task_struct fields mems_allowed and mempolicy may be changed |
325 | * by other task, we use alloc_lock in the task_struct fields to protect | |
326 | * them. | |
053199ed | 327 | * |
8447a0fe | 328 | * The cpuset_common_file_read() handlers only hold callback_lock across |
053199ed PJ |
329 | * small pieces of code, such as when reading out possibly multi-word |
330 | * cpumasks and nodemasks. | |
331 | * | |
2df167a3 PM |
332 | * Accessing a task's cpuset should be done in accordance with the |
333 | * guidelines for accessing subsystem state in kernel/cgroup.c | |
1da177e4 LT |
334 | */ |
335 | ||
1243dc51 | 336 | DEFINE_STATIC_PERCPU_RWSEM(cpuset_rwsem); |
710da3c8 JL |
337 | |
338 | void cpuset_read_lock(void) | |
339 | { | |
340 | percpu_down_read(&cpuset_rwsem); | |
341 | } | |
342 | ||
343 | void cpuset_read_unlock(void) | |
344 | { | |
345 | percpu_up_read(&cpuset_rwsem); | |
346 | } | |
347 | ||
8447a0fe | 348 | static DEFINE_SPINLOCK(callback_lock); |
4247bdc6 | 349 | |
e93ad19d TH |
350 | static struct workqueue_struct *cpuset_migrate_mm_wq; |
351 | ||
3a5a6d0c TH |
352 | /* |
353 | * CPU / memory hotplug is handled asynchronously. | |
354 | */ | |
355 | static void cpuset_hotplug_workfn(struct work_struct *work); | |
3a5a6d0c TH |
356 | static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn); |
357 | ||
e44193d3 LZ |
358 | static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq); |
359 | ||
b8d1b8ee | 360 | /* |
0c05b9bd WL |
361 | * Cgroup v2 behavior is used on the "cpus" and "mems" control files when |
362 | * on default hierarchy or when the cpuset_v2_mode flag is set by mounting | |
363 | * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option. | |
364 | * With v2 behavior, "cpus" and "mems" are always what the users have | |
365 | * requested and won't be changed by hotplug events. Only the effective | |
366 | * cpus or mems will be affected. | |
b8d1b8ee WL |
367 | */ |
368 | static inline bool is_in_v2_mode(void) | |
369 | { | |
370 | return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || | |
371 | (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE); | |
372 | } | |
373 | ||
1da177e4 | 374 | /* |
300ed6cb | 375 | * Return in pmask the portion of a cpusets's cpus_allowed that |
1da177e4 | 376 | * are online. If none are online, walk up the cpuset hierarchy |
28b89b9e | 377 | * until we find one that does have some online cpus. |
1da177e4 LT |
378 | * |
379 | * One way or another, we guarantee to return some non-empty subset | |
5f054e31 | 380 | * of cpu_online_mask. |
1da177e4 | 381 | * |
8447a0fe | 382 | * Call with callback_lock or cpuset_mutex held. |
1da177e4 | 383 | */ |
c9710d80 | 384 | static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask) |
1da177e4 | 385 | { |
28b89b9e | 386 | while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) { |
c431069f | 387 | cs = parent_cs(cs); |
28b89b9e JP |
388 | if (unlikely(!cs)) { |
389 | /* | |
390 | * The top cpuset doesn't have any online cpu as a | |
391 | * consequence of a race between cpuset_hotplug_work | |
392 | * and cpu hotplug notifier. But we know the top | |
393 | * cpuset's effective_cpus is on its way to to be | |
394 | * identical to cpu_online_mask. | |
395 | */ | |
396 | cpumask_copy(pmask, cpu_online_mask); | |
397 | return; | |
398 | } | |
399 | } | |
ae1c8023 | 400 | cpumask_and(pmask, cs->effective_cpus, cpu_online_mask); |
1da177e4 LT |
401 | } |
402 | ||
403 | /* | |
404 | * Return in *pmask the portion of a cpusets's mems_allowed that | |
0e1e7c7a CL |
405 | * are online, with memory. If none are online with memory, walk |
406 | * up the cpuset hierarchy until we find one that does have some | |
40df2deb | 407 | * online mems. The top cpuset always has some mems online. |
1da177e4 LT |
408 | * |
409 | * One way or another, we guarantee to return some non-empty subset | |
38d7bee9 | 410 | * of node_states[N_MEMORY]. |
1da177e4 | 411 | * |
8447a0fe | 412 | * Call with callback_lock or cpuset_mutex held. |
1da177e4 | 413 | */ |
c9710d80 | 414 | static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) |
1da177e4 | 415 | { |
ae1c8023 | 416 | while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) |
c431069f | 417 | cs = parent_cs(cs); |
ae1c8023 | 418 | nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); |
1da177e4 LT |
419 | } |
420 | ||
f3b39d47 MX |
421 | /* |
422 | * update task's spread flag if cpuset's page/slab spread flag is set | |
423 | * | |
8447a0fe | 424 | * Call with callback_lock or cpuset_mutex held. |
f3b39d47 MX |
425 | */ |
426 | static void cpuset_update_task_spread_flag(struct cpuset *cs, | |
427 | struct task_struct *tsk) | |
428 | { | |
429 | if (is_spread_page(cs)) | |
2ad654bc | 430 | task_set_spread_page(tsk); |
f3b39d47 | 431 | else |
2ad654bc ZL |
432 | task_clear_spread_page(tsk); |
433 | ||
f3b39d47 | 434 | if (is_spread_slab(cs)) |
2ad654bc | 435 | task_set_spread_slab(tsk); |
f3b39d47 | 436 | else |
2ad654bc | 437 | task_clear_spread_slab(tsk); |
f3b39d47 MX |
438 | } |
439 | ||
1da177e4 LT |
440 | /* |
441 | * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q? | |
442 | * | |
443 | * One cpuset is a subset of another if all its allowed CPUs and | |
444 | * Memory Nodes are a subset of the other, and its exclusive flags | |
5d21cc2d | 445 | * are only set if the other's are set. Call holding cpuset_mutex. |
1da177e4 LT |
446 | */ |
447 | ||
448 | static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) | |
449 | { | |
300ed6cb | 450 | return cpumask_subset(p->cpus_allowed, q->cpus_allowed) && |
1da177e4 LT |
451 | nodes_subset(p->mems_allowed, q->mems_allowed) && |
452 | is_cpu_exclusive(p) <= is_cpu_exclusive(q) && | |
453 | is_mem_exclusive(p) <= is_mem_exclusive(q); | |
454 | } | |
455 | ||
bf92370c WL |
456 | /** |
457 | * alloc_cpumasks - allocate three cpumasks for cpuset | |
458 | * @cs: the cpuset that have cpumasks to be allocated. | |
459 | * @tmp: the tmpmasks structure pointer | |
460 | * Return: 0 if successful, -ENOMEM otherwise. | |
461 | * | |
462 | * Only one of the two input arguments should be non-NULL. | |
463 | */ | |
464 | static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) | |
465 | { | |
466 | cpumask_var_t *pmask1, *pmask2, *pmask3; | |
467 | ||
468 | if (cs) { | |
469 | pmask1 = &cs->cpus_allowed; | |
470 | pmask2 = &cs->effective_cpus; | |
471 | pmask3 = &cs->subparts_cpus; | |
472 | } else { | |
473 | pmask1 = &tmp->new_cpus; | |
474 | pmask2 = &tmp->addmask; | |
475 | pmask3 = &tmp->delmask; | |
476 | } | |
477 | ||
478 | if (!zalloc_cpumask_var(pmask1, GFP_KERNEL)) | |
479 | return -ENOMEM; | |
480 | ||
481 | if (!zalloc_cpumask_var(pmask2, GFP_KERNEL)) | |
482 | goto free_one; | |
483 | ||
484 | if (!zalloc_cpumask_var(pmask3, GFP_KERNEL)) | |
485 | goto free_two; | |
486 | ||
487 | return 0; | |
488 | ||
489 | free_two: | |
490 | free_cpumask_var(*pmask2); | |
491 | free_one: | |
492 | free_cpumask_var(*pmask1); | |
493 | return -ENOMEM; | |
494 | } | |
495 | ||
496 | /** | |
497 | * free_cpumasks - free cpumasks in a tmpmasks structure | |
498 | * @cs: the cpuset that have cpumasks to be free. | |
499 | * @tmp: the tmpmasks structure pointer | |
500 | */ | |
501 | static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) | |
502 | { | |
503 | if (cs) { | |
504 | free_cpumask_var(cs->cpus_allowed); | |
505 | free_cpumask_var(cs->effective_cpus); | |
506 | free_cpumask_var(cs->subparts_cpus); | |
507 | } | |
508 | if (tmp) { | |
509 | free_cpumask_var(tmp->new_cpus); | |
510 | free_cpumask_var(tmp->addmask); | |
511 | free_cpumask_var(tmp->delmask); | |
512 | } | |
513 | } | |
514 | ||
645fcc9d LZ |
515 | /** |
516 | * alloc_trial_cpuset - allocate a trial cpuset | |
517 | * @cs: the cpuset that the trial cpuset duplicates | |
518 | */ | |
c9710d80 | 519 | static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) |
645fcc9d | 520 | { |
300ed6cb LZ |
521 | struct cpuset *trial; |
522 | ||
523 | trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); | |
524 | if (!trial) | |
525 | return NULL; | |
526 | ||
bf92370c WL |
527 | if (alloc_cpumasks(trial, NULL)) { |
528 | kfree(trial); | |
529 | return NULL; | |
530 | } | |
300ed6cb | 531 | |
e2b9a3d7 LZ |
532 | cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); |
533 | cpumask_copy(trial->effective_cpus, cs->effective_cpus); | |
300ed6cb | 534 | return trial; |
645fcc9d LZ |
535 | } |
536 | ||
537 | /** | |
bf92370c WL |
538 | * free_cpuset - free the cpuset |
539 | * @cs: the cpuset to be freed | |
645fcc9d | 540 | */ |
bf92370c | 541 | static inline void free_cpuset(struct cpuset *cs) |
645fcc9d | 542 | { |
bf92370c WL |
543 | free_cpumasks(cs, NULL); |
544 | kfree(cs); | |
645fcc9d LZ |
545 | } |
546 | ||
1da177e4 LT |
547 | /* |
548 | * validate_change() - Used to validate that any proposed cpuset change | |
549 | * follows the structural rules for cpusets. | |
550 | * | |
551 | * If we replaced the flag and mask values of the current cpuset | |
552 | * (cur) with those values in the trial cpuset (trial), would | |
553 | * our various subset and exclusive rules still be valid? Presumes | |
5d21cc2d | 554 | * cpuset_mutex held. |
1da177e4 LT |
555 | * |
556 | * 'cur' is the address of an actual, in-use cpuset. Operations | |
557 | * such as list traversal that depend on the actual address of the | |
558 | * cpuset in the list must use cur below, not trial. | |
559 | * | |
560 | * 'trial' is the address of bulk structure copy of cur, with | |
561 | * perhaps one or more of the fields cpus_allowed, mems_allowed, | |
562 | * or flags changed to new, trial values. | |
563 | * | |
564 | * Return 0 if valid, -errno if not. | |
565 | */ | |
566 | ||
c9710d80 | 567 | static int validate_change(struct cpuset *cur, struct cpuset *trial) |
1da177e4 | 568 | { |
492eb21b | 569 | struct cgroup_subsys_state *css; |
1da177e4 | 570 | struct cpuset *c, *par; |
ae8086ce TH |
571 | int ret; |
572 | ||
573 | rcu_read_lock(); | |
1da177e4 LT |
574 | |
575 | /* Each of our child cpusets must be a subset of us */ | |
ae8086ce | 576 | ret = -EBUSY; |
492eb21b | 577 | cpuset_for_each_child(c, css, cur) |
ae8086ce TH |
578 | if (!is_cpuset_subset(c, trial)) |
579 | goto out; | |
1da177e4 LT |
580 | |
581 | /* Remaining checks don't apply to root cpuset */ | |
ae8086ce | 582 | ret = 0; |
69604067 | 583 | if (cur == &top_cpuset) |
ae8086ce | 584 | goto out; |
1da177e4 | 585 | |
c431069f | 586 | par = parent_cs(cur); |
69604067 | 587 | |
7e88291b | 588 | /* On legacy hiearchy, we must be a subset of our parent cpuset. */ |
ae8086ce | 589 | ret = -EACCES; |
b8d1b8ee | 590 | if (!is_in_v2_mode() && !is_cpuset_subset(trial, par)) |
ae8086ce | 591 | goto out; |
1da177e4 | 592 | |
2df167a3 PM |
593 | /* |
594 | * If either I or some sibling (!= me) is exclusive, we can't | |
595 | * overlap | |
596 | */ | |
ae8086ce | 597 | ret = -EINVAL; |
492eb21b | 598 | cpuset_for_each_child(c, css, par) { |
1da177e4 LT |
599 | if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) && |
600 | c != cur && | |
300ed6cb | 601 | cpumask_intersects(trial->cpus_allowed, c->cpus_allowed)) |
ae8086ce | 602 | goto out; |
1da177e4 LT |
603 | if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) && |
604 | c != cur && | |
605 | nodes_intersects(trial->mems_allowed, c->mems_allowed)) | |
ae8086ce | 606 | goto out; |
1da177e4 LT |
607 | } |
608 | ||
452477fa TH |
609 | /* |
610 | * Cpusets with tasks - existing or newly being attached - can't | |
1c09b195 | 611 | * be changed to have empty cpus_allowed or mems_allowed. |
452477fa | 612 | */ |
ae8086ce | 613 | ret = -ENOSPC; |
27bd4dbb | 614 | if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) { |
1c09b195 LZ |
615 | if (!cpumask_empty(cur->cpus_allowed) && |
616 | cpumask_empty(trial->cpus_allowed)) | |
617 | goto out; | |
618 | if (!nodes_empty(cur->mems_allowed) && | |
619 | nodes_empty(trial->mems_allowed)) | |
620 | goto out; | |
621 | } | |
020958b6 | 622 | |
f82f8042 JL |
623 | /* |
624 | * We can't shrink if we won't have enough room for SCHED_DEADLINE | |
625 | * tasks. | |
626 | */ | |
627 | ret = -EBUSY; | |
628 | if (is_cpu_exclusive(cur) && | |
629 | !cpuset_cpumask_can_shrink(cur->cpus_allowed, | |
630 | trial->cpus_allowed)) | |
631 | goto out; | |
632 | ||
ae8086ce TH |
633 | ret = 0; |
634 | out: | |
635 | rcu_read_unlock(); | |
636 | return ret; | |
1da177e4 LT |
637 | } |
638 | ||
db7f47cf | 639 | #ifdef CONFIG_SMP |
029190c5 | 640 | /* |
cf417141 | 641 | * Helper routine for generate_sched_domains(). |
8b5f1c52 | 642 | * Do cpusets a, b have overlapping effective cpus_allowed masks? |
029190c5 | 643 | */ |
029190c5 PJ |
644 | static int cpusets_overlap(struct cpuset *a, struct cpuset *b) |
645 | { | |
8b5f1c52 | 646 | return cpumask_intersects(a->effective_cpus, b->effective_cpus); |
029190c5 PJ |
647 | } |
648 | ||
1d3504fc HS |
649 | static void |
650 | update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c) | |
651 | { | |
1d3504fc HS |
652 | if (dattr->relax_domain_level < c->relax_domain_level) |
653 | dattr->relax_domain_level = c->relax_domain_level; | |
654 | return; | |
655 | } | |
656 | ||
fc560a26 TH |
657 | static void update_domain_attr_tree(struct sched_domain_attr *dattr, |
658 | struct cpuset *root_cs) | |
f5393693 | 659 | { |
fc560a26 | 660 | struct cpuset *cp; |
492eb21b | 661 | struct cgroup_subsys_state *pos_css; |
f5393693 | 662 | |
fc560a26 | 663 | rcu_read_lock(); |
492eb21b | 664 | cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { |
fc560a26 TH |
665 | /* skip the whole subtree if @cp doesn't have any CPU */ |
666 | if (cpumask_empty(cp->cpus_allowed)) { | |
492eb21b | 667 | pos_css = css_rightmost_descendant(pos_css); |
f5393693 | 668 | continue; |
fc560a26 | 669 | } |
f5393693 LJ |
670 | |
671 | if (is_sched_load_balance(cp)) | |
672 | update_domain_attr(dattr, cp); | |
f5393693 | 673 | } |
fc560a26 | 674 | rcu_read_unlock(); |
f5393693 LJ |
675 | } |
676 | ||
be040bea PB |
677 | /* Must be called with cpuset_mutex held. */ |
678 | static inline int nr_cpusets(void) | |
679 | { | |
680 | /* jump label reference count + the top-level cpuset */ | |
681 | return static_key_count(&cpusets_enabled_key.key) + 1; | |
682 | } | |
683 | ||
029190c5 | 684 | /* |
cf417141 MK |
685 | * generate_sched_domains() |
686 | * | |
687 | * This function builds a partial partition of the systems CPUs | |
688 | * A 'partial partition' is a set of non-overlapping subsets whose | |
689 | * union is a subset of that set. | |
0a0fca9d | 690 | * The output of this function needs to be passed to kernel/sched/core.c |
cf417141 MK |
691 | * partition_sched_domains() routine, which will rebuild the scheduler's |
692 | * load balancing domains (sched domains) as specified by that partial | |
693 | * partition. | |
029190c5 | 694 | * |
da82c92f | 695 | * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst |
029190c5 PJ |
696 | * for a background explanation of this. |
697 | * | |
698 | * Does not return errors, on the theory that the callers of this | |
699 | * routine would rather not worry about failures to rebuild sched | |
700 | * domains when operating in the severe memory shortage situations | |
701 | * that could cause allocation failures below. | |
702 | * | |
5d21cc2d | 703 | * Must be called with cpuset_mutex held. |
029190c5 PJ |
704 | * |
705 | * The three key local variables below are: | |
b6fbbf31 JL |
706 | * cp - cpuset pointer, used (together with pos_css) to perform a |
707 | * top-down scan of all cpusets. For our purposes, rebuilding | |
708 | * the schedulers sched domains, we can ignore !is_sched_load_ | |
709 | * balance cpusets. | |
029190c5 PJ |
710 | * csa - (for CpuSet Array) Array of pointers to all the cpusets |
711 | * that need to be load balanced, for convenient iterative | |
712 | * access by the subsequent code that finds the best partition, | |
713 | * i.e the set of domains (subsets) of CPUs such that the | |
714 | * cpus_allowed of every cpuset marked is_sched_load_balance | |
715 | * is a subset of one of these domains, while there are as | |
716 | * many such domains as possible, each as small as possible. | |
717 | * doms - Conversion of 'csa' to an array of cpumasks, for passing to | |
0a0fca9d | 718 | * the kernel/sched/core.c routine partition_sched_domains() in a |
029190c5 PJ |
719 | * convenient format, that can be easily compared to the prior |
720 | * value to determine what partition elements (sched domains) | |
721 | * were changed (added or removed.) | |
722 | * | |
723 | * Finding the best partition (set of domains): | |
724 | * The triple nested loops below over i, j, k scan over the | |
725 | * load balanced cpusets (using the array of cpuset pointers in | |
726 | * csa[]) looking for pairs of cpusets that have overlapping | |
727 | * cpus_allowed, but which don't have the same 'pn' partition | |
728 | * number and gives them in the same partition number. It keeps | |
729 | * looping on the 'restart' label until it can no longer find | |
730 | * any such pairs. | |
731 | * | |
732 | * The union of the cpus_allowed masks from the set of | |
733 | * all cpusets having the same 'pn' value then form the one | |
734 | * element of the partition (one sched domain) to be passed to | |
735 | * partition_sched_domains(). | |
736 | */ | |
acc3f5d7 | 737 | static int generate_sched_domains(cpumask_var_t **domains, |
cf417141 | 738 | struct sched_domain_attr **attributes) |
029190c5 | 739 | { |
b6fbbf31 | 740 | struct cpuset *cp; /* top-down scan of cpusets */ |
029190c5 PJ |
741 | struct cpuset **csa; /* array of all cpuset ptrs */ |
742 | int csn; /* how many cpuset ptrs in csa so far */ | |
743 | int i, j, k; /* indices for partition finding loops */ | |
acc3f5d7 | 744 | cpumask_var_t *doms; /* resulting partition; i.e. sched domains */ |
1d3504fc | 745 | struct sched_domain_attr *dattr; /* attributes for custom domains */ |
1583715d | 746 | int ndoms = 0; /* number of sched domains in result */ |
6af866af | 747 | int nslot; /* next empty doms[] struct cpumask slot */ |
492eb21b | 748 | struct cgroup_subsys_state *pos_css; |
0ccea8fe | 749 | bool root_load_balance = is_sched_load_balance(&top_cpuset); |
029190c5 | 750 | |
029190c5 | 751 | doms = NULL; |
1d3504fc | 752 | dattr = NULL; |
cf417141 | 753 | csa = NULL; |
029190c5 PJ |
754 | |
755 | /* Special case for the 99% of systems with one, full, sched domain */ | |
0ccea8fe | 756 | if (root_load_balance && !top_cpuset.nr_subparts_cpus) { |
acc3f5d7 RR |
757 | ndoms = 1; |
758 | doms = alloc_sched_domains(ndoms); | |
029190c5 | 759 | if (!doms) |
cf417141 MK |
760 | goto done; |
761 | ||
1d3504fc HS |
762 | dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL); |
763 | if (dattr) { | |
764 | *dattr = SD_ATTR_INIT; | |
93a65575 | 765 | update_domain_attr_tree(dattr, &top_cpuset); |
1d3504fc | 766 | } |
47b8ea71 | 767 | cpumask_and(doms[0], top_cpuset.effective_cpus, |
edb93821 | 768 | housekeeping_cpumask(HK_FLAG_DOMAIN)); |
cf417141 | 769 | |
cf417141 | 770 | goto done; |
029190c5 PJ |
771 | } |
772 | ||
6da2ec56 | 773 | csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL); |
029190c5 PJ |
774 | if (!csa) |
775 | goto done; | |
776 | csn = 0; | |
777 | ||
fc560a26 | 778 | rcu_read_lock(); |
0ccea8fe WL |
779 | if (root_load_balance) |
780 | csa[csn++] = &top_cpuset; | |
492eb21b | 781 | cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) { |
bd8815a6 TH |
782 | if (cp == &top_cpuset) |
783 | continue; | |
f5393693 | 784 | /* |
fc560a26 TH |
785 | * Continue traversing beyond @cp iff @cp has some CPUs and |
786 | * isn't load balancing. The former is obvious. The | |
787 | * latter: All child cpusets contain a subset of the | |
788 | * parent's cpus, so just skip them, and then we call | |
789 | * update_domain_attr_tree() to calc relax_domain_level of | |
790 | * the corresponding sched domain. | |
0ccea8fe WL |
791 | * |
792 | * If root is load-balancing, we can skip @cp if it | |
793 | * is a subset of the root's effective_cpus. | |
f5393693 | 794 | */ |
fc560a26 | 795 | if (!cpumask_empty(cp->cpus_allowed) && |
47b8ea71 | 796 | !(is_sched_load_balance(cp) && |
edb93821 FW |
797 | cpumask_intersects(cp->cpus_allowed, |
798 | housekeeping_cpumask(HK_FLAG_DOMAIN)))) | |
f5393693 | 799 | continue; |
489a5393 | 800 | |
0ccea8fe WL |
801 | if (root_load_balance && |
802 | cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus)) | |
803 | continue; | |
804 | ||
cd1cb335 VS |
805 | if (is_sched_load_balance(cp) && |
806 | !cpumask_empty(cp->effective_cpus)) | |
fc560a26 TH |
807 | csa[csn++] = cp; |
808 | ||
0ccea8fe WL |
809 | /* skip @cp's subtree if not a partition root */ |
810 | if (!is_partition_root(cp)) | |
811 | pos_css = css_rightmost_descendant(pos_css); | |
fc560a26 TH |
812 | } |
813 | rcu_read_unlock(); | |
029190c5 PJ |
814 | |
815 | for (i = 0; i < csn; i++) | |
816 | csa[i]->pn = i; | |
817 | ndoms = csn; | |
818 | ||
819 | restart: | |
820 | /* Find the best partition (set of sched domains) */ | |
821 | for (i = 0; i < csn; i++) { | |
822 | struct cpuset *a = csa[i]; | |
823 | int apn = a->pn; | |
824 | ||
825 | for (j = 0; j < csn; j++) { | |
826 | struct cpuset *b = csa[j]; | |
827 | int bpn = b->pn; | |
828 | ||
829 | if (apn != bpn && cpusets_overlap(a, b)) { | |
830 | for (k = 0; k < csn; k++) { | |
831 | struct cpuset *c = csa[k]; | |
832 | ||
833 | if (c->pn == bpn) | |
834 | c->pn = apn; | |
835 | } | |
836 | ndoms--; /* one less element */ | |
837 | goto restart; | |
838 | } | |
839 | } | |
840 | } | |
841 | ||
cf417141 MK |
842 | /* |
843 | * Now we know how many domains to create. | |
844 | * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. | |
845 | */ | |
acc3f5d7 | 846 | doms = alloc_sched_domains(ndoms); |
700018e0 | 847 | if (!doms) |
cf417141 | 848 | goto done; |
cf417141 MK |
849 | |
850 | /* | |
851 | * The rest of the code, including the scheduler, can deal with | |
852 | * dattr==NULL case. No need to abort if alloc fails. | |
853 | */ | |
6da2ec56 KC |
854 | dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr), |
855 | GFP_KERNEL); | |
029190c5 PJ |
856 | |
857 | for (nslot = 0, i = 0; i < csn; i++) { | |
858 | struct cpuset *a = csa[i]; | |
6af866af | 859 | struct cpumask *dp; |
029190c5 PJ |
860 | int apn = a->pn; |
861 | ||
cf417141 MK |
862 | if (apn < 0) { |
863 | /* Skip completed partitions */ | |
864 | continue; | |
865 | } | |
866 | ||
acc3f5d7 | 867 | dp = doms[nslot]; |
cf417141 MK |
868 | |
869 | if (nslot == ndoms) { | |
870 | static int warnings = 10; | |
871 | if (warnings) { | |
12d3089c FF |
872 | pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n", |
873 | nslot, ndoms, csn, i, apn); | |
cf417141 | 874 | warnings--; |
029190c5 | 875 | } |
cf417141 MK |
876 | continue; |
877 | } | |
029190c5 | 878 | |
6af866af | 879 | cpumask_clear(dp); |
cf417141 MK |
880 | if (dattr) |
881 | *(dattr + nslot) = SD_ATTR_INIT; | |
882 | for (j = i; j < csn; j++) { | |
883 | struct cpuset *b = csa[j]; | |
884 | ||
885 | if (apn == b->pn) { | |
8b5f1c52 | 886 | cpumask_or(dp, dp, b->effective_cpus); |
edb93821 | 887 | cpumask_and(dp, dp, housekeeping_cpumask(HK_FLAG_DOMAIN)); |
cf417141 MK |
888 | if (dattr) |
889 | update_domain_attr_tree(dattr + nslot, b); | |
890 | ||
891 | /* Done with this partition */ | |
892 | b->pn = -1; | |
029190c5 | 893 | } |
029190c5 | 894 | } |
cf417141 | 895 | nslot++; |
029190c5 PJ |
896 | } |
897 | BUG_ON(nslot != ndoms); | |
898 | ||
cf417141 MK |
899 | done: |
900 | kfree(csa); | |
901 | ||
700018e0 LZ |
902 | /* |
903 | * Fallback to the default domain if kmalloc() failed. | |
904 | * See comments in partition_sched_domains(). | |
905 | */ | |
906 | if (doms == NULL) | |
907 | ndoms = 1; | |
908 | ||
cf417141 MK |
909 | *domains = doms; |
910 | *attributes = dattr; | |
911 | return ndoms; | |
912 | } | |
913 | ||
f9a25f77 MP |
914 | static void update_tasks_root_domain(struct cpuset *cs) |
915 | { | |
916 | struct css_task_iter it; | |
917 | struct task_struct *task; | |
918 | ||
919 | css_task_iter_start(&cs->css, 0, &it); | |
920 | ||
921 | while ((task = css_task_iter_next(&it))) | |
922 | dl_add_task_root_domain(task); | |
923 | ||
924 | css_task_iter_end(&it); | |
925 | } | |
926 | ||
927 | static void rebuild_root_domains(void) | |
928 | { | |
929 | struct cpuset *cs = NULL; | |
930 | struct cgroup_subsys_state *pos_css; | |
931 | ||
1243dc51 | 932 | percpu_rwsem_assert_held(&cpuset_rwsem); |
f9a25f77 MP |
933 | lockdep_assert_cpus_held(); |
934 | lockdep_assert_held(&sched_domains_mutex); | |
935 | ||
f9a25f77 MP |
936 | rcu_read_lock(); |
937 | ||
938 | /* | |
939 | * Clear default root domain DL accounting, it will be computed again | |
940 | * if a task belongs to it. | |
941 | */ | |
942 | dl_clear_root_domain(&def_root_domain); | |
943 | ||
944 | cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { | |
945 | ||
946 | if (cpumask_empty(cs->effective_cpus)) { | |
947 | pos_css = css_rightmost_descendant(pos_css); | |
948 | continue; | |
949 | } | |
950 | ||
951 | css_get(&cs->css); | |
952 | ||
953 | rcu_read_unlock(); | |
954 | ||
955 | update_tasks_root_domain(cs); | |
956 | ||
957 | rcu_read_lock(); | |
958 | css_put(&cs->css); | |
959 | } | |
960 | rcu_read_unlock(); | |
961 | } | |
962 | ||
963 | static void | |
964 | partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[], | |
965 | struct sched_domain_attr *dattr_new) | |
966 | { | |
967 | mutex_lock(&sched_domains_mutex); | |
968 | partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); | |
969 | rebuild_root_domains(); | |
970 | mutex_unlock(&sched_domains_mutex); | |
971 | } | |
972 | ||
cf417141 MK |
973 | /* |
974 | * Rebuild scheduler domains. | |
975 | * | |
699140ba TH |
976 | * If the flag 'sched_load_balance' of any cpuset with non-empty |
977 | * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset | |
978 | * which has that flag enabled, or if any cpuset with a non-empty | |
979 | * 'cpus' is removed, then call this routine to rebuild the | |
980 | * scheduler's dynamic sched domains. | |
cf417141 | 981 | * |
5d21cc2d | 982 | * Call with cpuset_mutex held. Takes get_online_cpus(). |
cf417141 | 983 | */ |
699140ba | 984 | static void rebuild_sched_domains_locked(void) |
cf417141 MK |
985 | { |
986 | struct sched_domain_attr *attr; | |
acc3f5d7 | 987 | cpumask_var_t *doms; |
cf417141 MK |
988 | int ndoms; |
989 | ||
d74b27d6 | 990 | lockdep_assert_cpus_held(); |
1243dc51 | 991 | percpu_rwsem_assert_held(&cpuset_rwsem); |
cf417141 | 992 | |
5b16c2a4 LZ |
993 | /* |
994 | * We have raced with CPU hotplug. Don't do anything to avoid | |
995 | * passing doms with offlined cpu to partition_sched_domains(). | |
996 | * Anyways, hotplug work item will rebuild sched domains. | |
997 | */ | |
0ccea8fe WL |
998 | if (!top_cpuset.nr_subparts_cpus && |
999 | !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask)) | |
d74b27d6 | 1000 | return; |
0ccea8fe WL |
1001 | |
1002 | if (top_cpuset.nr_subparts_cpus && | |
1003 | !cpumask_subset(top_cpuset.effective_cpus, cpu_active_mask)) | |
d74b27d6 | 1004 | return; |
5b16c2a4 | 1005 | |
cf417141 | 1006 | /* Generate domain masks and attrs */ |
cf417141 | 1007 | ndoms = generate_sched_domains(&doms, &attr); |
cf417141 MK |
1008 | |
1009 | /* Have scheduler rebuild the domains */ | |
f9a25f77 | 1010 | partition_and_rebuild_sched_domains(ndoms, doms, attr); |
cf417141 | 1011 | } |
db7f47cf | 1012 | #else /* !CONFIG_SMP */ |
699140ba | 1013 | static void rebuild_sched_domains_locked(void) |
db7f47cf PM |
1014 | { |
1015 | } | |
db7f47cf | 1016 | #endif /* CONFIG_SMP */ |
029190c5 | 1017 | |
cf417141 MK |
1018 | void rebuild_sched_domains(void) |
1019 | { | |
d74b27d6 | 1020 | get_online_cpus(); |
1243dc51 | 1021 | percpu_down_write(&cpuset_rwsem); |
699140ba | 1022 | rebuild_sched_domains_locked(); |
1243dc51 | 1023 | percpu_up_write(&cpuset_rwsem); |
d74b27d6 | 1024 | put_online_cpus(); |
029190c5 PJ |
1025 | } |
1026 | ||
0b2f630a MX |
1027 | /** |
1028 | * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. | |
1029 | * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed | |
0b2f630a | 1030 | * |
d66393e5 TH |
1031 | * Iterate through each task of @cs updating its cpus_allowed to the |
1032 | * effective cpuset's. As this function is called with cpuset_mutex held, | |
1033 | * cpuset membership stays stable. | |
0b2f630a | 1034 | */ |
d66393e5 | 1035 | static void update_tasks_cpumask(struct cpuset *cs) |
0b2f630a | 1036 | { |
d66393e5 TH |
1037 | struct css_task_iter it; |
1038 | struct task_struct *task; | |
1039 | ||
bc2fb7ed | 1040 | css_task_iter_start(&cs->css, 0, &it); |
d66393e5 | 1041 | while ((task = css_task_iter_next(&it))) |
ae1c8023 | 1042 | set_cpus_allowed_ptr(task, cs->effective_cpus); |
d66393e5 | 1043 | css_task_iter_end(&it); |
0b2f630a MX |
1044 | } |
1045 | ||
ee8dde0c WL |
1046 | /** |
1047 | * compute_effective_cpumask - Compute the effective cpumask of the cpuset | |
1048 | * @new_cpus: the temp variable for the new effective_cpus mask | |
1049 | * @cs: the cpuset the need to recompute the new effective_cpus mask | |
1050 | * @parent: the parent cpuset | |
1051 | * | |
1052 | * If the parent has subpartition CPUs, include them in the list of | |
4b842da2 WL |
1053 | * allowable CPUs in computing the new effective_cpus mask. Since offlined |
1054 | * CPUs are not removed from subparts_cpus, we have to use cpu_active_mask | |
1055 | * to mask those out. | |
ee8dde0c WL |
1056 | */ |
1057 | static void compute_effective_cpumask(struct cpumask *new_cpus, | |
1058 | struct cpuset *cs, struct cpuset *parent) | |
1059 | { | |
1060 | if (parent->nr_subparts_cpus) { | |
1061 | cpumask_or(new_cpus, parent->effective_cpus, | |
1062 | parent->subparts_cpus); | |
1063 | cpumask_and(new_cpus, new_cpus, cs->cpus_allowed); | |
4b842da2 | 1064 | cpumask_and(new_cpus, new_cpus, cpu_active_mask); |
ee8dde0c WL |
1065 | } else { |
1066 | cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus); | |
1067 | } | |
1068 | } | |
1069 | ||
1070 | /* | |
1071 | * Commands for update_parent_subparts_cpumask | |
1072 | */ | |
1073 | enum subparts_cmd { | |
1074 | partcmd_enable, /* Enable partition root */ | |
1075 | partcmd_disable, /* Disable partition root */ | |
1076 | partcmd_update, /* Update parent's subparts_cpus */ | |
1077 | }; | |
1078 | ||
1079 | /** | |
1080 | * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset | |
1081 | * @cpuset: The cpuset that requests change in partition root state | |
1082 | * @cmd: Partition root state change command | |
1083 | * @newmask: Optional new cpumask for partcmd_update | |
1084 | * @tmp: Temporary addmask and delmask | |
1085 | * Return: 0, 1 or an error code | |
1086 | * | |
1087 | * For partcmd_enable, the cpuset is being transformed from a non-partition | |
1088 | * root to a partition root. The cpus_allowed mask of the given cpuset will | |
1089 | * be put into parent's subparts_cpus and taken away from parent's | |
1090 | * effective_cpus. The function will return 0 if all the CPUs listed in | |
1091 | * cpus_allowed can be granted or an error code will be returned. | |
1092 | * | |
1093 | * For partcmd_disable, the cpuset is being transofrmed from a partition | |
1094 | * root back to a non-partition root. any CPUs in cpus_allowed that are in | |
1095 | * parent's subparts_cpus will be taken away from that cpumask and put back | |
1096 | * into parent's effective_cpus. 0 should always be returned. | |
1097 | * | |
1098 | * For partcmd_update, if the optional newmask is specified, the cpu | |
1099 | * list is to be changed from cpus_allowed to newmask. Otherwise, | |
3881b861 WL |
1100 | * cpus_allowed is assumed to remain the same. The cpuset should either |
1101 | * be a partition root or an invalid partition root. The partition root | |
1102 | * state may change if newmask is NULL and none of the requested CPUs can | |
1103 | * be granted by the parent. The function will return 1 if changes to | |
1104 | * parent's subparts_cpus and effective_cpus happen or 0 otherwise. | |
1105 | * Error code should only be returned when newmask is non-NULL. | |
ee8dde0c WL |
1106 | * |
1107 | * The partcmd_enable and partcmd_disable commands are used by | |
1108 | * update_prstate(). The partcmd_update command is used by | |
1109 | * update_cpumasks_hier() with newmask NULL and update_cpumask() with | |
1110 | * newmask set. | |
1111 | * | |
1112 | * The checking is more strict when enabling partition root than the | |
1113 | * other two commands. | |
1114 | * | |
1115 | * Because of the implicit cpu exclusive nature of a partition root, | |
1116 | * cpumask changes that violates the cpu exclusivity rule will not be | |
1117 | * permitted when checked by validate_change(). The validate_change() | |
1118 | * function will also prevent any changes to the cpu list if it is not | |
1119 | * a superset of children's cpu lists. | |
1120 | */ | |
1121 | static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd, | |
1122 | struct cpumask *newmask, | |
1123 | struct tmpmasks *tmp) | |
1124 | { | |
1125 | struct cpuset *parent = parent_cs(cpuset); | |
1126 | int adding; /* Moving cpus from effective_cpus to subparts_cpus */ | |
1127 | int deleting; /* Moving cpus from subparts_cpus to effective_cpus */ | |
3881b861 | 1128 | bool part_error = false; /* Partition error? */ |
ee8dde0c | 1129 | |
1243dc51 | 1130 | percpu_rwsem_assert_held(&cpuset_rwsem); |
ee8dde0c WL |
1131 | |
1132 | /* | |
1133 | * The parent must be a partition root. | |
1134 | * The new cpumask, if present, or the current cpus_allowed must | |
1135 | * not be empty. | |
1136 | */ | |
1137 | if (!is_partition_root(parent) || | |
1138 | (newmask && cpumask_empty(newmask)) || | |
1139 | (!newmask && cpumask_empty(cpuset->cpus_allowed))) | |
1140 | return -EINVAL; | |
1141 | ||
1142 | /* | |
1143 | * Enabling/disabling partition root is not allowed if there are | |
1144 | * online children. | |
1145 | */ | |
1146 | if ((cmd != partcmd_update) && css_has_online_children(&cpuset->css)) | |
1147 | return -EBUSY; | |
1148 | ||
1149 | /* | |
1150 | * Enabling partition root is not allowed if not all the CPUs | |
1151 | * can be granted from parent's effective_cpus or at least one | |
1152 | * CPU will be left after that. | |
1153 | */ | |
1154 | if ((cmd == partcmd_enable) && | |
1155 | (!cpumask_subset(cpuset->cpus_allowed, parent->effective_cpus) || | |
1156 | cpumask_equal(cpuset->cpus_allowed, parent->effective_cpus))) | |
1157 | return -EINVAL; | |
1158 | ||
1159 | /* | |
1160 | * A cpumask update cannot make parent's effective_cpus become empty. | |
1161 | */ | |
1162 | adding = deleting = false; | |
1163 | if (cmd == partcmd_enable) { | |
1164 | cpumask_copy(tmp->addmask, cpuset->cpus_allowed); | |
1165 | adding = true; | |
1166 | } else if (cmd == partcmd_disable) { | |
1167 | deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, | |
1168 | parent->subparts_cpus); | |
1169 | } else if (newmask) { | |
1170 | /* | |
1171 | * partcmd_update with newmask: | |
1172 | * | |
1173 | * delmask = cpus_allowed & ~newmask & parent->subparts_cpus | |
1174 | * addmask = newmask & parent->effective_cpus | |
1175 | * & ~parent->subparts_cpus | |
1176 | */ | |
1177 | cpumask_andnot(tmp->delmask, cpuset->cpus_allowed, newmask); | |
1178 | deleting = cpumask_and(tmp->delmask, tmp->delmask, | |
1179 | parent->subparts_cpus); | |
1180 | ||
1181 | cpumask_and(tmp->addmask, newmask, parent->effective_cpus); | |
1182 | adding = cpumask_andnot(tmp->addmask, tmp->addmask, | |
1183 | parent->subparts_cpus); | |
1184 | /* | |
1185 | * Return error if the new effective_cpus could become empty. | |
1186 | */ | |
4b842da2 WL |
1187 | if (adding && |
1188 | cpumask_equal(parent->effective_cpus, tmp->addmask)) { | |
1189 | if (!deleting) | |
1190 | return -EINVAL; | |
1191 | /* | |
1192 | * As some of the CPUs in subparts_cpus might have | |
1193 | * been offlined, we need to compute the real delmask | |
1194 | * to confirm that. | |
1195 | */ | |
1196 | if (!cpumask_and(tmp->addmask, tmp->delmask, | |
1197 | cpu_active_mask)) | |
1198 | return -EINVAL; | |
1199 | cpumask_copy(tmp->addmask, parent->effective_cpus); | |
1200 | } | |
ee8dde0c WL |
1201 | } else { |
1202 | /* | |
1203 | * partcmd_update w/o newmask: | |
1204 | * | |
1205 | * addmask = cpus_allowed & parent->effectiveb_cpus | |
1206 | * | |
1207 | * Note that parent's subparts_cpus may have been | |
3881b861 WL |
1208 | * pre-shrunk in case there is a change in the cpu list. |
1209 | * So no deletion is needed. | |
ee8dde0c WL |
1210 | */ |
1211 | adding = cpumask_and(tmp->addmask, cpuset->cpus_allowed, | |
1212 | parent->effective_cpus); | |
3881b861 WL |
1213 | part_error = cpumask_equal(tmp->addmask, |
1214 | parent->effective_cpus); | |
1215 | } | |
1216 | ||
1217 | if (cmd == partcmd_update) { | |
1218 | int prev_prs = cpuset->partition_root_state; | |
1219 | ||
1220 | /* | |
1221 | * Check for possible transition between PRS_ENABLED | |
1222 | * and PRS_ERROR. | |
1223 | */ | |
1224 | switch (cpuset->partition_root_state) { | |
1225 | case PRS_ENABLED: | |
1226 | if (part_error) | |
1227 | cpuset->partition_root_state = PRS_ERROR; | |
1228 | break; | |
1229 | case PRS_ERROR: | |
1230 | if (!part_error) | |
1231 | cpuset->partition_root_state = PRS_ENABLED; | |
1232 | break; | |
1233 | } | |
1234 | /* | |
1235 | * Set part_error if previously in invalid state. | |
1236 | */ | |
1237 | part_error = (prev_prs == PRS_ERROR); | |
1238 | } | |
1239 | ||
1240 | if (!part_error && (cpuset->partition_root_state == PRS_ERROR)) | |
1241 | return 0; /* Nothing need to be done */ | |
1242 | ||
1243 | if (cpuset->partition_root_state == PRS_ERROR) { | |
1244 | /* | |
1245 | * Remove all its cpus from parent's subparts_cpus. | |
1246 | */ | |
1247 | adding = false; | |
1248 | deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, | |
1249 | parent->subparts_cpus); | |
ee8dde0c WL |
1250 | } |
1251 | ||
1252 | if (!adding && !deleting) | |
1253 | return 0; | |
1254 | ||
1255 | /* | |
1256 | * Change the parent's subparts_cpus. | |
1257 | * Newly added CPUs will be removed from effective_cpus and | |
1258 | * newly deleted ones will be added back to effective_cpus. | |
1259 | */ | |
1260 | spin_lock_irq(&callback_lock); | |
1261 | if (adding) { | |
1262 | cpumask_or(parent->subparts_cpus, | |
1263 | parent->subparts_cpus, tmp->addmask); | |
1264 | cpumask_andnot(parent->effective_cpus, | |
1265 | parent->effective_cpus, tmp->addmask); | |
1266 | } | |
1267 | if (deleting) { | |
1268 | cpumask_andnot(parent->subparts_cpus, | |
1269 | parent->subparts_cpus, tmp->delmask); | |
4b842da2 WL |
1270 | /* |
1271 | * Some of the CPUs in subparts_cpus might have been offlined. | |
1272 | */ | |
1273 | cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask); | |
ee8dde0c WL |
1274 | cpumask_or(parent->effective_cpus, |
1275 | parent->effective_cpus, tmp->delmask); | |
1276 | } | |
1277 | ||
1278 | parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus); | |
1279 | spin_unlock_irq(&callback_lock); | |
1280 | ||
1281 | return cmd == partcmd_update; | |
1282 | } | |
1283 | ||
5c5cc623 | 1284 | /* |
734d4513 | 1285 | * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree |
ee8dde0c WL |
1286 | * @cs: the cpuset to consider |
1287 | * @tmp: temp variables for calculating effective_cpus & partition setup | |
734d4513 LZ |
1288 | * |
1289 | * When congifured cpumask is changed, the effective cpumasks of this cpuset | |
1290 | * and all its descendants need to be updated. | |
5c5cc623 | 1291 | * |
734d4513 | 1292 | * On legacy hierachy, effective_cpus will be the same with cpu_allowed. |
5c5cc623 LZ |
1293 | * |
1294 | * Called with cpuset_mutex held | |
1295 | */ | |
ee8dde0c | 1296 | static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp) |
5c5cc623 LZ |
1297 | { |
1298 | struct cpuset *cp; | |
492eb21b | 1299 | struct cgroup_subsys_state *pos_css; |
8b5f1c52 | 1300 | bool need_rebuild_sched_domains = false; |
5c5cc623 LZ |
1301 | |
1302 | rcu_read_lock(); | |
734d4513 LZ |
1303 | cpuset_for_each_descendant_pre(cp, pos_css, cs) { |
1304 | struct cpuset *parent = parent_cs(cp); | |
1305 | ||
ee8dde0c | 1306 | compute_effective_cpumask(tmp->new_cpus, cp, parent); |
734d4513 | 1307 | |
554b0d1c LZ |
1308 | /* |
1309 | * If it becomes empty, inherit the effective mask of the | |
1310 | * parent, which is guaranteed to have some CPUs. | |
1311 | */ | |
4716909c | 1312 | if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) { |
ee8dde0c | 1313 | cpumask_copy(tmp->new_cpus, parent->effective_cpus); |
4716909c WL |
1314 | if (!cp->use_parent_ecpus) { |
1315 | cp->use_parent_ecpus = true; | |
1316 | parent->child_ecpus_count++; | |
1317 | } | |
1318 | } else if (cp->use_parent_ecpus) { | |
1319 | cp->use_parent_ecpus = false; | |
1320 | WARN_ON_ONCE(!parent->child_ecpus_count); | |
1321 | parent->child_ecpus_count--; | |
1322 | } | |
554b0d1c | 1323 | |
ee8dde0c WL |
1324 | /* |
1325 | * Skip the whole subtree if the cpumask remains the same | |
1326 | * and has no partition root state. | |
1327 | */ | |
3881b861 | 1328 | if (!cp->partition_root_state && |
ee8dde0c | 1329 | cpumask_equal(tmp->new_cpus, cp->effective_cpus)) { |
734d4513 LZ |
1330 | pos_css = css_rightmost_descendant(pos_css); |
1331 | continue; | |
5c5cc623 | 1332 | } |
734d4513 | 1333 | |
ee8dde0c WL |
1334 | /* |
1335 | * update_parent_subparts_cpumask() should have been called | |
1336 | * for cs already in update_cpumask(). We should also call | |
1337 | * update_tasks_cpumask() again for tasks in the parent | |
1338 | * cpuset if the parent's subparts_cpus changes. | |
1339 | */ | |
3881b861 WL |
1340 | if ((cp != cs) && cp->partition_root_state) { |
1341 | switch (parent->partition_root_state) { | |
1342 | case PRS_DISABLED: | |
1343 | /* | |
1344 | * If parent is not a partition root or an | |
1345 | * invalid partition root, clear the state | |
1346 | * state and the CS_CPU_EXCLUSIVE flag. | |
1347 | */ | |
1348 | WARN_ON_ONCE(cp->partition_root_state | |
1349 | != PRS_ERROR); | |
1350 | cp->partition_root_state = 0; | |
1351 | ||
1352 | /* | |
1353 | * clear_bit() is an atomic operation and | |
1354 | * readers aren't interested in the state | |
1355 | * of CS_CPU_EXCLUSIVE anyway. So we can | |
1356 | * just update the flag without holding | |
1357 | * the callback_lock. | |
1358 | */ | |
1359 | clear_bit(CS_CPU_EXCLUSIVE, &cp->flags); | |
1360 | break; | |
1361 | ||
1362 | case PRS_ENABLED: | |
1363 | if (update_parent_subparts_cpumask(cp, partcmd_update, NULL, tmp)) | |
1364 | update_tasks_cpumask(parent); | |
1365 | break; | |
1366 | ||
1367 | case PRS_ERROR: | |
1368 | /* | |
1369 | * When parent is invalid, it has to be too. | |
1370 | */ | |
1371 | cp->partition_root_state = PRS_ERROR; | |
1372 | if (cp->nr_subparts_cpus) { | |
1373 | cp->nr_subparts_cpus = 0; | |
1374 | cpumask_clear(cp->subparts_cpus); | |
1375 | } | |
1376 | break; | |
1377 | } | |
ee8dde0c WL |
1378 | } |
1379 | ||
ec903c0c | 1380 | if (!css_tryget_online(&cp->css)) |
5c5cc623 LZ |
1381 | continue; |
1382 | rcu_read_unlock(); | |
1383 | ||
8447a0fe | 1384 | spin_lock_irq(&callback_lock); |
ee8dde0c WL |
1385 | |
1386 | cpumask_copy(cp->effective_cpus, tmp->new_cpus); | |
3881b861 WL |
1387 | if (cp->nr_subparts_cpus && |
1388 | (cp->partition_root_state != PRS_ENABLED)) { | |
1389 | cp->nr_subparts_cpus = 0; | |
1390 | cpumask_clear(cp->subparts_cpus); | |
1391 | } else if (cp->nr_subparts_cpus) { | |
ee8dde0c WL |
1392 | /* |
1393 | * Make sure that effective_cpus & subparts_cpus | |
1394 | * are mutually exclusive. | |
3881b861 WL |
1395 | * |
1396 | * In the unlikely event that effective_cpus | |
1397 | * becomes empty. we clear cp->nr_subparts_cpus and | |
1398 | * let its child partition roots to compete for | |
1399 | * CPUs again. | |
ee8dde0c WL |
1400 | */ |
1401 | cpumask_andnot(cp->effective_cpus, cp->effective_cpus, | |
1402 | cp->subparts_cpus); | |
3881b861 WL |
1403 | if (cpumask_empty(cp->effective_cpus)) { |
1404 | cpumask_copy(cp->effective_cpus, tmp->new_cpus); | |
1405 | cpumask_clear(cp->subparts_cpus); | |
1406 | cp->nr_subparts_cpus = 0; | |
1407 | } else if (!cpumask_subset(cp->subparts_cpus, | |
1408 | tmp->new_cpus)) { | |
1409 | cpumask_andnot(cp->subparts_cpus, | |
1410 | cp->subparts_cpus, tmp->new_cpus); | |
1411 | cp->nr_subparts_cpus | |
1412 | = cpumask_weight(cp->subparts_cpus); | |
1413 | } | |
ee8dde0c | 1414 | } |
8447a0fe | 1415 | spin_unlock_irq(&callback_lock); |
734d4513 | 1416 | |
b8d1b8ee | 1417 | WARN_ON(!is_in_v2_mode() && |
734d4513 LZ |
1418 | !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); |
1419 | ||
d66393e5 | 1420 | update_tasks_cpumask(cp); |
5c5cc623 | 1421 | |
8b5f1c52 | 1422 | /* |
0ccea8fe WL |
1423 | * On legacy hierarchy, if the effective cpumask of any non- |
1424 | * empty cpuset is changed, we need to rebuild sched domains. | |
1425 | * On default hierarchy, the cpuset needs to be a partition | |
1426 | * root as well. | |
8b5f1c52 LZ |
1427 | */ |
1428 | if (!cpumask_empty(cp->cpus_allowed) && | |
0ccea8fe WL |
1429 | is_sched_load_balance(cp) && |
1430 | (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || | |
1431 | is_partition_root(cp))) | |
8b5f1c52 LZ |
1432 | need_rebuild_sched_domains = true; |
1433 | ||
5c5cc623 LZ |
1434 | rcu_read_lock(); |
1435 | css_put(&cp->css); | |
1436 | } | |
1437 | rcu_read_unlock(); | |
8b5f1c52 LZ |
1438 | |
1439 | if (need_rebuild_sched_domains) | |
1440 | rebuild_sched_domains_locked(); | |
5c5cc623 LZ |
1441 | } |
1442 | ||
4716909c WL |
1443 | /** |
1444 | * update_sibling_cpumasks - Update siblings cpumasks | |
1445 | * @parent: Parent cpuset | |
1446 | * @cs: Current cpuset | |
1447 | * @tmp: Temp variables | |
1448 | */ | |
1449 | static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, | |
1450 | struct tmpmasks *tmp) | |
1451 | { | |
1452 | struct cpuset *sibling; | |
1453 | struct cgroup_subsys_state *pos_css; | |
1454 | ||
1455 | /* | |
1456 | * Check all its siblings and call update_cpumasks_hier() | |
1457 | * if their use_parent_ecpus flag is set in order for them | |
1458 | * to use the right effective_cpus value. | |
1459 | */ | |
1460 | rcu_read_lock(); | |
1461 | cpuset_for_each_child(sibling, pos_css, parent) { | |
1462 | if (sibling == cs) | |
1463 | continue; | |
1464 | if (!sibling->use_parent_ecpus) | |
1465 | continue; | |
1466 | ||
1467 | update_cpumasks_hier(sibling, tmp); | |
1468 | } | |
1469 | rcu_read_unlock(); | |
1470 | } | |
1471 | ||
58f4790b CW |
1472 | /** |
1473 | * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it | |
1474 | * @cs: the cpuset to consider | |
fc34ac1d | 1475 | * @trialcs: trial cpuset |
58f4790b CW |
1476 | * @buf: buffer of cpu numbers written to this cpuset |
1477 | */ | |
645fcc9d LZ |
1478 | static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, |
1479 | const char *buf) | |
1da177e4 | 1480 | { |
58f4790b | 1481 | int retval; |
ee8dde0c | 1482 | struct tmpmasks tmp; |
1da177e4 | 1483 | |
5f054e31 | 1484 | /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */ |
4c4d50f7 PJ |
1485 | if (cs == &top_cpuset) |
1486 | return -EACCES; | |
1487 | ||
6f7f02e7 | 1488 | /* |
c8d9c90c | 1489 | * An empty cpus_allowed is ok only if the cpuset has no tasks. |
020958b6 PJ |
1490 | * Since cpulist_parse() fails on an empty mask, we special case |
1491 | * that parsing. The validate_change() call ensures that cpusets | |
1492 | * with tasks have cpus. | |
6f7f02e7 | 1493 | */ |
020958b6 | 1494 | if (!*buf) { |
300ed6cb | 1495 | cpumask_clear(trialcs->cpus_allowed); |
6f7f02e7 | 1496 | } else { |
300ed6cb | 1497 | retval = cpulist_parse(buf, trialcs->cpus_allowed); |
6f7f02e7 DR |
1498 | if (retval < 0) |
1499 | return retval; | |
37340746 | 1500 | |
5d8ba82c LZ |
1501 | if (!cpumask_subset(trialcs->cpus_allowed, |
1502 | top_cpuset.cpus_allowed)) | |
37340746 | 1503 | return -EINVAL; |
6f7f02e7 | 1504 | } |
029190c5 | 1505 | |
8707d8b8 | 1506 | /* Nothing to do if the cpus didn't change */ |
300ed6cb | 1507 | if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) |
8707d8b8 | 1508 | return 0; |
58f4790b | 1509 | |
a73456f3 LZ |
1510 | retval = validate_change(cs, trialcs); |
1511 | if (retval < 0) | |
1512 | return retval; | |
1513 | ||
ee8dde0c WL |
1514 | #ifdef CONFIG_CPUMASK_OFFSTACK |
1515 | /* | |
1516 | * Use the cpumasks in trialcs for tmpmasks when they are pointers | |
1517 | * to allocated cpumasks. | |
1518 | */ | |
1519 | tmp.addmask = trialcs->subparts_cpus; | |
1520 | tmp.delmask = trialcs->effective_cpus; | |
1521 | tmp.new_cpus = trialcs->cpus_allowed; | |
1522 | #endif | |
1523 | ||
1524 | if (cs->partition_root_state) { | |
1525 | /* Cpumask of a partition root cannot be empty */ | |
1526 | if (cpumask_empty(trialcs->cpus_allowed)) | |
1527 | return -EINVAL; | |
1528 | if (update_parent_subparts_cpumask(cs, partcmd_update, | |
1529 | trialcs->cpus_allowed, &tmp) < 0) | |
1530 | return -EINVAL; | |
1531 | } | |
1532 | ||
8447a0fe | 1533 | spin_lock_irq(&callback_lock); |
300ed6cb | 1534 | cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); |
ee8dde0c WL |
1535 | |
1536 | /* | |
1537 | * Make sure that subparts_cpus is a subset of cpus_allowed. | |
1538 | */ | |
1539 | if (cs->nr_subparts_cpus) { | |
1540 | cpumask_andnot(cs->subparts_cpus, cs->subparts_cpus, | |
1541 | cs->cpus_allowed); | |
1542 | cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); | |
1543 | } | |
8447a0fe | 1544 | spin_unlock_irq(&callback_lock); |
029190c5 | 1545 | |
ee8dde0c | 1546 | update_cpumasks_hier(cs, &tmp); |
4716909c WL |
1547 | |
1548 | if (cs->partition_root_state) { | |
1549 | struct cpuset *parent = parent_cs(cs); | |
1550 | ||
1551 | /* | |
1552 | * For partition root, update the cpumasks of sibling | |
1553 | * cpusets if they use parent's effective_cpus. | |
1554 | */ | |
1555 | if (parent->child_ecpus_count) | |
1556 | update_sibling_cpumasks(parent, cs, &tmp); | |
1557 | } | |
85d7b949 | 1558 | return 0; |
1da177e4 LT |
1559 | } |
1560 | ||
e4e364e8 | 1561 | /* |
e93ad19d TH |
1562 | * Migrate memory region from one set of nodes to another. This is |
1563 | * performed asynchronously as it can be called from process migration path | |
1564 | * holding locks involved in process management. All mm migrations are | |
1565 | * performed in the queued order and can be waited for by flushing | |
1566 | * cpuset_migrate_mm_wq. | |
e4e364e8 PJ |
1567 | */ |
1568 | ||
e93ad19d TH |
1569 | struct cpuset_migrate_mm_work { |
1570 | struct work_struct work; | |
1571 | struct mm_struct *mm; | |
1572 | nodemask_t from; | |
1573 | nodemask_t to; | |
1574 | }; | |
1575 | ||
1576 | static void cpuset_migrate_mm_workfn(struct work_struct *work) | |
1577 | { | |
1578 | struct cpuset_migrate_mm_work *mwork = | |
1579 | container_of(work, struct cpuset_migrate_mm_work, work); | |
1580 | ||
1581 | /* on a wq worker, no need to worry about %current's mems_allowed */ | |
1582 | do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); | |
1583 | mmput(mwork->mm); | |
1584 | kfree(mwork); | |
1585 | } | |
1586 | ||
e4e364e8 PJ |
1587 | static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, |
1588 | const nodemask_t *to) | |
1589 | { | |
e93ad19d | 1590 | struct cpuset_migrate_mm_work *mwork; |
e4e364e8 | 1591 | |
e93ad19d TH |
1592 | mwork = kzalloc(sizeof(*mwork), GFP_KERNEL); |
1593 | if (mwork) { | |
1594 | mwork->mm = mm; | |
1595 | mwork->from = *from; | |
1596 | mwork->to = *to; | |
1597 | INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); | |
1598 | queue_work(cpuset_migrate_mm_wq, &mwork->work); | |
1599 | } else { | |
1600 | mmput(mm); | |
1601 | } | |
1602 | } | |
e4e364e8 | 1603 | |
5cf1cacb | 1604 | static void cpuset_post_attach(void) |
e93ad19d TH |
1605 | { |
1606 | flush_workqueue(cpuset_migrate_mm_wq); | |
e4e364e8 PJ |
1607 | } |
1608 | ||
3b6766fe | 1609 | /* |
58568d2a MX |
1610 | * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy |
1611 | * @tsk: the task to change | |
1612 | * @newmems: new nodes that the task will be set | |
1613 | * | |
5f155f27 VB |
1614 | * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed |
1615 | * and rebind an eventual tasks' mempolicy. If the task is allocating in | |
1616 | * parallel, it might temporarily see an empty intersection, which results in | |
1617 | * a seqlock check and retry before OOM or allocation failure. | |
58568d2a MX |
1618 | */ |
1619 | static void cpuset_change_task_nodemask(struct task_struct *tsk, | |
1620 | nodemask_t *newmems) | |
1621 | { | |
c0ff7453 | 1622 | task_lock(tsk); |
c0ff7453 | 1623 | |
5f155f27 VB |
1624 | local_irq_disable(); |
1625 | write_seqcount_begin(&tsk->mems_allowed_seq); | |
c0ff7453 | 1626 | |
cc9a6c87 | 1627 | nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); |
213980c0 | 1628 | mpol_rebind_task(tsk, newmems); |
58568d2a | 1629 | tsk->mems_allowed = *newmems; |
cc9a6c87 | 1630 | |
5f155f27 VB |
1631 | write_seqcount_end(&tsk->mems_allowed_seq); |
1632 | local_irq_enable(); | |
cc9a6c87 | 1633 | |
c0ff7453 | 1634 | task_unlock(tsk); |
58568d2a MX |
1635 | } |
1636 | ||
8793d854 PM |
1637 | static void *cpuset_being_rebound; |
1638 | ||
0b2f630a MX |
1639 | /** |
1640 | * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. | |
1641 | * @cs: the cpuset in which each task's mems_allowed mask needs to be changed | |
0b2f630a | 1642 | * |
d66393e5 TH |
1643 | * Iterate through each task of @cs updating its mems_allowed to the |
1644 | * effective cpuset's. As this function is called with cpuset_mutex held, | |
1645 | * cpuset membership stays stable. | |
0b2f630a | 1646 | */ |
d66393e5 | 1647 | static void update_tasks_nodemask(struct cpuset *cs) |
1da177e4 | 1648 | { |
33ad801d | 1649 | static nodemask_t newmems; /* protected by cpuset_mutex */ |
d66393e5 TH |
1650 | struct css_task_iter it; |
1651 | struct task_struct *task; | |
59dac16f | 1652 | |
846a16bf | 1653 | cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ |
4225399a | 1654 | |
ae1c8023 | 1655 | guarantee_online_mems(cs, &newmems); |
33ad801d | 1656 | |
4225399a | 1657 | /* |
c1e8d7c6 | 1658 | * The mpol_rebind_mm() call takes mmap_lock, which we couldn't |
3b6766fe LZ |
1659 | * take while holding tasklist_lock. Forks can happen - the |
1660 | * mpol_dup() cpuset_being_rebound check will catch such forks, | |
1661 | * and rebind their vma mempolicies too. Because we still hold | |
5d21cc2d | 1662 | * the global cpuset_mutex, we know that no other rebind effort |
3b6766fe | 1663 | * will be contending for the global variable cpuset_being_rebound. |
4225399a | 1664 | * It's ok if we rebind the same mm twice; mpol_rebind_mm() |
04c19fa6 | 1665 | * is idempotent. Also migrate pages in each mm to new nodes. |
4225399a | 1666 | */ |
bc2fb7ed | 1667 | css_task_iter_start(&cs->css, 0, &it); |
d66393e5 TH |
1668 | while ((task = css_task_iter_next(&it))) { |
1669 | struct mm_struct *mm; | |
1670 | bool migrate; | |
1671 | ||
1672 | cpuset_change_task_nodemask(task, &newmems); | |
1673 | ||
1674 | mm = get_task_mm(task); | |
1675 | if (!mm) | |
1676 | continue; | |
1677 | ||
1678 | migrate = is_memory_migrate(cs); | |
1679 | ||
1680 | mpol_rebind_mm(mm, &cs->mems_allowed); | |
1681 | if (migrate) | |
1682 | cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); | |
e93ad19d TH |
1683 | else |
1684 | mmput(mm); | |
d66393e5 TH |
1685 | } |
1686 | css_task_iter_end(&it); | |
4225399a | 1687 | |
33ad801d LZ |
1688 | /* |
1689 | * All the tasks' nodemasks have been updated, update | |
1690 | * cs->old_mems_allowed. | |
1691 | */ | |
1692 | cs->old_mems_allowed = newmems; | |
1693 | ||
2df167a3 | 1694 | /* We're done rebinding vmas to this cpuset's new mems_allowed. */ |
8793d854 | 1695 | cpuset_being_rebound = NULL; |
1da177e4 LT |
1696 | } |
1697 | ||
5c5cc623 | 1698 | /* |
734d4513 LZ |
1699 | * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree |
1700 | * @cs: the cpuset to consider | |
1701 | * @new_mems: a temp variable for calculating new effective_mems | |
5c5cc623 | 1702 | * |
734d4513 LZ |
1703 | * When configured nodemask is changed, the effective nodemasks of this cpuset |
1704 | * and all its descendants need to be updated. | |
5c5cc623 | 1705 | * |
734d4513 | 1706 | * On legacy hiearchy, effective_mems will be the same with mems_allowed. |
5c5cc623 LZ |
1707 | * |
1708 | * Called with cpuset_mutex held | |
1709 | */ | |
734d4513 | 1710 | static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) |
5c5cc623 LZ |
1711 | { |
1712 | struct cpuset *cp; | |
492eb21b | 1713 | struct cgroup_subsys_state *pos_css; |
5c5cc623 LZ |
1714 | |
1715 | rcu_read_lock(); | |
734d4513 LZ |
1716 | cpuset_for_each_descendant_pre(cp, pos_css, cs) { |
1717 | struct cpuset *parent = parent_cs(cp); | |
1718 | ||
1719 | nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); | |
1720 | ||
554b0d1c LZ |
1721 | /* |
1722 | * If it becomes empty, inherit the effective mask of the | |
1723 | * parent, which is guaranteed to have some MEMs. | |
1724 | */ | |
b8d1b8ee | 1725 | if (is_in_v2_mode() && nodes_empty(*new_mems)) |
554b0d1c LZ |
1726 | *new_mems = parent->effective_mems; |
1727 | ||
734d4513 LZ |
1728 | /* Skip the whole subtree if the nodemask remains the same. */ |
1729 | if (nodes_equal(*new_mems, cp->effective_mems)) { | |
1730 | pos_css = css_rightmost_descendant(pos_css); | |
1731 | continue; | |
5c5cc623 | 1732 | } |
734d4513 | 1733 | |
ec903c0c | 1734 | if (!css_tryget_online(&cp->css)) |
5c5cc623 LZ |
1735 | continue; |
1736 | rcu_read_unlock(); | |
1737 | ||
8447a0fe | 1738 | spin_lock_irq(&callback_lock); |
734d4513 | 1739 | cp->effective_mems = *new_mems; |
8447a0fe | 1740 | spin_unlock_irq(&callback_lock); |
734d4513 | 1741 | |
b8d1b8ee | 1742 | WARN_ON(!is_in_v2_mode() && |
a1381268 | 1743 | !nodes_equal(cp->mems_allowed, cp->effective_mems)); |
734d4513 | 1744 | |
d66393e5 | 1745 | update_tasks_nodemask(cp); |
5c5cc623 LZ |
1746 | |
1747 | rcu_read_lock(); | |
1748 | css_put(&cp->css); | |
1749 | } | |
1750 | rcu_read_unlock(); | |
1751 | } | |
1752 | ||
0b2f630a MX |
1753 | /* |
1754 | * Handle user request to change the 'mems' memory placement | |
1755 | * of a cpuset. Needs to validate the request, update the | |
58568d2a MX |
1756 | * cpusets mems_allowed, and for each task in the cpuset, |
1757 | * update mems_allowed and rebind task's mempolicy and any vma | |
1758 | * mempolicies and if the cpuset is marked 'memory_migrate', | |
1759 | * migrate the tasks pages to the new memory. | |
0b2f630a | 1760 | * |
8447a0fe | 1761 | * Call with cpuset_mutex held. May take callback_lock during call. |
0b2f630a | 1762 | * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, |
c1e8d7c6 | 1763 | * lock each such tasks mm->mmap_lock, scan its vma's and rebind |
0b2f630a MX |
1764 | * their mempolicies to the cpusets new mems_allowed. |
1765 | */ | |
645fcc9d LZ |
1766 | static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, |
1767 | const char *buf) | |
0b2f630a | 1768 | { |
0b2f630a MX |
1769 | int retval; |
1770 | ||
1771 | /* | |
38d7bee9 | 1772 | * top_cpuset.mems_allowed tracks node_stats[N_MEMORY]; |
0b2f630a MX |
1773 | * it's read-only |
1774 | */ | |
53feb297 MX |
1775 | if (cs == &top_cpuset) { |
1776 | retval = -EACCES; | |
1777 | goto done; | |
1778 | } | |
0b2f630a | 1779 | |
0b2f630a MX |
1780 | /* |
1781 | * An empty mems_allowed is ok iff there are no tasks in the cpuset. | |
1782 | * Since nodelist_parse() fails on an empty mask, we special case | |
1783 | * that parsing. The validate_change() call ensures that cpusets | |
1784 | * with tasks have memory. | |
1785 | */ | |
1786 | if (!*buf) { | |
645fcc9d | 1787 | nodes_clear(trialcs->mems_allowed); |
0b2f630a | 1788 | } else { |
645fcc9d | 1789 | retval = nodelist_parse(buf, trialcs->mems_allowed); |
0b2f630a MX |
1790 | if (retval < 0) |
1791 | goto done; | |
1792 | ||
645fcc9d | 1793 | if (!nodes_subset(trialcs->mems_allowed, |
5d8ba82c LZ |
1794 | top_cpuset.mems_allowed)) { |
1795 | retval = -EINVAL; | |
53feb297 MX |
1796 | goto done; |
1797 | } | |
0b2f630a | 1798 | } |
33ad801d LZ |
1799 | |
1800 | if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { | |
0b2f630a MX |
1801 | retval = 0; /* Too easy - nothing to do */ |
1802 | goto done; | |
1803 | } | |
645fcc9d | 1804 | retval = validate_change(cs, trialcs); |
0b2f630a MX |
1805 | if (retval < 0) |
1806 | goto done; | |
1807 | ||
8447a0fe | 1808 | spin_lock_irq(&callback_lock); |
645fcc9d | 1809 | cs->mems_allowed = trialcs->mems_allowed; |
8447a0fe | 1810 | spin_unlock_irq(&callback_lock); |
0b2f630a | 1811 | |
734d4513 | 1812 | /* use trialcs->mems_allowed as a temp variable */ |
24ee3cf8 | 1813 | update_nodemasks_hier(cs, &trialcs->mems_allowed); |
0b2f630a MX |
1814 | done: |
1815 | return retval; | |
1816 | } | |
1817 | ||
77ef80c6 | 1818 | bool current_cpuset_is_being_rebound(void) |
8793d854 | 1819 | { |
77ef80c6 | 1820 | bool ret; |
391acf97 GZ |
1821 | |
1822 | rcu_read_lock(); | |
1823 | ret = task_cs(current) == cpuset_being_rebound; | |
1824 | rcu_read_unlock(); | |
1825 | ||
1826 | return ret; | |
8793d854 PM |
1827 | } |
1828 | ||
5be7a479 | 1829 | static int update_relax_domain_level(struct cpuset *cs, s64 val) |
1d3504fc | 1830 | { |
db7f47cf | 1831 | #ifdef CONFIG_SMP |
60495e77 | 1832 | if (val < -1 || val >= sched_domain_level_max) |
30e0e178 | 1833 | return -EINVAL; |
db7f47cf | 1834 | #endif |
1d3504fc HS |
1835 | |
1836 | if (val != cs->relax_domain_level) { | |
1837 | cs->relax_domain_level = val; | |
300ed6cb LZ |
1838 | if (!cpumask_empty(cs->cpus_allowed) && |
1839 | is_sched_load_balance(cs)) | |
699140ba | 1840 | rebuild_sched_domains_locked(); |
1d3504fc HS |
1841 | } |
1842 | ||
1843 | return 0; | |
1844 | } | |
1845 | ||
72ec7029 | 1846 | /** |
950592f7 MX |
1847 | * update_tasks_flags - update the spread flags of tasks in the cpuset. |
1848 | * @cs: the cpuset in which each task's spread flags needs to be changed | |
950592f7 | 1849 | * |
d66393e5 TH |
1850 | * Iterate through each task of @cs updating its spread flags. As this |
1851 | * function is called with cpuset_mutex held, cpuset membership stays | |
1852 | * stable. | |
950592f7 | 1853 | */ |
d66393e5 | 1854 | static void update_tasks_flags(struct cpuset *cs) |
950592f7 | 1855 | { |
d66393e5 TH |
1856 | struct css_task_iter it; |
1857 | struct task_struct *task; | |
1858 | ||
bc2fb7ed | 1859 | css_task_iter_start(&cs->css, 0, &it); |
d66393e5 TH |
1860 | while ((task = css_task_iter_next(&it))) |
1861 | cpuset_update_task_spread_flag(cs, task); | |
1862 | css_task_iter_end(&it); | |
950592f7 MX |
1863 | } |
1864 | ||
1da177e4 LT |
1865 | /* |
1866 | * update_flag - read a 0 or a 1 in a file and update associated flag | |
78608366 PM |
1867 | * bit: the bit to update (see cpuset_flagbits_t) |
1868 | * cs: the cpuset to update | |
1869 | * turning_on: whether the flag is being set or cleared | |
053199ed | 1870 | * |
5d21cc2d | 1871 | * Call with cpuset_mutex held. |
1da177e4 LT |
1872 | */ |
1873 | ||
700fe1ab PM |
1874 | static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, |
1875 | int turning_on) | |
1da177e4 | 1876 | { |
645fcc9d | 1877 | struct cpuset *trialcs; |
40b6a762 | 1878 | int balance_flag_changed; |
950592f7 | 1879 | int spread_flag_changed; |
950592f7 | 1880 | int err; |
1da177e4 | 1881 | |
645fcc9d LZ |
1882 | trialcs = alloc_trial_cpuset(cs); |
1883 | if (!trialcs) | |
1884 | return -ENOMEM; | |
1885 | ||
1da177e4 | 1886 | if (turning_on) |
645fcc9d | 1887 | set_bit(bit, &trialcs->flags); |
1da177e4 | 1888 | else |
645fcc9d | 1889 | clear_bit(bit, &trialcs->flags); |
1da177e4 | 1890 | |
645fcc9d | 1891 | err = validate_change(cs, trialcs); |
85d7b949 | 1892 | if (err < 0) |
645fcc9d | 1893 | goto out; |
029190c5 | 1894 | |
029190c5 | 1895 | balance_flag_changed = (is_sched_load_balance(cs) != |
645fcc9d | 1896 | is_sched_load_balance(trialcs)); |
029190c5 | 1897 | |
950592f7 MX |
1898 | spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) |
1899 | || (is_spread_page(cs) != is_spread_page(trialcs))); | |
1900 | ||
8447a0fe | 1901 | spin_lock_irq(&callback_lock); |
645fcc9d | 1902 | cs->flags = trialcs->flags; |
8447a0fe | 1903 | spin_unlock_irq(&callback_lock); |
85d7b949 | 1904 | |
300ed6cb | 1905 | if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) |
699140ba | 1906 | rebuild_sched_domains_locked(); |
029190c5 | 1907 | |
950592f7 | 1908 | if (spread_flag_changed) |
d66393e5 | 1909 | update_tasks_flags(cs); |
645fcc9d | 1910 | out: |
bf92370c | 1911 | free_cpuset(trialcs); |
645fcc9d | 1912 | return err; |
1da177e4 LT |
1913 | } |
1914 | ||
ee8dde0c WL |
1915 | /* |
1916 | * update_prstate - update partititon_root_state | |
1917 | * cs: the cpuset to update | |
1918 | * val: 0 - disabled, 1 - enabled | |
1919 | * | |
1920 | * Call with cpuset_mutex held. | |
1921 | */ | |
1922 | static int update_prstate(struct cpuset *cs, int val) | |
1923 | { | |
1924 | int err; | |
1925 | struct cpuset *parent = parent_cs(cs); | |
1926 | struct tmpmasks tmp; | |
1927 | ||
1928 | if ((val != 0) && (val != 1)) | |
1929 | return -EINVAL; | |
1930 | if (val == cs->partition_root_state) | |
1931 | return 0; | |
1932 | ||
1933 | /* | |
3881b861 | 1934 | * Cannot force a partial or invalid partition root to a full |
ee8dde0c WL |
1935 | * partition root. |
1936 | */ | |
1937 | if (val && cs->partition_root_state) | |
1938 | return -EINVAL; | |
1939 | ||
1940 | if (alloc_cpumasks(NULL, &tmp)) | |
1941 | return -ENOMEM; | |
1942 | ||
1943 | err = -EINVAL; | |
1944 | if (!cs->partition_root_state) { | |
1945 | /* | |
1946 | * Turning on partition root requires setting the | |
1947 | * CS_CPU_EXCLUSIVE bit implicitly as well and cpus_allowed | |
1948 | * cannot be NULL. | |
1949 | */ | |
1950 | if (cpumask_empty(cs->cpus_allowed)) | |
1951 | goto out; | |
1952 | ||
1953 | err = update_flag(CS_CPU_EXCLUSIVE, cs, 1); | |
1954 | if (err) | |
1955 | goto out; | |
1956 | ||
1957 | err = update_parent_subparts_cpumask(cs, partcmd_enable, | |
1958 | NULL, &tmp); | |
1959 | if (err) { | |
1960 | update_flag(CS_CPU_EXCLUSIVE, cs, 0); | |
1961 | goto out; | |
1962 | } | |
1963 | cs->partition_root_state = PRS_ENABLED; | |
1964 | } else { | |
3881b861 WL |
1965 | /* |
1966 | * Turning off partition root will clear the | |
1967 | * CS_CPU_EXCLUSIVE bit. | |
1968 | */ | |
1969 | if (cs->partition_root_state == PRS_ERROR) { | |
1970 | cs->partition_root_state = 0; | |
1971 | update_flag(CS_CPU_EXCLUSIVE, cs, 0); | |
1972 | err = 0; | |
1973 | goto out; | |
1974 | } | |
1975 | ||
ee8dde0c WL |
1976 | err = update_parent_subparts_cpumask(cs, partcmd_disable, |
1977 | NULL, &tmp); | |
1978 | if (err) | |
1979 | goto out; | |
1980 | ||
1981 | cs->partition_root_state = 0; | |
1982 | ||
1983 | /* Turning off CS_CPU_EXCLUSIVE will not return error */ | |
1984 | update_flag(CS_CPU_EXCLUSIVE, cs, 0); | |
1985 | } | |
1986 | ||
1987 | /* | |
1988 | * Update cpumask of parent's tasks except when it is the top | |
1989 | * cpuset as some system daemons cannot be mapped to other CPUs. | |
1990 | */ | |
1991 | if (parent != &top_cpuset) | |
1992 | update_tasks_cpumask(parent); | |
1993 | ||
4716909c WL |
1994 | if (parent->child_ecpus_count) |
1995 | update_sibling_cpumasks(parent, cs, &tmp); | |
1996 | ||
ee8dde0c WL |
1997 | rebuild_sched_domains_locked(); |
1998 | out: | |
1999 | free_cpumasks(NULL, &tmp); | |
645fcc9d | 2000 | return err; |
1da177e4 LT |
2001 | } |
2002 | ||
3e0d98b9 | 2003 | /* |
80f7228b | 2004 | * Frequency meter - How fast is some event occurring? |
3e0d98b9 PJ |
2005 | * |
2006 | * These routines manage a digitally filtered, constant time based, | |
2007 | * event frequency meter. There are four routines: | |
2008 | * fmeter_init() - initialize a frequency meter. | |
2009 | * fmeter_markevent() - called each time the event happens. | |
2010 | * fmeter_getrate() - returns the recent rate of such events. | |
2011 | * fmeter_update() - internal routine used to update fmeter. | |
2012 | * | |
2013 | * A common data structure is passed to each of these routines, | |
2014 | * which is used to keep track of the state required to manage the | |
2015 | * frequency meter and its digital filter. | |
2016 | * | |
2017 | * The filter works on the number of events marked per unit time. | |
2018 | * The filter is single-pole low-pass recursive (IIR). The time unit | |
2019 | * is 1 second. Arithmetic is done using 32-bit integers scaled to | |
2020 | * simulate 3 decimal digits of precision (multiplied by 1000). | |
2021 | * | |
2022 | * With an FM_COEF of 933, and a time base of 1 second, the filter | |
2023 | * has a half-life of 10 seconds, meaning that if the events quit | |
2024 | * happening, then the rate returned from the fmeter_getrate() | |
2025 | * will be cut in half each 10 seconds, until it converges to zero. | |
2026 | * | |
2027 | * It is not worth doing a real infinitely recursive filter. If more | |
2028 | * than FM_MAXTICKS ticks have elapsed since the last filter event, | |
2029 | * just compute FM_MAXTICKS ticks worth, by which point the level | |
2030 | * will be stable. | |
2031 | * | |
2032 | * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid | |
2033 | * arithmetic overflow in the fmeter_update() routine. | |
2034 | * | |
2035 | * Given the simple 32 bit integer arithmetic used, this meter works | |
2036 | * best for reporting rates between one per millisecond (msec) and | |
2037 | * one per 32 (approx) seconds. At constant rates faster than one | |
2038 | * per msec it maxes out at values just under 1,000,000. At constant | |
2039 | * rates between one per msec, and one per second it will stabilize | |
2040 | * to a value N*1000, where N is the rate of events per second. | |
2041 | * At constant rates between one per second and one per 32 seconds, | |
2042 | * it will be choppy, moving up on the seconds that have an event, | |
2043 | * and then decaying until the next event. At rates slower than | |
2044 | * about one in 32 seconds, it decays all the way back to zero between | |
2045 | * each event. | |
2046 | */ | |
2047 | ||
2048 | #define FM_COEF 933 /* coefficient for half-life of 10 secs */ | |
d2b43658 | 2049 | #define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */ |
3e0d98b9 PJ |
2050 | #define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */ |
2051 | #define FM_SCALE 1000 /* faux fixed point scale */ | |
2052 | ||
2053 | /* Initialize a frequency meter */ | |
2054 | static void fmeter_init(struct fmeter *fmp) | |
2055 | { | |
2056 | fmp->cnt = 0; | |
2057 | fmp->val = 0; | |
2058 | fmp->time = 0; | |
2059 | spin_lock_init(&fmp->lock); | |
2060 | } | |
2061 | ||
2062 | /* Internal meter update - process cnt events and update value */ | |
2063 | static void fmeter_update(struct fmeter *fmp) | |
2064 | { | |
d2b43658 AB |
2065 | time64_t now; |
2066 | u32 ticks; | |
2067 | ||
2068 | now = ktime_get_seconds(); | |
2069 | ticks = now - fmp->time; | |
3e0d98b9 PJ |
2070 | |
2071 | if (ticks == 0) | |
2072 | return; | |
2073 | ||
2074 | ticks = min(FM_MAXTICKS, ticks); | |
2075 | while (ticks-- > 0) | |
2076 | fmp->val = (FM_COEF * fmp->val) / FM_SCALE; | |
2077 | fmp->time = now; | |
2078 | ||
2079 | fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; | |
2080 | fmp->cnt = 0; | |
2081 | } | |
2082 | ||
2083 | /* Process any previous ticks, then bump cnt by one (times scale). */ | |
2084 | static void fmeter_markevent(struct fmeter *fmp) | |
2085 | { | |
2086 | spin_lock(&fmp->lock); | |
2087 | fmeter_update(fmp); | |
2088 | fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); | |
2089 | spin_unlock(&fmp->lock); | |
2090 | } | |
2091 | ||
2092 | /* Process any previous ticks, then return current value. */ | |
2093 | static int fmeter_getrate(struct fmeter *fmp) | |
2094 | { | |
2095 | int val; | |
2096 | ||
2097 | spin_lock(&fmp->lock); | |
2098 | fmeter_update(fmp); | |
2099 | val = fmp->val; | |
2100 | spin_unlock(&fmp->lock); | |
2101 | return val; | |
2102 | } | |
2103 | ||
57fce0a6 TH |
2104 | static struct cpuset *cpuset_attach_old_cs; |
2105 | ||
5d21cc2d | 2106 | /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ |
1f7dd3e5 | 2107 | static int cpuset_can_attach(struct cgroup_taskset *tset) |
f780bdb7 | 2108 | { |
1f7dd3e5 TH |
2109 | struct cgroup_subsys_state *css; |
2110 | struct cpuset *cs; | |
bb9d97b6 TH |
2111 | struct task_struct *task; |
2112 | int ret; | |
1da177e4 | 2113 | |
57fce0a6 | 2114 | /* used later by cpuset_attach() */ |
1f7dd3e5 TH |
2115 | cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css)); |
2116 | cs = css_cs(css); | |
57fce0a6 | 2117 | |
1243dc51 | 2118 | percpu_down_write(&cpuset_rwsem); |
5d21cc2d | 2119 | |
aa6ec29b | 2120 | /* allow moving tasks into an empty cpuset if on default hierarchy */ |
5d21cc2d | 2121 | ret = -ENOSPC; |
b8d1b8ee | 2122 | if (!is_in_v2_mode() && |
88fa523b | 2123 | (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) |
5d21cc2d | 2124 | goto out_unlock; |
9985b0ba | 2125 | |
1f7dd3e5 | 2126 | cgroup_taskset_for_each(task, css, tset) { |
7f51412a JL |
2127 | ret = task_can_attach(task, cs->cpus_allowed); |
2128 | if (ret) | |
5d21cc2d TH |
2129 | goto out_unlock; |
2130 | ret = security_task_setscheduler(task); | |
2131 | if (ret) | |
2132 | goto out_unlock; | |
bb9d97b6 | 2133 | } |
f780bdb7 | 2134 | |
452477fa TH |
2135 | /* |
2136 | * Mark attach is in progress. This makes validate_change() fail | |
2137 | * changes which zero cpus/mems_allowed. | |
2138 | */ | |
2139 | cs->attach_in_progress++; | |
5d21cc2d TH |
2140 | ret = 0; |
2141 | out_unlock: | |
1243dc51 | 2142 | percpu_up_write(&cpuset_rwsem); |
5d21cc2d | 2143 | return ret; |
8793d854 | 2144 | } |
f780bdb7 | 2145 | |
1f7dd3e5 | 2146 | static void cpuset_cancel_attach(struct cgroup_taskset *tset) |
452477fa | 2147 | { |
1f7dd3e5 | 2148 | struct cgroup_subsys_state *css; |
1f7dd3e5 TH |
2149 | |
2150 | cgroup_taskset_first(tset, &css); | |
1f7dd3e5 | 2151 | |
1243dc51 | 2152 | percpu_down_write(&cpuset_rwsem); |
eb95419b | 2153 | css_cs(css)->attach_in_progress--; |
1243dc51 | 2154 | percpu_up_write(&cpuset_rwsem); |
8793d854 | 2155 | } |
1da177e4 | 2156 | |
4e4c9a14 | 2157 | /* |
5d21cc2d | 2158 | * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach() |
4e4c9a14 TH |
2159 | * but we can't allocate it dynamically there. Define it global and |
2160 | * allocate from cpuset_init(). | |
2161 | */ | |
2162 | static cpumask_var_t cpus_attach; | |
2163 | ||
1f7dd3e5 | 2164 | static void cpuset_attach(struct cgroup_taskset *tset) |
8793d854 | 2165 | { |
67bd2c59 | 2166 | /* static buf protected by cpuset_mutex */ |
4e4c9a14 | 2167 | static nodemask_t cpuset_attach_nodemask_to; |
bb9d97b6 | 2168 | struct task_struct *task; |
4530eddb | 2169 | struct task_struct *leader; |
1f7dd3e5 TH |
2170 | struct cgroup_subsys_state *css; |
2171 | struct cpuset *cs; | |
57fce0a6 | 2172 | struct cpuset *oldcs = cpuset_attach_old_cs; |
22fb52dd | 2173 | |
1f7dd3e5 TH |
2174 | cgroup_taskset_first(tset, &css); |
2175 | cs = css_cs(css); | |
2176 | ||
1243dc51 | 2177 | percpu_down_write(&cpuset_rwsem); |
5d21cc2d | 2178 | |
4e4c9a14 TH |
2179 | /* prepare for attach */ |
2180 | if (cs == &top_cpuset) | |
2181 | cpumask_copy(cpus_attach, cpu_possible_mask); | |
2182 | else | |
ae1c8023 | 2183 | guarantee_online_cpus(cs, cpus_attach); |
4e4c9a14 | 2184 | |
ae1c8023 | 2185 | guarantee_online_mems(cs, &cpuset_attach_nodemask_to); |
4e4c9a14 | 2186 | |
1f7dd3e5 | 2187 | cgroup_taskset_for_each(task, css, tset) { |
bb9d97b6 TH |
2188 | /* |
2189 | * can_attach beforehand should guarantee that this doesn't | |
2190 | * fail. TODO: have a better way to handle failure here | |
2191 | */ | |
2192 | WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach)); | |
2193 | ||
2194 | cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to); | |
2195 | cpuset_update_task_spread_flag(cs, task); | |
2196 | } | |
22fb52dd | 2197 | |
f780bdb7 | 2198 | /* |
4530eddb TH |
2199 | * Change mm for all threadgroup leaders. This is expensive and may |
2200 | * sleep and should be moved outside migration path proper. | |
f780bdb7 | 2201 | */ |
ae1c8023 | 2202 | cpuset_attach_nodemask_to = cs->effective_mems; |
1f7dd3e5 | 2203 | cgroup_taskset_for_each_leader(leader, css, tset) { |
3df9ca0a TH |
2204 | struct mm_struct *mm = get_task_mm(leader); |
2205 | ||
2206 | if (mm) { | |
2207 | mpol_rebind_mm(mm, &cpuset_attach_nodemask_to); | |
2208 | ||
2209 | /* | |
2210 | * old_mems_allowed is the same with mems_allowed | |
2211 | * here, except if this task is being moved | |
2212 | * automatically due to hotplug. In that case | |
2213 | * @mems_allowed has been updated and is empty, so | |
2214 | * @old_mems_allowed is the right nodesets that we | |
2215 | * migrate mm from. | |
2216 | */ | |
e93ad19d | 2217 | if (is_memory_migrate(cs)) |
3df9ca0a TH |
2218 | cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, |
2219 | &cpuset_attach_nodemask_to); | |
e93ad19d TH |
2220 | else |
2221 | mmput(mm); | |
f047cecf | 2222 | } |
4225399a | 2223 | } |
452477fa | 2224 | |
33ad801d | 2225 | cs->old_mems_allowed = cpuset_attach_nodemask_to; |
02bb5863 | 2226 | |
452477fa | 2227 | cs->attach_in_progress--; |
e44193d3 LZ |
2228 | if (!cs->attach_in_progress) |
2229 | wake_up(&cpuset_attach_wq); | |
5d21cc2d | 2230 | |
1243dc51 | 2231 | percpu_up_write(&cpuset_rwsem); |
1da177e4 LT |
2232 | } |
2233 | ||
2234 | /* The various types of files and directories in a cpuset file system */ | |
2235 | ||
2236 | typedef enum { | |
45b07ef3 | 2237 | FILE_MEMORY_MIGRATE, |
1da177e4 LT |
2238 | FILE_CPULIST, |
2239 | FILE_MEMLIST, | |
afd1a8b3 LZ |
2240 | FILE_EFFECTIVE_CPULIST, |
2241 | FILE_EFFECTIVE_MEMLIST, | |
5cf8114d | 2242 | FILE_SUBPARTS_CPULIST, |
1da177e4 LT |
2243 | FILE_CPU_EXCLUSIVE, |
2244 | FILE_MEM_EXCLUSIVE, | |
78608366 | 2245 | FILE_MEM_HARDWALL, |
029190c5 | 2246 | FILE_SCHED_LOAD_BALANCE, |
ee8dde0c | 2247 | FILE_PARTITION_ROOT, |
1d3504fc | 2248 | FILE_SCHED_RELAX_DOMAIN_LEVEL, |
3e0d98b9 PJ |
2249 | FILE_MEMORY_PRESSURE_ENABLED, |
2250 | FILE_MEMORY_PRESSURE, | |
825a46af PJ |
2251 | FILE_SPREAD_PAGE, |
2252 | FILE_SPREAD_SLAB, | |
1da177e4 LT |
2253 | } cpuset_filetype_t; |
2254 | ||
182446d0 TH |
2255 | static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, |
2256 | u64 val) | |
700fe1ab | 2257 | { |
182446d0 | 2258 | struct cpuset *cs = css_cs(css); |
700fe1ab | 2259 | cpuset_filetype_t type = cft->private; |
a903f086 | 2260 | int retval = 0; |
700fe1ab | 2261 | |
d74b27d6 | 2262 | get_online_cpus(); |
1243dc51 | 2263 | percpu_down_write(&cpuset_rwsem); |
a903f086 LZ |
2264 | if (!is_cpuset_online(cs)) { |
2265 | retval = -ENODEV; | |
5d21cc2d | 2266 | goto out_unlock; |
a903f086 | 2267 | } |
700fe1ab PM |
2268 | |
2269 | switch (type) { | |
1da177e4 | 2270 | case FILE_CPU_EXCLUSIVE: |
700fe1ab | 2271 | retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); |
1da177e4 LT |
2272 | break; |
2273 | case FILE_MEM_EXCLUSIVE: | |
700fe1ab | 2274 | retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); |
1da177e4 | 2275 | break; |
78608366 PM |
2276 | case FILE_MEM_HARDWALL: |
2277 | retval = update_flag(CS_MEM_HARDWALL, cs, val); | |
2278 | break; | |
029190c5 | 2279 | case FILE_SCHED_LOAD_BALANCE: |
700fe1ab | 2280 | retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); |
1d3504fc | 2281 | break; |
45b07ef3 | 2282 | case FILE_MEMORY_MIGRATE: |
700fe1ab | 2283 | retval = update_flag(CS_MEMORY_MIGRATE, cs, val); |
45b07ef3 | 2284 | break; |
3e0d98b9 | 2285 | case FILE_MEMORY_PRESSURE_ENABLED: |
700fe1ab | 2286 | cpuset_memory_pressure_enabled = !!val; |
3e0d98b9 | 2287 | break; |
825a46af | 2288 | case FILE_SPREAD_PAGE: |
700fe1ab | 2289 | retval = update_flag(CS_SPREAD_PAGE, cs, val); |
825a46af PJ |
2290 | break; |
2291 | case FILE_SPREAD_SLAB: | |
700fe1ab | 2292 | retval = update_flag(CS_SPREAD_SLAB, cs, val); |
825a46af | 2293 | break; |
1da177e4 LT |
2294 | default: |
2295 | retval = -EINVAL; | |
700fe1ab | 2296 | break; |
1da177e4 | 2297 | } |
5d21cc2d | 2298 | out_unlock: |
1243dc51 | 2299 | percpu_up_write(&cpuset_rwsem); |
d74b27d6 | 2300 | put_online_cpus(); |
1da177e4 LT |
2301 | return retval; |
2302 | } | |
2303 | ||
182446d0 TH |
2304 | static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, |
2305 | s64 val) | |
5be7a479 | 2306 | { |
182446d0 | 2307 | struct cpuset *cs = css_cs(css); |
5be7a479 | 2308 | cpuset_filetype_t type = cft->private; |
5d21cc2d | 2309 | int retval = -ENODEV; |
5be7a479 | 2310 | |
d74b27d6 | 2311 | get_online_cpus(); |
1243dc51 | 2312 | percpu_down_write(&cpuset_rwsem); |
5d21cc2d TH |
2313 | if (!is_cpuset_online(cs)) |
2314 | goto out_unlock; | |
e3712395 | 2315 | |
5be7a479 PM |
2316 | switch (type) { |
2317 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: | |
2318 | retval = update_relax_domain_level(cs, val); | |
2319 | break; | |
2320 | default: | |
2321 | retval = -EINVAL; | |
2322 | break; | |
2323 | } | |
5d21cc2d | 2324 | out_unlock: |
1243dc51 | 2325 | percpu_up_write(&cpuset_rwsem); |
d74b27d6 | 2326 | put_online_cpus(); |
5be7a479 PM |
2327 | return retval; |
2328 | } | |
2329 | ||
e3712395 PM |
2330 | /* |
2331 | * Common handling for a write to a "cpus" or "mems" file. | |
2332 | */ | |
451af504 TH |
2333 | static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, |
2334 | char *buf, size_t nbytes, loff_t off) | |
e3712395 | 2335 | { |
451af504 | 2336 | struct cpuset *cs = css_cs(of_css(of)); |
645fcc9d | 2337 | struct cpuset *trialcs; |
5d21cc2d | 2338 | int retval = -ENODEV; |
e3712395 | 2339 | |
451af504 TH |
2340 | buf = strstrip(buf); |
2341 | ||
3a5a6d0c TH |
2342 | /* |
2343 | * CPU or memory hotunplug may leave @cs w/o any execution | |
2344 | * resources, in which case the hotplug code asynchronously updates | |
2345 | * configuration and transfers all tasks to the nearest ancestor | |
2346 | * which can execute. | |
2347 | * | |
2348 | * As writes to "cpus" or "mems" may restore @cs's execution | |
2349 | * resources, wait for the previously scheduled operations before | |
2350 | * proceeding, so that we don't end up keep removing tasks added | |
2351 | * after execution capability is restored. | |
76bb5ab8 TH |
2352 | * |
2353 | * cpuset_hotplug_work calls back into cgroup core via | |
2354 | * cgroup_transfer_tasks() and waiting for it from a cgroupfs | |
2355 | * operation like this one can lead to a deadlock through kernfs | |
2356 | * active_ref protection. Let's break the protection. Losing the | |
2357 | * protection is okay as we check whether @cs is online after | |
2358 | * grabbing cpuset_mutex anyway. This only happens on the legacy | |
2359 | * hierarchies. | |
3a5a6d0c | 2360 | */ |
76bb5ab8 TH |
2361 | css_get(&cs->css); |
2362 | kernfs_break_active_protection(of->kn); | |
3a5a6d0c TH |
2363 | flush_work(&cpuset_hotplug_work); |
2364 | ||
d74b27d6 | 2365 | get_online_cpus(); |
1243dc51 | 2366 | percpu_down_write(&cpuset_rwsem); |
5d21cc2d TH |
2367 | if (!is_cpuset_online(cs)) |
2368 | goto out_unlock; | |
e3712395 | 2369 | |
645fcc9d | 2370 | trialcs = alloc_trial_cpuset(cs); |
b75f38d6 LZ |
2371 | if (!trialcs) { |
2372 | retval = -ENOMEM; | |
5d21cc2d | 2373 | goto out_unlock; |
b75f38d6 | 2374 | } |
645fcc9d | 2375 | |
451af504 | 2376 | switch (of_cft(of)->private) { |
e3712395 | 2377 | case FILE_CPULIST: |
645fcc9d | 2378 | retval = update_cpumask(cs, trialcs, buf); |
e3712395 PM |
2379 | break; |
2380 | case FILE_MEMLIST: | |
645fcc9d | 2381 | retval = update_nodemask(cs, trialcs, buf); |
e3712395 PM |
2382 | break; |
2383 | default: | |
2384 | retval = -EINVAL; | |
2385 | break; | |
2386 | } | |
645fcc9d | 2387 | |
bf92370c | 2388 | free_cpuset(trialcs); |
5d21cc2d | 2389 | out_unlock: |
1243dc51 | 2390 | percpu_up_write(&cpuset_rwsem); |
d74b27d6 | 2391 | put_online_cpus(); |
76bb5ab8 TH |
2392 | kernfs_unbreak_active_protection(of->kn); |
2393 | css_put(&cs->css); | |
e93ad19d | 2394 | flush_workqueue(cpuset_migrate_mm_wq); |
451af504 | 2395 | return retval ?: nbytes; |
e3712395 PM |
2396 | } |
2397 | ||
1da177e4 LT |
2398 | /* |
2399 | * These ascii lists should be read in a single call, by using a user | |
2400 | * buffer large enough to hold the entire map. If read in smaller | |
2401 | * chunks, there is no guarantee of atomicity. Since the display format | |
2402 | * used, list of ranges of sequential numbers, is variable length, | |
2403 | * and since these maps can change value dynamically, one could read | |
2404 | * gibberish by doing partial reads while a list was changing. | |
1da177e4 | 2405 | */ |
2da8ca82 | 2406 | static int cpuset_common_seq_show(struct seq_file *sf, void *v) |
1da177e4 | 2407 | { |
2da8ca82 TH |
2408 | struct cpuset *cs = css_cs(seq_css(sf)); |
2409 | cpuset_filetype_t type = seq_cft(sf)->private; | |
51ffe411 | 2410 | int ret = 0; |
1da177e4 | 2411 | |
8447a0fe | 2412 | spin_lock_irq(&callback_lock); |
1da177e4 LT |
2413 | |
2414 | switch (type) { | |
2415 | case FILE_CPULIST: | |
e8e6d97c | 2416 | seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); |
1da177e4 LT |
2417 | break; |
2418 | case FILE_MEMLIST: | |
e8e6d97c | 2419 | seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); |
1da177e4 | 2420 | break; |
afd1a8b3 | 2421 | case FILE_EFFECTIVE_CPULIST: |
e8e6d97c | 2422 | seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); |
afd1a8b3 LZ |
2423 | break; |
2424 | case FILE_EFFECTIVE_MEMLIST: | |
e8e6d97c | 2425 | seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); |
afd1a8b3 | 2426 | break; |
5cf8114d WL |
2427 | case FILE_SUBPARTS_CPULIST: |
2428 | seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus)); | |
2429 | break; | |
1da177e4 | 2430 | default: |
51ffe411 | 2431 | ret = -EINVAL; |
1da177e4 | 2432 | } |
1da177e4 | 2433 | |
8447a0fe | 2434 | spin_unlock_irq(&callback_lock); |
51ffe411 | 2435 | return ret; |
1da177e4 LT |
2436 | } |
2437 | ||
182446d0 | 2438 | static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft) |
700fe1ab | 2439 | { |
182446d0 | 2440 | struct cpuset *cs = css_cs(css); |
700fe1ab PM |
2441 | cpuset_filetype_t type = cft->private; |
2442 | switch (type) { | |
2443 | case FILE_CPU_EXCLUSIVE: | |
2444 | return is_cpu_exclusive(cs); | |
2445 | case FILE_MEM_EXCLUSIVE: | |
2446 | return is_mem_exclusive(cs); | |
78608366 PM |
2447 | case FILE_MEM_HARDWALL: |
2448 | return is_mem_hardwall(cs); | |
700fe1ab PM |
2449 | case FILE_SCHED_LOAD_BALANCE: |
2450 | return is_sched_load_balance(cs); | |
2451 | case FILE_MEMORY_MIGRATE: | |
2452 | return is_memory_migrate(cs); | |
2453 | case FILE_MEMORY_PRESSURE_ENABLED: | |
2454 | return cpuset_memory_pressure_enabled; | |
2455 | case FILE_MEMORY_PRESSURE: | |
2456 | return fmeter_getrate(&cs->fmeter); | |
2457 | case FILE_SPREAD_PAGE: | |
2458 | return is_spread_page(cs); | |
2459 | case FILE_SPREAD_SLAB: | |
2460 | return is_spread_slab(cs); | |
2461 | default: | |
2462 | BUG(); | |
2463 | } | |
cf417141 MK |
2464 | |
2465 | /* Unreachable but makes gcc happy */ | |
2466 | return 0; | |
700fe1ab | 2467 | } |
1da177e4 | 2468 | |
182446d0 | 2469 | static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) |
5be7a479 | 2470 | { |
182446d0 | 2471 | struct cpuset *cs = css_cs(css); |
5be7a479 PM |
2472 | cpuset_filetype_t type = cft->private; |
2473 | switch (type) { | |
2474 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: | |
2475 | return cs->relax_domain_level; | |
2476 | default: | |
2477 | BUG(); | |
2478 | } | |
cf417141 MK |
2479 | |
2480 | /* Unrechable but makes gcc happy */ | |
2481 | return 0; | |
5be7a479 PM |
2482 | } |
2483 | ||
bb5b553c WL |
2484 | static int sched_partition_show(struct seq_file *seq, void *v) |
2485 | { | |
2486 | struct cpuset *cs = css_cs(seq_css(seq)); | |
2487 | ||
2488 | switch (cs->partition_root_state) { | |
2489 | case PRS_ENABLED: | |
2490 | seq_puts(seq, "root\n"); | |
2491 | break; | |
2492 | case PRS_DISABLED: | |
2493 | seq_puts(seq, "member\n"); | |
2494 | break; | |
2495 | case PRS_ERROR: | |
2496 | seq_puts(seq, "root invalid\n"); | |
2497 | break; | |
2498 | } | |
2499 | return 0; | |
2500 | } | |
2501 | ||
2502 | static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf, | |
2503 | size_t nbytes, loff_t off) | |
2504 | { | |
2505 | struct cpuset *cs = css_cs(of_css(of)); | |
2506 | int val; | |
2507 | int retval = -ENODEV; | |
2508 | ||
2509 | buf = strstrip(buf); | |
2510 | ||
2511 | /* | |
b1e3aeb1 | 2512 | * Convert "root" to ENABLED, and convert "member" to DISABLED. |
bb5b553c | 2513 | */ |
b1e3aeb1 | 2514 | if (!strcmp(buf, "root")) |
bb5b553c | 2515 | val = PRS_ENABLED; |
b1e3aeb1 | 2516 | else if (!strcmp(buf, "member")) |
bb5b553c WL |
2517 | val = PRS_DISABLED; |
2518 | else | |
2519 | return -EINVAL; | |
2520 | ||
2521 | css_get(&cs->css); | |
d74b27d6 | 2522 | get_online_cpus(); |
1243dc51 | 2523 | percpu_down_write(&cpuset_rwsem); |
bb5b553c WL |
2524 | if (!is_cpuset_online(cs)) |
2525 | goto out_unlock; | |
2526 | ||
2527 | retval = update_prstate(cs, val); | |
2528 | out_unlock: | |
1243dc51 | 2529 | percpu_up_write(&cpuset_rwsem); |
d74b27d6 | 2530 | put_online_cpus(); |
bb5b553c WL |
2531 | css_put(&cs->css); |
2532 | return retval ?: nbytes; | |
2533 | } | |
1da177e4 LT |
2534 | |
2535 | /* | |
2536 | * for the common functions, 'private' gives the type of file | |
2537 | */ | |
2538 | ||
4ec22e9c | 2539 | static struct cftype legacy_files[] = { |
addf2c73 PM |
2540 | { |
2541 | .name = "cpus", | |
2da8ca82 | 2542 | .seq_show = cpuset_common_seq_show, |
451af504 | 2543 | .write = cpuset_write_resmask, |
e3712395 | 2544 | .max_write_len = (100U + 6 * NR_CPUS), |
addf2c73 PM |
2545 | .private = FILE_CPULIST, |
2546 | }, | |
2547 | ||
2548 | { | |
2549 | .name = "mems", | |
2da8ca82 | 2550 | .seq_show = cpuset_common_seq_show, |
451af504 | 2551 | .write = cpuset_write_resmask, |
e3712395 | 2552 | .max_write_len = (100U + 6 * MAX_NUMNODES), |
addf2c73 PM |
2553 | .private = FILE_MEMLIST, |
2554 | }, | |
2555 | ||
afd1a8b3 LZ |
2556 | { |
2557 | .name = "effective_cpus", | |
2558 | .seq_show = cpuset_common_seq_show, | |
2559 | .private = FILE_EFFECTIVE_CPULIST, | |
2560 | }, | |
2561 | ||
2562 | { | |
2563 | .name = "effective_mems", | |
2564 | .seq_show = cpuset_common_seq_show, | |
2565 | .private = FILE_EFFECTIVE_MEMLIST, | |
2566 | }, | |
2567 | ||
addf2c73 PM |
2568 | { |
2569 | .name = "cpu_exclusive", | |
2570 | .read_u64 = cpuset_read_u64, | |
2571 | .write_u64 = cpuset_write_u64, | |
2572 | .private = FILE_CPU_EXCLUSIVE, | |
2573 | }, | |
2574 | ||
2575 | { | |
2576 | .name = "mem_exclusive", | |
2577 | .read_u64 = cpuset_read_u64, | |
2578 | .write_u64 = cpuset_write_u64, | |
2579 | .private = FILE_MEM_EXCLUSIVE, | |
2580 | }, | |
2581 | ||
78608366 PM |
2582 | { |
2583 | .name = "mem_hardwall", | |
2584 | .read_u64 = cpuset_read_u64, | |
2585 | .write_u64 = cpuset_write_u64, | |
2586 | .private = FILE_MEM_HARDWALL, | |
2587 | }, | |
2588 | ||
addf2c73 PM |
2589 | { |
2590 | .name = "sched_load_balance", | |
2591 | .read_u64 = cpuset_read_u64, | |
2592 | .write_u64 = cpuset_write_u64, | |
2593 | .private = FILE_SCHED_LOAD_BALANCE, | |
2594 | }, | |
2595 | ||
2596 | { | |
2597 | .name = "sched_relax_domain_level", | |
5be7a479 PM |
2598 | .read_s64 = cpuset_read_s64, |
2599 | .write_s64 = cpuset_write_s64, | |
addf2c73 PM |
2600 | .private = FILE_SCHED_RELAX_DOMAIN_LEVEL, |
2601 | }, | |
2602 | ||
2603 | { | |
2604 | .name = "memory_migrate", | |
2605 | .read_u64 = cpuset_read_u64, | |
2606 | .write_u64 = cpuset_write_u64, | |
2607 | .private = FILE_MEMORY_MIGRATE, | |
2608 | }, | |
2609 | ||
2610 | { | |
2611 | .name = "memory_pressure", | |
2612 | .read_u64 = cpuset_read_u64, | |
1c08c22c | 2613 | .private = FILE_MEMORY_PRESSURE, |
addf2c73 PM |
2614 | }, |
2615 | ||
2616 | { | |
2617 | .name = "memory_spread_page", | |
2618 | .read_u64 = cpuset_read_u64, | |
2619 | .write_u64 = cpuset_write_u64, | |
2620 | .private = FILE_SPREAD_PAGE, | |
2621 | }, | |
2622 | ||
2623 | { | |
2624 | .name = "memory_spread_slab", | |
2625 | .read_u64 = cpuset_read_u64, | |
2626 | .write_u64 = cpuset_write_u64, | |
2627 | .private = FILE_SPREAD_SLAB, | |
2628 | }, | |
3e0d98b9 | 2629 | |
4baf6e33 TH |
2630 | { |
2631 | .name = "memory_pressure_enabled", | |
2632 | .flags = CFTYPE_ONLY_ON_ROOT, | |
2633 | .read_u64 = cpuset_read_u64, | |
2634 | .write_u64 = cpuset_write_u64, | |
2635 | .private = FILE_MEMORY_PRESSURE_ENABLED, | |
2636 | }, | |
1da177e4 | 2637 | |
4baf6e33 TH |
2638 | { } /* terminate */ |
2639 | }; | |
1da177e4 | 2640 | |
4ec22e9c WL |
2641 | /* |
2642 | * This is currently a minimal set for the default hierarchy. It can be | |
2643 | * expanded later on by migrating more features and control files from v1. | |
2644 | */ | |
2645 | static struct cftype dfl_files[] = { | |
2646 | { | |
2647 | .name = "cpus", | |
2648 | .seq_show = cpuset_common_seq_show, | |
2649 | .write = cpuset_write_resmask, | |
2650 | .max_write_len = (100U + 6 * NR_CPUS), | |
2651 | .private = FILE_CPULIST, | |
2652 | .flags = CFTYPE_NOT_ON_ROOT, | |
2653 | }, | |
2654 | ||
2655 | { | |
2656 | .name = "mems", | |
2657 | .seq_show = cpuset_common_seq_show, | |
2658 | .write = cpuset_write_resmask, | |
2659 | .max_write_len = (100U + 6 * MAX_NUMNODES), | |
2660 | .private = FILE_MEMLIST, | |
2661 | .flags = CFTYPE_NOT_ON_ROOT, | |
2662 | }, | |
2663 | ||
2664 | { | |
2665 | .name = "cpus.effective", | |
2666 | .seq_show = cpuset_common_seq_show, | |
2667 | .private = FILE_EFFECTIVE_CPULIST, | |
4ec22e9c WL |
2668 | }, |
2669 | ||
2670 | { | |
2671 | .name = "mems.effective", | |
2672 | .seq_show = cpuset_common_seq_show, | |
2673 | .private = FILE_EFFECTIVE_MEMLIST, | |
4ec22e9c WL |
2674 | }, |
2675 | ||
ee8dde0c | 2676 | { |
b1e3aeb1 | 2677 | .name = "cpus.partition", |
bb5b553c WL |
2678 | .seq_show = sched_partition_show, |
2679 | .write = sched_partition_write, | |
ee8dde0c WL |
2680 | .private = FILE_PARTITION_ROOT, |
2681 | .flags = CFTYPE_NOT_ON_ROOT, | |
2682 | }, | |
2683 | ||
5cf8114d WL |
2684 | { |
2685 | .name = "cpus.subpartitions", | |
2686 | .seq_show = cpuset_common_seq_show, | |
2687 | .private = FILE_SUBPARTS_CPULIST, | |
2688 | .flags = CFTYPE_DEBUG, | |
2689 | }, | |
2690 | ||
4ec22e9c WL |
2691 | { } /* terminate */ |
2692 | }; | |
2693 | ||
2694 | ||
1da177e4 | 2695 | /* |
92fb9748 | 2696 | * cpuset_css_alloc - allocate a cpuset css |
c9e5fe66 | 2697 | * cgrp: control group that the new cpuset will be part of |
1da177e4 LT |
2698 | */ |
2699 | ||
eb95419b TH |
2700 | static struct cgroup_subsys_state * |
2701 | cpuset_css_alloc(struct cgroup_subsys_state *parent_css) | |
1da177e4 | 2702 | { |
c8f699bb | 2703 | struct cpuset *cs; |
1da177e4 | 2704 | |
eb95419b | 2705 | if (!parent_css) |
8793d854 | 2706 | return &top_cpuset.css; |
033fa1c5 | 2707 | |
c8f699bb | 2708 | cs = kzalloc(sizeof(*cs), GFP_KERNEL); |
1da177e4 | 2709 | if (!cs) |
8793d854 | 2710 | return ERR_PTR(-ENOMEM); |
bf92370c WL |
2711 | |
2712 | if (alloc_cpumasks(cs, NULL)) { | |
2713 | kfree(cs); | |
2714 | return ERR_PTR(-ENOMEM); | |
2715 | } | |
1da177e4 | 2716 | |
029190c5 | 2717 | set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); |
f9a86fcb | 2718 | nodes_clear(cs->mems_allowed); |
e2b9a3d7 | 2719 | nodes_clear(cs->effective_mems); |
3e0d98b9 | 2720 | fmeter_init(&cs->fmeter); |
1d3504fc | 2721 | cs->relax_domain_level = -1; |
1da177e4 | 2722 | |
c8f699bb TH |
2723 | return &cs->css; |
2724 | } | |
2725 | ||
eb95419b | 2726 | static int cpuset_css_online(struct cgroup_subsys_state *css) |
c8f699bb | 2727 | { |
eb95419b | 2728 | struct cpuset *cs = css_cs(css); |
c431069f | 2729 | struct cpuset *parent = parent_cs(cs); |
ae8086ce | 2730 | struct cpuset *tmp_cs; |
492eb21b | 2731 | struct cgroup_subsys_state *pos_css; |
c8f699bb TH |
2732 | |
2733 | if (!parent) | |
2734 | return 0; | |
2735 | ||
d74b27d6 | 2736 | get_online_cpus(); |
1243dc51 | 2737 | percpu_down_write(&cpuset_rwsem); |
5d21cc2d | 2738 | |
efeb77b2 | 2739 | set_bit(CS_ONLINE, &cs->flags); |
c8f699bb TH |
2740 | if (is_spread_page(parent)) |
2741 | set_bit(CS_SPREAD_PAGE, &cs->flags); | |
2742 | if (is_spread_slab(parent)) | |
2743 | set_bit(CS_SPREAD_SLAB, &cs->flags); | |
1da177e4 | 2744 | |
664eedde | 2745 | cpuset_inc(); |
033fa1c5 | 2746 | |
8447a0fe | 2747 | spin_lock_irq(&callback_lock); |
b8d1b8ee | 2748 | if (is_in_v2_mode()) { |
e2b9a3d7 LZ |
2749 | cpumask_copy(cs->effective_cpus, parent->effective_cpus); |
2750 | cs->effective_mems = parent->effective_mems; | |
4716909c WL |
2751 | cs->use_parent_ecpus = true; |
2752 | parent->child_ecpus_count++; | |
e2b9a3d7 | 2753 | } |
8447a0fe | 2754 | spin_unlock_irq(&callback_lock); |
e2b9a3d7 | 2755 | |
eb95419b | 2756 | if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) |
5d21cc2d | 2757 | goto out_unlock; |
033fa1c5 TH |
2758 | |
2759 | /* | |
2760 | * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is | |
2761 | * set. This flag handling is implemented in cgroup core for | |
2762 | * histrical reasons - the flag may be specified during mount. | |
2763 | * | |
2764 | * Currently, if any sibling cpusets have exclusive cpus or mem, we | |
2765 | * refuse to clone the configuration - thereby refusing the task to | |
2766 | * be entered, and as a result refusing the sys_unshare() or | |
2767 | * clone() which initiated it. If this becomes a problem for some | |
2768 | * users who wish to allow that scenario, then this could be | |
2769 | * changed to grant parent->cpus_allowed-sibling_cpus_exclusive | |
2770 | * (and likewise for mems) to the new cgroup. | |
2771 | */ | |
ae8086ce | 2772 | rcu_read_lock(); |
492eb21b | 2773 | cpuset_for_each_child(tmp_cs, pos_css, parent) { |
ae8086ce TH |
2774 | if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) { |
2775 | rcu_read_unlock(); | |
5d21cc2d | 2776 | goto out_unlock; |
ae8086ce | 2777 | } |
033fa1c5 | 2778 | } |
ae8086ce | 2779 | rcu_read_unlock(); |
033fa1c5 | 2780 | |
8447a0fe | 2781 | spin_lock_irq(&callback_lock); |
033fa1c5 | 2782 | cs->mems_allowed = parent->mems_allowed; |
790317e1 | 2783 | cs->effective_mems = parent->mems_allowed; |
033fa1c5 | 2784 | cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); |
790317e1 | 2785 | cpumask_copy(cs->effective_cpus, parent->cpus_allowed); |
cea74465 | 2786 | spin_unlock_irq(&callback_lock); |
5d21cc2d | 2787 | out_unlock: |
1243dc51 | 2788 | percpu_up_write(&cpuset_rwsem); |
d74b27d6 | 2789 | put_online_cpus(); |
c8f699bb TH |
2790 | return 0; |
2791 | } | |
2792 | ||
0b9e6965 ZH |
2793 | /* |
2794 | * If the cpuset being removed has its flag 'sched_load_balance' | |
2795 | * enabled, then simulate turning sched_load_balance off, which | |
ee8dde0c WL |
2796 | * will call rebuild_sched_domains_locked(). That is not needed |
2797 | * in the default hierarchy where only changes in partition | |
2798 | * will cause repartitioning. | |
2799 | * | |
2800 | * If the cpuset has the 'sched.partition' flag enabled, simulate | |
2801 | * turning 'sched.partition" off. | |
0b9e6965 ZH |
2802 | */ |
2803 | ||
eb95419b | 2804 | static void cpuset_css_offline(struct cgroup_subsys_state *css) |
c8f699bb | 2805 | { |
eb95419b | 2806 | struct cpuset *cs = css_cs(css); |
c8f699bb | 2807 | |
d74b27d6 | 2808 | get_online_cpus(); |
1243dc51 | 2809 | percpu_down_write(&cpuset_rwsem); |
c8f699bb | 2810 | |
ee8dde0c WL |
2811 | if (is_partition_root(cs)) |
2812 | update_prstate(cs, 0); | |
2813 | ||
2814 | if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && | |
2815 | is_sched_load_balance(cs)) | |
c8f699bb TH |
2816 | update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); |
2817 | ||
4716909c WL |
2818 | if (cs->use_parent_ecpus) { |
2819 | struct cpuset *parent = parent_cs(cs); | |
2820 | ||
2821 | cs->use_parent_ecpus = false; | |
2822 | parent->child_ecpus_count--; | |
2823 | } | |
2824 | ||
664eedde | 2825 | cpuset_dec(); |
efeb77b2 | 2826 | clear_bit(CS_ONLINE, &cs->flags); |
c8f699bb | 2827 | |
1243dc51 | 2828 | percpu_up_write(&cpuset_rwsem); |
d74b27d6 | 2829 | put_online_cpus(); |
1da177e4 LT |
2830 | } |
2831 | ||
eb95419b | 2832 | static void cpuset_css_free(struct cgroup_subsys_state *css) |
1da177e4 | 2833 | { |
eb95419b | 2834 | struct cpuset *cs = css_cs(css); |
1da177e4 | 2835 | |
bf92370c | 2836 | free_cpuset(cs); |
1da177e4 LT |
2837 | } |
2838 | ||
39bd0d15 LZ |
2839 | static void cpuset_bind(struct cgroup_subsys_state *root_css) |
2840 | { | |
1243dc51 | 2841 | percpu_down_write(&cpuset_rwsem); |
8447a0fe | 2842 | spin_lock_irq(&callback_lock); |
39bd0d15 | 2843 | |
b8d1b8ee | 2844 | if (is_in_v2_mode()) { |
39bd0d15 LZ |
2845 | cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); |
2846 | top_cpuset.mems_allowed = node_possible_map; | |
2847 | } else { | |
2848 | cpumask_copy(top_cpuset.cpus_allowed, | |
2849 | top_cpuset.effective_cpus); | |
2850 | top_cpuset.mems_allowed = top_cpuset.effective_mems; | |
2851 | } | |
2852 | ||
8447a0fe | 2853 | spin_unlock_irq(&callback_lock); |
1243dc51 | 2854 | percpu_up_write(&cpuset_rwsem); |
39bd0d15 LZ |
2855 | } |
2856 | ||
06f4e948 ZL |
2857 | /* |
2858 | * Make sure the new task conform to the current state of its parent, | |
2859 | * which could have been changed by cpuset just after it inherits the | |
2860 | * state from the parent and before it sits on the cgroup's task list. | |
2861 | */ | |
8a15b817 | 2862 | static void cpuset_fork(struct task_struct *task) |
06f4e948 ZL |
2863 | { |
2864 | if (task_css_is_root(task, cpuset_cgrp_id)) | |
2865 | return; | |
2866 | ||
3bd37062 | 2867 | set_cpus_allowed_ptr(task, current->cpus_ptr); |
06f4e948 ZL |
2868 | task->mems_allowed = current->mems_allowed; |
2869 | } | |
2870 | ||
073219e9 | 2871 | struct cgroup_subsys cpuset_cgrp_subsys = { |
39bd0d15 LZ |
2872 | .css_alloc = cpuset_css_alloc, |
2873 | .css_online = cpuset_css_online, | |
2874 | .css_offline = cpuset_css_offline, | |
2875 | .css_free = cpuset_css_free, | |
2876 | .can_attach = cpuset_can_attach, | |
2877 | .cancel_attach = cpuset_cancel_attach, | |
2878 | .attach = cpuset_attach, | |
5cf1cacb | 2879 | .post_attach = cpuset_post_attach, |
39bd0d15 | 2880 | .bind = cpuset_bind, |
06f4e948 | 2881 | .fork = cpuset_fork, |
4ec22e9c WL |
2882 | .legacy_cftypes = legacy_files, |
2883 | .dfl_cftypes = dfl_files, | |
b38e42e9 | 2884 | .early_init = true, |
4ec22e9c | 2885 | .threaded = true, |
8793d854 PM |
2886 | }; |
2887 | ||
1da177e4 LT |
2888 | /** |
2889 | * cpuset_init - initialize cpusets at system boot | |
2890 | * | |
d5f68d33 | 2891 | * Description: Initialize top_cpuset |
1da177e4 LT |
2892 | **/ |
2893 | ||
2894 | int __init cpuset_init(void) | |
2895 | { | |
1243dc51 JL |
2896 | BUG_ON(percpu_init_rwsem(&cpuset_rwsem)); |
2897 | ||
75fa8e5d NMG |
2898 | BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL)); |
2899 | BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL)); | |
bf92370c | 2900 | BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL)); |
58568d2a | 2901 | |
300ed6cb | 2902 | cpumask_setall(top_cpuset.cpus_allowed); |
f9a86fcb | 2903 | nodes_setall(top_cpuset.mems_allowed); |
e2b9a3d7 LZ |
2904 | cpumask_setall(top_cpuset.effective_cpus); |
2905 | nodes_setall(top_cpuset.effective_mems); | |
1da177e4 | 2906 | |
3e0d98b9 | 2907 | fmeter_init(&top_cpuset.fmeter); |
029190c5 | 2908 | set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags); |
1d3504fc | 2909 | top_cpuset.relax_domain_level = -1; |
1da177e4 | 2910 | |
75fa8e5d | 2911 | BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL)); |
2341d1b6 | 2912 | |
8793d854 | 2913 | return 0; |
1da177e4 LT |
2914 | } |
2915 | ||
b1aac8bb | 2916 | /* |
cf417141 | 2917 | * If CPU and/or memory hotplug handlers, below, unplug any CPUs |
b1aac8bb PJ |
2918 | * or memory nodes, we need to walk over the cpuset hierarchy, |
2919 | * removing that CPU or node from all cpusets. If this removes the | |
956db3ca CW |
2920 | * last CPU or node from a cpuset, then move the tasks in the empty |
2921 | * cpuset to its next-highest non-empty parent. | |
b1aac8bb | 2922 | */ |
956db3ca CW |
2923 | static void remove_tasks_in_empty_cpuset(struct cpuset *cs) |
2924 | { | |
2925 | struct cpuset *parent; | |
2926 | ||
956db3ca CW |
2927 | /* |
2928 | * Find its next-highest non-empty parent, (top cpuset | |
2929 | * has online cpus, so can't be empty). | |
2930 | */ | |
c431069f | 2931 | parent = parent_cs(cs); |
300ed6cb | 2932 | while (cpumask_empty(parent->cpus_allowed) || |
b4501295 | 2933 | nodes_empty(parent->mems_allowed)) |
c431069f | 2934 | parent = parent_cs(parent); |
956db3ca | 2935 | |
8cc99345 | 2936 | if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { |
12d3089c | 2937 | pr_err("cpuset: failed to transfer tasks out of empty cpuset "); |
e61734c5 TH |
2938 | pr_cont_cgroup_name(cs->css.cgroup); |
2939 | pr_cont("\n"); | |
8cc99345 | 2940 | } |
956db3ca CW |
2941 | } |
2942 | ||
be4c9dd7 LZ |
2943 | static void |
2944 | hotplug_update_tasks_legacy(struct cpuset *cs, | |
2945 | struct cpumask *new_cpus, nodemask_t *new_mems, | |
2946 | bool cpus_updated, bool mems_updated) | |
390a36aa LZ |
2947 | { |
2948 | bool is_empty; | |
2949 | ||
8447a0fe | 2950 | spin_lock_irq(&callback_lock); |
be4c9dd7 LZ |
2951 | cpumask_copy(cs->cpus_allowed, new_cpus); |
2952 | cpumask_copy(cs->effective_cpus, new_cpus); | |
2953 | cs->mems_allowed = *new_mems; | |
2954 | cs->effective_mems = *new_mems; | |
8447a0fe | 2955 | spin_unlock_irq(&callback_lock); |
390a36aa LZ |
2956 | |
2957 | /* | |
2958 | * Don't call update_tasks_cpumask() if the cpuset becomes empty, | |
2959 | * as the tasks will be migratecd to an ancestor. | |
2960 | */ | |
be4c9dd7 | 2961 | if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) |
390a36aa | 2962 | update_tasks_cpumask(cs); |
be4c9dd7 | 2963 | if (mems_updated && !nodes_empty(cs->mems_allowed)) |
390a36aa LZ |
2964 | update_tasks_nodemask(cs); |
2965 | ||
2966 | is_empty = cpumask_empty(cs->cpus_allowed) || | |
2967 | nodes_empty(cs->mems_allowed); | |
2968 | ||
1243dc51 | 2969 | percpu_up_write(&cpuset_rwsem); |
390a36aa LZ |
2970 | |
2971 | /* | |
2972 | * Move tasks to the nearest ancestor with execution resources, | |
2973 | * This is full cgroup operation which will also call back into | |
2974 | * cpuset. Should be done outside any lock. | |
2975 | */ | |
2976 | if (is_empty) | |
2977 | remove_tasks_in_empty_cpuset(cs); | |
2978 | ||
1243dc51 | 2979 | percpu_down_write(&cpuset_rwsem); |
390a36aa LZ |
2980 | } |
2981 | ||
be4c9dd7 LZ |
2982 | static void |
2983 | hotplug_update_tasks(struct cpuset *cs, | |
2984 | struct cpumask *new_cpus, nodemask_t *new_mems, | |
2985 | bool cpus_updated, bool mems_updated) | |
390a36aa | 2986 | { |
be4c9dd7 LZ |
2987 | if (cpumask_empty(new_cpus)) |
2988 | cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); | |
2989 | if (nodes_empty(*new_mems)) | |
2990 | *new_mems = parent_cs(cs)->effective_mems; | |
2991 | ||
8447a0fe | 2992 | spin_lock_irq(&callback_lock); |
be4c9dd7 LZ |
2993 | cpumask_copy(cs->effective_cpus, new_cpus); |
2994 | cs->effective_mems = *new_mems; | |
8447a0fe | 2995 | spin_unlock_irq(&callback_lock); |
390a36aa | 2996 | |
be4c9dd7 | 2997 | if (cpus_updated) |
390a36aa | 2998 | update_tasks_cpumask(cs); |
be4c9dd7 | 2999 | if (mems_updated) |
390a36aa LZ |
3000 | update_tasks_nodemask(cs); |
3001 | } | |
3002 | ||
4b842da2 WL |
3003 | static bool force_rebuild; |
3004 | ||
3005 | void cpuset_force_rebuild(void) | |
3006 | { | |
3007 | force_rebuild = true; | |
3008 | } | |
3009 | ||
deb7aa30 | 3010 | /** |
388afd85 | 3011 | * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug |
deb7aa30 | 3012 | * @cs: cpuset in interest |
4b842da2 | 3013 | * @tmp: the tmpmasks structure pointer |
956db3ca | 3014 | * |
deb7aa30 TH |
3015 | * Compare @cs's cpu and mem masks against top_cpuset and if some have gone |
3016 | * offline, update @cs accordingly. If @cs ends up with no CPU or memory, | |
3017 | * all its tasks are moved to the nearest ancestor with both resources. | |
80d1fa64 | 3018 | */ |
4b842da2 | 3019 | static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) |
80d1fa64 | 3020 | { |
be4c9dd7 LZ |
3021 | static cpumask_t new_cpus; |
3022 | static nodemask_t new_mems; | |
3023 | bool cpus_updated; | |
3024 | bool mems_updated; | |
4b842da2 | 3025 | struct cpuset *parent; |
e44193d3 LZ |
3026 | retry: |
3027 | wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); | |
80d1fa64 | 3028 | |
1243dc51 | 3029 | percpu_down_write(&cpuset_rwsem); |
7ddf96b0 | 3030 | |
e44193d3 LZ |
3031 | /* |
3032 | * We have raced with task attaching. We wait until attaching | |
3033 | * is finished, so we won't attach a task to an empty cpuset. | |
3034 | */ | |
3035 | if (cs->attach_in_progress) { | |
1243dc51 | 3036 | percpu_up_write(&cpuset_rwsem); |
e44193d3 LZ |
3037 | goto retry; |
3038 | } | |
3039 | ||
4b842da2 WL |
3040 | parent = parent_cs(cs); |
3041 | compute_effective_cpumask(&new_cpus, cs, parent); | |
3042 | nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); | |
3043 | ||
3044 | if (cs->nr_subparts_cpus) | |
3045 | /* | |
3046 | * Make sure that CPUs allocated to child partitions | |
3047 | * do not show up in effective_cpus. | |
3048 | */ | |
3049 | cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus); | |
3050 | ||
3051 | if (!tmp || !cs->partition_root_state) | |
3052 | goto update_tasks; | |
80d1fa64 | 3053 | |
4b842da2 WL |
3054 | /* |
3055 | * In the unlikely event that a partition root has empty | |
3056 | * effective_cpus or its parent becomes erroneous, we have to | |
3057 | * transition it to the erroneous state. | |
3058 | */ | |
3059 | if (is_partition_root(cs) && (cpumask_empty(&new_cpus) || | |
3060 | (parent->partition_root_state == PRS_ERROR))) { | |
3061 | if (cs->nr_subparts_cpus) { | |
3062 | cs->nr_subparts_cpus = 0; | |
3063 | cpumask_clear(cs->subparts_cpus); | |
3064 | compute_effective_cpumask(&new_cpus, cs, parent); | |
3065 | } | |
80d1fa64 | 3066 | |
4b842da2 WL |
3067 | /* |
3068 | * If the effective_cpus is empty because the child | |
3069 | * partitions take away all the CPUs, we can keep | |
3070 | * the current partition and let the child partitions | |
3071 | * fight for available CPUs. | |
3072 | */ | |
3073 | if ((parent->partition_root_state == PRS_ERROR) || | |
3074 | cpumask_empty(&new_cpus)) { | |
3075 | update_parent_subparts_cpumask(cs, partcmd_disable, | |
3076 | NULL, tmp); | |
3077 | cs->partition_root_state = PRS_ERROR; | |
3078 | } | |
3079 | cpuset_force_rebuild(); | |
3080 | } | |
3081 | ||
3082 | /* | |
3083 | * On the other hand, an erroneous partition root may be transitioned | |
3084 | * back to a regular one or a partition root with no CPU allocated | |
3085 | * from the parent may change to erroneous. | |
3086 | */ | |
3087 | if (is_partition_root(parent) && | |
3088 | ((cs->partition_root_state == PRS_ERROR) || | |
3089 | !cpumask_intersects(&new_cpus, parent->subparts_cpus)) && | |
3090 | update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp)) | |
3091 | cpuset_force_rebuild(); | |
3092 | ||
3093 | update_tasks: | |
be4c9dd7 LZ |
3094 | cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); |
3095 | mems_updated = !nodes_equal(new_mems, cs->effective_mems); | |
deb7aa30 | 3096 | |
b8d1b8ee | 3097 | if (is_in_v2_mode()) |
be4c9dd7 LZ |
3098 | hotplug_update_tasks(cs, &new_cpus, &new_mems, |
3099 | cpus_updated, mems_updated); | |
390a36aa | 3100 | else |
be4c9dd7 LZ |
3101 | hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, |
3102 | cpus_updated, mems_updated); | |
8d033948 | 3103 | |
1243dc51 | 3104 | percpu_up_write(&cpuset_rwsem); |
b1aac8bb PJ |
3105 | } |
3106 | ||
deb7aa30 | 3107 | /** |
2b729fe7 | 3108 | * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset |
956db3ca | 3109 | * |
deb7aa30 TH |
3110 | * This function is called after either CPU or memory configuration has |
3111 | * changed and updates cpuset accordingly. The top_cpuset is always | |
3112 | * synchronized to cpu_active_mask and N_MEMORY, which is necessary in | |
3113 | * order to make cpusets transparent (of no affect) on systems that are | |
3114 | * actively using CPU hotplug but making no active use of cpusets. | |
956db3ca | 3115 | * |
deb7aa30 | 3116 | * Non-root cpusets are only affected by offlining. If any CPUs or memory |
388afd85 LZ |
3117 | * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on |
3118 | * all descendants. | |
956db3ca | 3119 | * |
deb7aa30 TH |
3120 | * Note that CPU offlining during suspend is ignored. We don't modify |
3121 | * cpusets across suspend/resume cycles at all. | |
956db3ca | 3122 | */ |
2b729fe7 | 3123 | static void cpuset_hotplug_workfn(struct work_struct *work) |
b1aac8bb | 3124 | { |
5c5cc623 LZ |
3125 | static cpumask_t new_cpus; |
3126 | static nodemask_t new_mems; | |
deb7aa30 | 3127 | bool cpus_updated, mems_updated; |
b8d1b8ee | 3128 | bool on_dfl = is_in_v2_mode(); |
4b842da2 WL |
3129 | struct tmpmasks tmp, *ptmp = NULL; |
3130 | ||
3131 | if (on_dfl && !alloc_cpumasks(NULL, &tmp)) | |
3132 | ptmp = &tmp; | |
b1aac8bb | 3133 | |
1243dc51 | 3134 | percpu_down_write(&cpuset_rwsem); |
956db3ca | 3135 | |
deb7aa30 TH |
3136 | /* fetch the available cpus/mems and find out which changed how */ |
3137 | cpumask_copy(&new_cpus, cpu_active_mask); | |
3138 | new_mems = node_states[N_MEMORY]; | |
7ddf96b0 | 3139 | |
4b842da2 WL |
3140 | /* |
3141 | * If subparts_cpus is populated, it is likely that the check below | |
3142 | * will produce a false positive on cpus_updated when the cpu list | |
3143 | * isn't changed. It is extra work, but it is better to be safe. | |
3144 | */ | |
7e88291b LZ |
3145 | cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus); |
3146 | mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems); | |
7ddf96b0 | 3147 | |
deb7aa30 TH |
3148 | /* synchronize cpus_allowed to cpu_active_mask */ |
3149 | if (cpus_updated) { | |
8447a0fe | 3150 | spin_lock_irq(&callback_lock); |
7e88291b LZ |
3151 | if (!on_dfl) |
3152 | cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); | |
4b842da2 WL |
3153 | /* |
3154 | * Make sure that CPUs allocated to child partitions | |
3155 | * do not show up in effective_cpus. If no CPU is left, | |
3156 | * we clear the subparts_cpus & let the child partitions | |
3157 | * fight for the CPUs again. | |
3158 | */ | |
3159 | if (top_cpuset.nr_subparts_cpus) { | |
3160 | if (cpumask_subset(&new_cpus, | |
3161 | top_cpuset.subparts_cpus)) { | |
3162 | top_cpuset.nr_subparts_cpus = 0; | |
3163 | cpumask_clear(top_cpuset.subparts_cpus); | |
3164 | } else { | |
3165 | cpumask_andnot(&new_cpus, &new_cpus, | |
3166 | top_cpuset.subparts_cpus); | |
3167 | } | |
3168 | } | |
1344ab9c | 3169 | cpumask_copy(top_cpuset.effective_cpus, &new_cpus); |
8447a0fe | 3170 | spin_unlock_irq(&callback_lock); |
deb7aa30 TH |
3171 | /* we don't mess with cpumasks of tasks in top_cpuset */ |
3172 | } | |
b4501295 | 3173 | |
deb7aa30 TH |
3174 | /* synchronize mems_allowed to N_MEMORY */ |
3175 | if (mems_updated) { | |
8447a0fe | 3176 | spin_lock_irq(&callback_lock); |
7e88291b LZ |
3177 | if (!on_dfl) |
3178 | top_cpuset.mems_allowed = new_mems; | |
1344ab9c | 3179 | top_cpuset.effective_mems = new_mems; |
8447a0fe | 3180 | spin_unlock_irq(&callback_lock); |
d66393e5 | 3181 | update_tasks_nodemask(&top_cpuset); |
deb7aa30 | 3182 | } |
b4501295 | 3183 | |
1243dc51 | 3184 | percpu_up_write(&cpuset_rwsem); |
388afd85 | 3185 | |
5c5cc623 LZ |
3186 | /* if cpus or mems changed, we need to propagate to descendants */ |
3187 | if (cpus_updated || mems_updated) { | |
deb7aa30 | 3188 | struct cpuset *cs; |
492eb21b | 3189 | struct cgroup_subsys_state *pos_css; |
f9b4fb8d | 3190 | |
fc560a26 | 3191 | rcu_read_lock(); |
492eb21b | 3192 | cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { |
ec903c0c | 3193 | if (cs == &top_cpuset || !css_tryget_online(&cs->css)) |
388afd85 LZ |
3194 | continue; |
3195 | rcu_read_unlock(); | |
7ddf96b0 | 3196 | |
4b842da2 | 3197 | cpuset_hotplug_update_tasks(cs, ptmp); |
b4501295 | 3198 | |
388afd85 LZ |
3199 | rcu_read_lock(); |
3200 | css_put(&cs->css); | |
3201 | } | |
3202 | rcu_read_unlock(); | |
3203 | } | |
8d033948 | 3204 | |
deb7aa30 | 3205 | /* rebuild sched domains if cpus_allowed has changed */ |
50e76632 PZ |
3206 | if (cpus_updated || force_rebuild) { |
3207 | force_rebuild = false; | |
2b729fe7 | 3208 | rebuild_sched_domains(); |
50e76632 | 3209 | } |
4b842da2 WL |
3210 | |
3211 | free_cpumasks(NULL, ptmp); | |
b1aac8bb PJ |
3212 | } |
3213 | ||
2b729fe7 | 3214 | void cpuset_update_active_cpus(void) |
4c4d50f7 | 3215 | { |
2b729fe7 TH |
3216 | /* |
3217 | * We're inside cpu hotplug critical region which usually nests | |
3218 | * inside cgroup synchronization. Bounce actual hotplug processing | |
3219 | * to a work item to avoid reverse locking order. | |
3220 | */ | |
3221 | schedule_work(&cpuset_hotplug_work); | |
4c4d50f7 | 3222 | } |
4c4d50f7 | 3223 | |
2b729fe7 | 3224 | void cpuset_wait_for_hotplug(void) |
50e76632 | 3225 | { |
2b729fe7 | 3226 | flush_work(&cpuset_hotplug_work); |
50e76632 PZ |
3227 | } |
3228 | ||
38837fc7 | 3229 | /* |
38d7bee9 LJ |
3230 | * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY]. |
3231 | * Call this routine anytime after node_states[N_MEMORY] changes. | |
a1cd2b13 | 3232 | * See cpuset_update_active_cpus() for CPU hotplug handling. |
38837fc7 | 3233 | */ |
f481891f MX |
3234 | static int cpuset_track_online_nodes(struct notifier_block *self, |
3235 | unsigned long action, void *arg) | |
38837fc7 | 3236 | { |
3a5a6d0c | 3237 | schedule_work(&cpuset_hotplug_work); |
f481891f | 3238 | return NOTIFY_OK; |
38837fc7 | 3239 | } |
d8f10cb3 AM |
3240 | |
3241 | static struct notifier_block cpuset_track_online_nodes_nb = { | |
3242 | .notifier_call = cpuset_track_online_nodes, | |
3243 | .priority = 10, /* ??! */ | |
3244 | }; | |
38837fc7 | 3245 | |
1da177e4 LT |
3246 | /** |
3247 | * cpuset_init_smp - initialize cpus_allowed | |
3248 | * | |
3249 | * Description: Finish top cpuset after cpu, node maps are initialized | |
d8f10cb3 | 3250 | */ |
1da177e4 LT |
3251 | void __init cpuset_init_smp(void) |
3252 | { | |
6ad4c188 | 3253 | cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); |
38d7bee9 | 3254 | top_cpuset.mems_allowed = node_states[N_MEMORY]; |
33ad801d | 3255 | top_cpuset.old_mems_allowed = top_cpuset.mems_allowed; |
4c4d50f7 | 3256 | |
e2b9a3d7 LZ |
3257 | cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask); |
3258 | top_cpuset.effective_mems = node_states[N_MEMORY]; | |
3259 | ||
d8f10cb3 | 3260 | register_hotmemory_notifier(&cpuset_track_online_nodes_nb); |
e93ad19d TH |
3261 | |
3262 | cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0); | |
3263 | BUG_ON(!cpuset_migrate_mm_wq); | |
1da177e4 LT |
3264 | } |
3265 | ||
3266 | /** | |
1da177e4 LT |
3267 | * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset. |
3268 | * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. | |
6af866af | 3269 | * @pmask: pointer to struct cpumask variable to receive cpus_allowed set. |
1da177e4 | 3270 | * |
300ed6cb | 3271 | * Description: Returns the cpumask_var_t cpus_allowed of the cpuset |
1da177e4 | 3272 | * attached to the specified @tsk. Guaranteed to return some non-empty |
5f054e31 | 3273 | * subset of cpu_online_mask, even if this means going outside the |
1da177e4 LT |
3274 | * tasks cpuset. |
3275 | **/ | |
3276 | ||
6af866af | 3277 | void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) |
1da177e4 | 3278 | { |
8447a0fe VD |
3279 | unsigned long flags; |
3280 | ||
3281 | spin_lock_irqsave(&callback_lock, flags); | |
b8dadcb5 | 3282 | rcu_read_lock(); |
ae1c8023 | 3283 | guarantee_online_cpus(task_cs(tsk), pmask); |
b8dadcb5 | 3284 | rcu_read_unlock(); |
8447a0fe | 3285 | spin_unlock_irqrestore(&callback_lock, flags); |
1da177e4 LT |
3286 | } |
3287 | ||
d477f8c2 JS |
3288 | /** |
3289 | * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe. | |
3290 | * @tsk: pointer to task_struct with which the scheduler is struggling | |
3291 | * | |
3292 | * Description: In the case that the scheduler cannot find an allowed cpu in | |
3293 | * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy | |
3294 | * mode however, this value is the same as task_cs(tsk)->effective_cpus, | |
3295 | * which will not contain a sane cpumask during cases such as cpu hotplugging. | |
3296 | * This is the absolute last resort for the scheduler and it is only used if | |
3297 | * _every_ other avenue has been traveled. | |
3298 | **/ | |
3299 | ||
2baab4e9 | 3300 | void cpuset_cpus_allowed_fallback(struct task_struct *tsk) |
9084bb82 | 3301 | { |
9084bb82 | 3302 | rcu_read_lock(); |
d477f8c2 JS |
3303 | do_set_cpus_allowed(tsk, is_in_v2_mode() ? |
3304 | task_cs(tsk)->cpus_allowed : cpu_possible_mask); | |
9084bb82 ON |
3305 | rcu_read_unlock(); |
3306 | ||
3307 | /* | |
3308 | * We own tsk->cpus_allowed, nobody can change it under us. | |
3309 | * | |
3310 | * But we used cs && cs->cpus_allowed lockless and thus can | |
3311 | * race with cgroup_attach_task() or update_cpumask() and get | |
3312 | * the wrong tsk->cpus_allowed. However, both cases imply the | |
3313 | * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr() | |
3314 | * which takes task_rq_lock(). | |
3315 | * | |
3316 | * If we are called after it dropped the lock we must see all | |
3317 | * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary | |
3318 | * set any mask even if it is not right from task_cs() pov, | |
3319 | * the pending set_cpus_allowed_ptr() will fix things. | |
2baab4e9 PZ |
3320 | * |
3321 | * select_fallback_rq() will fix things ups and set cpu_possible_mask | |
3322 | * if required. | |
9084bb82 | 3323 | */ |
9084bb82 ON |
3324 | } |
3325 | ||
8f4ab07f | 3326 | void __init cpuset_init_current_mems_allowed(void) |
1da177e4 | 3327 | { |
f9a86fcb | 3328 | nodes_setall(current->mems_allowed); |
1da177e4 LT |
3329 | } |
3330 | ||
909d75a3 PJ |
3331 | /** |
3332 | * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset. | |
3333 | * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed. | |
3334 | * | |
3335 | * Description: Returns the nodemask_t mems_allowed of the cpuset | |
3336 | * attached to the specified @tsk. Guaranteed to return some non-empty | |
38d7bee9 | 3337 | * subset of node_states[N_MEMORY], even if this means going outside the |
909d75a3 PJ |
3338 | * tasks cpuset. |
3339 | **/ | |
3340 | ||
3341 | nodemask_t cpuset_mems_allowed(struct task_struct *tsk) | |
3342 | { | |
3343 | nodemask_t mask; | |
8447a0fe | 3344 | unsigned long flags; |
909d75a3 | 3345 | |
8447a0fe | 3346 | spin_lock_irqsave(&callback_lock, flags); |
b8dadcb5 | 3347 | rcu_read_lock(); |
ae1c8023 | 3348 | guarantee_online_mems(task_cs(tsk), &mask); |
b8dadcb5 | 3349 | rcu_read_unlock(); |
8447a0fe | 3350 | spin_unlock_irqrestore(&callback_lock, flags); |
909d75a3 PJ |
3351 | |
3352 | return mask; | |
3353 | } | |
3354 | ||
d9fd8a6d | 3355 | /** |
19770b32 MG |
3356 | * cpuset_nodemask_valid_mems_allowed - check nodemask vs. curremt mems_allowed |
3357 | * @nodemask: the nodemask to be checked | |
d9fd8a6d | 3358 | * |
19770b32 | 3359 | * Are any of the nodes in the nodemask allowed in current->mems_allowed? |
1da177e4 | 3360 | */ |
19770b32 | 3361 | int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) |
1da177e4 | 3362 | { |
19770b32 | 3363 | return nodes_intersects(*nodemask, current->mems_allowed); |
1da177e4 LT |
3364 | } |
3365 | ||
9bf2229f | 3366 | /* |
78608366 PM |
3367 | * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or |
3368 | * mem_hardwall ancestor to the specified cpuset. Call holding | |
8447a0fe | 3369 | * callback_lock. If no ancestor is mem_exclusive or mem_hardwall |
78608366 | 3370 | * (an unusual configuration), then returns the root cpuset. |
9bf2229f | 3371 | */ |
c9710d80 | 3372 | static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) |
9bf2229f | 3373 | { |
c431069f TH |
3374 | while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) |
3375 | cs = parent_cs(cs); | |
9bf2229f PJ |
3376 | return cs; |
3377 | } | |
3378 | ||
d9fd8a6d | 3379 | /** |
344736f2 | 3380 | * cpuset_node_allowed - Can we allocate on a memory node? |
a1bc5a4e | 3381 | * @node: is this an allowed node? |
02a0e53d | 3382 | * @gfp_mask: memory allocation flags |
d9fd8a6d | 3383 | * |
6e276d2a DR |
3384 | * If we're in interrupt, yes, we can always allocate. If @node is set in |
3385 | * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this | |
3386 | * node is set in the nearest hardwalled cpuset ancestor to current's cpuset, | |
da99ecf1 | 3387 | * yes. If current has access to memory reserves as an oom victim, yes. |
9bf2229f PJ |
3388 | * Otherwise, no. |
3389 | * | |
3390 | * GFP_USER allocations are marked with the __GFP_HARDWALL bit, | |
c596d9f3 | 3391 | * and do not allow allocations outside the current tasks cpuset |
da99ecf1 | 3392 | * unless the task has been OOM killed. |
9bf2229f | 3393 | * GFP_KERNEL allocations are not so marked, so can escape to the |
78608366 | 3394 | * nearest enclosing hardwalled ancestor cpuset. |
9bf2229f | 3395 | * |
8447a0fe | 3396 | * Scanning up parent cpusets requires callback_lock. The |
02a0e53d PJ |
3397 | * __alloc_pages() routine only calls here with __GFP_HARDWALL bit |
3398 | * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the | |
3399 | * current tasks mems_allowed came up empty on the first pass over | |
3400 | * the zonelist. So only GFP_KERNEL allocations, if all nodes in the | |
8447a0fe | 3401 | * cpuset are short of memory, might require taking the callback_lock. |
9bf2229f | 3402 | * |
36be57ff | 3403 | * The first call here from mm/page_alloc:get_page_from_freelist() |
02a0e53d PJ |
3404 | * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, |
3405 | * so no allocation on a node outside the cpuset is allowed (unless | |
3406 | * in interrupt, of course). | |
36be57ff PJ |
3407 | * |
3408 | * The second pass through get_page_from_freelist() doesn't even call | |
3409 | * here for GFP_ATOMIC calls. For those calls, the __alloc_pages() | |
3410 | * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set | |
3411 | * in alloc_flags. That logic and the checks below have the combined | |
3412 | * affect that: | |
9bf2229f PJ |
3413 | * in_interrupt - any node ok (current task context irrelevant) |
3414 | * GFP_ATOMIC - any node ok | |
da99ecf1 | 3415 | * tsk_is_oom_victim - any node ok |
78608366 | 3416 | * GFP_KERNEL - any node in enclosing hardwalled cpuset ok |
9bf2229f | 3417 | * GFP_USER - only nodes in current tasks mems allowed ok. |
02a0e53d | 3418 | */ |
002f2906 | 3419 | bool __cpuset_node_allowed(int node, gfp_t gfp_mask) |
1da177e4 | 3420 | { |
c9710d80 | 3421 | struct cpuset *cs; /* current cpuset ancestors */ |
29afd49b | 3422 | int allowed; /* is allocation in zone z allowed? */ |
8447a0fe | 3423 | unsigned long flags; |
9bf2229f | 3424 | |
6e276d2a | 3425 | if (in_interrupt()) |
002f2906 | 3426 | return true; |
9bf2229f | 3427 | if (node_isset(node, current->mems_allowed)) |
002f2906 | 3428 | return true; |
c596d9f3 DR |
3429 | /* |
3430 | * Allow tasks that have access to memory reserves because they have | |
3431 | * been OOM killed to get memory anywhere. | |
3432 | */ | |
da99ecf1 | 3433 | if (unlikely(tsk_is_oom_victim(current))) |
002f2906 | 3434 | return true; |
9bf2229f | 3435 | if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ |
002f2906 | 3436 | return false; |
9bf2229f | 3437 | |
5563e770 | 3438 | if (current->flags & PF_EXITING) /* Let dying task have memory */ |
002f2906 | 3439 | return true; |
5563e770 | 3440 | |
9bf2229f | 3441 | /* Not hardwall and node outside mems_allowed: scan up cpusets */ |
8447a0fe | 3442 | spin_lock_irqsave(&callback_lock, flags); |
053199ed | 3443 | |
b8dadcb5 | 3444 | rcu_read_lock(); |
78608366 | 3445 | cs = nearest_hardwall_ancestor(task_cs(current)); |
99afb0fd | 3446 | allowed = node_isset(node, cs->mems_allowed); |
b8dadcb5 | 3447 | rcu_read_unlock(); |
053199ed | 3448 | |
8447a0fe | 3449 | spin_unlock_irqrestore(&callback_lock, flags); |
9bf2229f | 3450 | return allowed; |
1da177e4 LT |
3451 | } |
3452 | ||
825a46af | 3453 | /** |
6adef3eb JS |
3454 | * cpuset_mem_spread_node() - On which node to begin search for a file page |
3455 | * cpuset_slab_spread_node() - On which node to begin search for a slab page | |
825a46af PJ |
3456 | * |
3457 | * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for | |
3458 | * tasks in a cpuset with is_spread_page or is_spread_slab set), | |
3459 | * and if the memory allocation used cpuset_mem_spread_node() | |
3460 | * to determine on which node to start looking, as it will for | |
3461 | * certain page cache or slab cache pages such as used for file | |
3462 | * system buffers and inode caches, then instead of starting on the | |
3463 | * local node to look for a free page, rather spread the starting | |
3464 | * node around the tasks mems_allowed nodes. | |
3465 | * | |
3466 | * We don't have to worry about the returned node being offline | |
3467 | * because "it can't happen", and even if it did, it would be ok. | |
3468 | * | |
3469 | * The routines calling guarantee_online_mems() are careful to | |
3470 | * only set nodes in task->mems_allowed that are online. So it | |
3471 | * should not be possible for the following code to return an | |
3472 | * offline node. But if it did, that would be ok, as this routine | |
3473 | * is not returning the node where the allocation must be, only | |
3474 | * the node where the search should start. The zonelist passed to | |
3475 | * __alloc_pages() will include all nodes. If the slab allocator | |
3476 | * is passed an offline node, it will fall back to the local node. | |
3477 | * See kmem_cache_alloc_node(). | |
3478 | */ | |
3479 | ||
6adef3eb | 3480 | static int cpuset_spread_node(int *rotor) |
825a46af | 3481 | { |
0edaf86c | 3482 | return *rotor = next_node_in(*rotor, current->mems_allowed); |
825a46af | 3483 | } |
6adef3eb JS |
3484 | |
3485 | int cpuset_mem_spread_node(void) | |
3486 | { | |
778d3b0f MH |
3487 | if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE) |
3488 | current->cpuset_mem_spread_rotor = | |
3489 | node_random(¤t->mems_allowed); | |
3490 | ||
6adef3eb JS |
3491 | return cpuset_spread_node(¤t->cpuset_mem_spread_rotor); |
3492 | } | |
3493 | ||
3494 | int cpuset_slab_spread_node(void) | |
3495 | { | |
778d3b0f MH |
3496 | if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE) |
3497 | current->cpuset_slab_spread_rotor = | |
3498 | node_random(¤t->mems_allowed); | |
3499 | ||
6adef3eb JS |
3500 | return cpuset_spread_node(¤t->cpuset_slab_spread_rotor); |
3501 | } | |
3502 | ||
825a46af PJ |
3503 | EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); |
3504 | ||
ef08e3b4 | 3505 | /** |
bbe373f2 DR |
3506 | * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's? |
3507 | * @tsk1: pointer to task_struct of some task. | |
3508 | * @tsk2: pointer to task_struct of some other task. | |
3509 | * | |
3510 | * Description: Return true if @tsk1's mems_allowed intersects the | |
3511 | * mems_allowed of @tsk2. Used by the OOM killer to determine if | |
3512 | * one of the task's memory usage might impact the memory available | |
3513 | * to the other. | |
ef08e3b4 PJ |
3514 | **/ |
3515 | ||
bbe373f2 DR |
3516 | int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, |
3517 | const struct task_struct *tsk2) | |
ef08e3b4 | 3518 | { |
bbe373f2 | 3519 | return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); |
ef08e3b4 PJ |
3520 | } |
3521 | ||
75aa1994 | 3522 | /** |
da39da3a | 3523 | * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed |
75aa1994 | 3524 | * |
da39da3a | 3525 | * Description: Prints current's name, cpuset name, and cached copy of its |
b8dadcb5 | 3526 | * mems_allowed to the kernel log. |
75aa1994 | 3527 | */ |
da39da3a | 3528 | void cpuset_print_current_mems_allowed(void) |
75aa1994 | 3529 | { |
b8dadcb5 | 3530 | struct cgroup *cgrp; |
75aa1994 | 3531 | |
b8dadcb5 | 3532 | rcu_read_lock(); |
63f43f55 | 3533 | |
da39da3a | 3534 | cgrp = task_cs(current)->css.cgroup; |
ef8444ea | 3535 | pr_cont(",cpuset="); |
e61734c5 | 3536 | pr_cont_cgroup_name(cgrp); |
ef8444ea | 3537 | pr_cont(",mems_allowed=%*pbl", |
da39da3a | 3538 | nodemask_pr_args(¤t->mems_allowed)); |
f440d98f | 3539 | |
cfb5966b | 3540 | rcu_read_unlock(); |
75aa1994 DR |
3541 | } |
3542 | ||
3e0d98b9 PJ |
3543 | /* |
3544 | * Collection of memory_pressure is suppressed unless | |
3545 | * this flag is enabled by writing "1" to the special | |
3546 | * cpuset file 'memory_pressure_enabled' in the root cpuset. | |
3547 | */ | |
3548 | ||
c5b2aff8 | 3549 | int cpuset_memory_pressure_enabled __read_mostly; |
3e0d98b9 PJ |
3550 | |
3551 | /** | |
3552 | * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims. | |
3553 | * | |
3554 | * Keep a running average of the rate of synchronous (direct) | |
3555 | * page reclaim efforts initiated by tasks in each cpuset. | |
3556 | * | |
3557 | * This represents the rate at which some task in the cpuset | |
3558 | * ran low on memory on all nodes it was allowed to use, and | |
3559 | * had to enter the kernels page reclaim code in an effort to | |
3560 | * create more free memory by tossing clean pages or swapping | |
3561 | * or writing dirty pages. | |
3562 | * | |
3563 | * Display to user space in the per-cpuset read-only file | |
3564 | * "memory_pressure". Value displayed is an integer | |
3565 | * representing the recent rate of entry into the synchronous | |
3566 | * (direct) page reclaim by any task attached to the cpuset. | |
3567 | **/ | |
3568 | ||
3569 | void __cpuset_memory_pressure_bump(void) | |
3570 | { | |
b8dadcb5 | 3571 | rcu_read_lock(); |
8793d854 | 3572 | fmeter_markevent(&task_cs(current)->fmeter); |
b8dadcb5 | 3573 | rcu_read_unlock(); |
3e0d98b9 PJ |
3574 | } |
3575 | ||
8793d854 | 3576 | #ifdef CONFIG_PROC_PID_CPUSET |
1da177e4 LT |
3577 | /* |
3578 | * proc_cpuset_show() | |
3579 | * - Print tasks cpuset path into seq_file. | |
3580 | * - Used for /proc/<pid>/cpuset. | |
053199ed PJ |
3581 | * - No need to task_lock(tsk) on this tsk->cpuset reference, as it |
3582 | * doesn't really matter if tsk->cpuset changes after we read it, | |
5d21cc2d | 3583 | * and we take cpuset_mutex, keeping cpuset_attach() from changing it |
2df167a3 | 3584 | * anyway. |
1da177e4 | 3585 | */ |
52de4779 ZL |
3586 | int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, |
3587 | struct pid *pid, struct task_struct *tsk) | |
1da177e4 | 3588 | { |
4c737b41 | 3589 | char *buf; |
8793d854 | 3590 | struct cgroup_subsys_state *css; |
99f89551 | 3591 | int retval; |
1da177e4 | 3592 | |
99f89551 | 3593 | retval = -ENOMEM; |
e61734c5 | 3594 | buf = kmalloc(PATH_MAX, GFP_KERNEL); |
1da177e4 | 3595 | if (!buf) |
99f89551 EB |
3596 | goto out; |
3597 | ||
a79a908f | 3598 | css = task_get_css(tsk, cpuset_cgrp_id); |
4c737b41 TH |
3599 | retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX, |
3600 | current->nsproxy->cgroup_ns); | |
a79a908f | 3601 | css_put(css); |
4c737b41 | 3602 | if (retval >= PATH_MAX) |
679a5e3f TH |
3603 | retval = -ENAMETOOLONG; |
3604 | if (retval < 0) | |
52de4779 | 3605 | goto out_free; |
4c737b41 | 3606 | seq_puts(m, buf); |
1da177e4 | 3607 | seq_putc(m, '\n'); |
e61734c5 | 3608 | retval = 0; |
99f89551 | 3609 | out_free: |
1da177e4 | 3610 | kfree(buf); |
99f89551 | 3611 | out: |
1da177e4 LT |
3612 | return retval; |
3613 | } | |
8793d854 | 3614 | #endif /* CONFIG_PROC_PID_CPUSET */ |
1da177e4 | 3615 | |
d01d4827 | 3616 | /* Display task mems_allowed in /proc/<pid>/status file. */ |
df5f8314 EB |
3617 | void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) |
3618 | { | |
e8e6d97c TH |
3619 | seq_printf(m, "Mems_allowed:\t%*pb\n", |
3620 | nodemask_pr_args(&task->mems_allowed)); | |
3621 | seq_printf(m, "Mems_allowed_list:\t%*pbl\n", | |
3622 | nodemask_pr_args(&task->mems_allowed)); | |
1da177e4 | 3623 | } |