]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * kernel/cpuset.c | |
3 | * | |
4 | * Processor and Memory placement constraints for sets of tasks. | |
5 | * | |
6 | * Copyright (C) 2003 BULL SA. | |
029190c5 | 7 | * Copyright (C) 2004-2007 Silicon Graphics, Inc. |
8793d854 | 8 | * Copyright (C) 2006 Google, Inc |
1da177e4 LT |
9 | * |
10 | * Portions derived from Patrick Mochel's sysfs code. | |
11 | * sysfs is Copyright (c) 2001-3 Patrick Mochel | |
1da177e4 | 12 | * |
825a46af | 13 | * 2003-10-10 Written by Simon Derr. |
1da177e4 | 14 | * 2003-10-22 Updates by Stephen Hemminger. |
825a46af | 15 | * 2004 May-July Rework by Paul Jackson. |
8793d854 | 16 | * 2006 Rework by Paul Menage to use generic cgroups |
cf417141 MK |
17 | * 2008 Rework of the scheduler domains and CPU hotplug handling |
18 | * by Max Krasnyansky | |
1da177e4 LT |
19 | * |
20 | * This file is subject to the terms and conditions of the GNU General Public | |
21 | * License. See the file COPYING in the main directory of the Linux | |
22 | * distribution for more details. | |
23 | */ | |
24 | ||
1da177e4 LT |
25 | #include <linux/cpu.h> |
26 | #include <linux/cpumask.h> | |
27 | #include <linux/cpuset.h> | |
28 | #include <linux/err.h> | |
29 | #include <linux/errno.h> | |
30 | #include <linux/file.h> | |
31 | #include <linux/fs.h> | |
32 | #include <linux/init.h> | |
33 | #include <linux/interrupt.h> | |
34 | #include <linux/kernel.h> | |
35 | #include <linux/kmod.h> | |
36 | #include <linux/list.h> | |
68860ec1 | 37 | #include <linux/mempolicy.h> |
1da177e4 | 38 | #include <linux/mm.h> |
f481891f | 39 | #include <linux/memory.h> |
9984de1a | 40 | #include <linux/export.h> |
1da177e4 | 41 | #include <linux/mount.h> |
a1875374 | 42 | #include <linux/fs_context.h> |
1da177e4 LT |
43 | #include <linux/namei.h> |
44 | #include <linux/pagemap.h> | |
45 | #include <linux/proc_fs.h> | |
6b9c2603 | 46 | #include <linux/rcupdate.h> |
1da177e4 | 47 | #include <linux/sched.h> |
f9a25f77 | 48 | #include <linux/sched/deadline.h> |
6e84f315 | 49 | #include <linux/sched/mm.h> |
f719ff9b | 50 | #include <linux/sched/task.h> |
1da177e4 | 51 | #include <linux/seq_file.h> |
22fb52dd | 52 | #include <linux/security.h> |
1da177e4 | 53 | #include <linux/slab.h> |
1da177e4 LT |
54 | #include <linux/spinlock.h> |
55 | #include <linux/stat.h> | |
56 | #include <linux/string.h> | |
57 | #include <linux/time.h> | |
d2b43658 | 58 | #include <linux/time64.h> |
1da177e4 LT |
59 | #include <linux/backing-dev.h> |
60 | #include <linux/sort.h> | |
da99ecf1 | 61 | #include <linux/oom.h> |
edb93821 | 62 | #include <linux/sched/isolation.h> |
7c0f6ba6 | 63 | #include <linux/uaccess.h> |
60063497 | 64 | #include <linux/atomic.h> |
3d3f26a7 | 65 | #include <linux/mutex.h> |
956db3ca | 66 | #include <linux/cgroup.h> |
e44193d3 | 67 | #include <linux/wait.h> |
1da177e4 | 68 | |
89affbf5 | 69 | DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key); |
002f2906 | 70 | DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key); |
202f72d5 | 71 | |
3e0d98b9 PJ |
72 | /* See "Frequency meter" comments, below. */ |
73 | ||
74 | struct fmeter { | |
75 | int cnt; /* unprocessed events count */ | |
76 | int val; /* most recent output value */ | |
d2b43658 | 77 | time64_t time; /* clock (secs) when val computed */ |
3e0d98b9 PJ |
78 | spinlock_t lock; /* guards read or write of above */ |
79 | }; | |
80 | ||
1da177e4 | 81 | struct cpuset { |
8793d854 PM |
82 | struct cgroup_subsys_state css; |
83 | ||
1da177e4 | 84 | unsigned long flags; /* "unsigned long" so bitops work */ |
e2b9a3d7 | 85 | |
7e88291b LZ |
86 | /* |
87 | * On default hierarchy: | |
88 | * | |
89 | * The user-configured masks can only be changed by writing to | |
90 | * cpuset.cpus and cpuset.mems, and won't be limited by the | |
91 | * parent masks. | |
92 | * | |
93 | * The effective masks is the real masks that apply to the tasks | |
94 | * in the cpuset. They may be changed if the configured masks are | |
95 | * changed or hotplug happens. | |
96 | * | |
97 | * effective_mask == configured_mask & parent's effective_mask, | |
98 | * and if it ends up empty, it will inherit the parent's mask. | |
99 | * | |
100 | * | |
101 | * On legacy hierachy: | |
102 | * | |
103 | * The user-configured masks are always the same with effective masks. | |
104 | */ | |
105 | ||
e2b9a3d7 LZ |
106 | /* user-configured CPUs and Memory Nodes allow to tasks */ |
107 | cpumask_var_t cpus_allowed; | |
108 | nodemask_t mems_allowed; | |
109 | ||
110 | /* effective CPUs and Memory Nodes allow to tasks */ | |
111 | cpumask_var_t effective_cpus; | |
112 | nodemask_t effective_mems; | |
1da177e4 | 113 | |
58b74842 WL |
114 | /* |
115 | * CPUs allocated to child sub-partitions (default hierarchy only) | |
116 | * - CPUs granted by the parent = effective_cpus U subparts_cpus | |
117 | * - effective_cpus and subparts_cpus are mutually exclusive. | |
4b842da2 WL |
118 | * |
119 | * effective_cpus contains only onlined CPUs, but subparts_cpus | |
120 | * may have offlined ones. | |
58b74842 WL |
121 | */ |
122 | cpumask_var_t subparts_cpus; | |
123 | ||
33ad801d LZ |
124 | /* |
125 | * This is old Memory Nodes tasks took on. | |
126 | * | |
127 | * - top_cpuset.old_mems_allowed is initialized to mems_allowed. | |
128 | * - A new cpuset's old_mems_allowed is initialized when some | |
129 | * task is moved into it. | |
130 | * - old_mems_allowed is used in cpuset_migrate_mm() when we change | |
131 | * cpuset.mems_allowed and have tasks' nodemask updated, and | |
132 | * then old_mems_allowed is updated to mems_allowed. | |
133 | */ | |
134 | nodemask_t old_mems_allowed; | |
135 | ||
3e0d98b9 | 136 | struct fmeter fmeter; /* memory_pressure filter */ |
029190c5 | 137 | |
452477fa TH |
138 | /* |
139 | * Tasks are being attached to this cpuset. Used to prevent | |
140 | * zeroing cpus/mems_allowed between ->can_attach() and ->attach(). | |
141 | */ | |
142 | int attach_in_progress; | |
143 | ||
029190c5 PJ |
144 | /* partition number for rebuild_sched_domains() */ |
145 | int pn; | |
956db3ca | 146 | |
1d3504fc HS |
147 | /* for custom sched domain */ |
148 | int relax_domain_level; | |
58b74842 WL |
149 | |
150 | /* number of CPUs in subparts_cpus */ | |
151 | int nr_subparts_cpus; | |
152 | ||
153 | /* partition root state */ | |
154 | int partition_root_state; | |
4716909c WL |
155 | |
156 | /* | |
157 | * Default hierarchy only: | |
158 | * use_parent_ecpus - set if using parent's effective_cpus | |
159 | * child_ecpus_count - # of children with use_parent_ecpus set | |
160 | */ | |
161 | int use_parent_ecpus; | |
162 | int child_ecpus_count; | |
58b74842 WL |
163 | }; |
164 | ||
165 | /* | |
166 | * Partition root states: | |
167 | * | |
168 | * 0 - not a partition root | |
3881b861 | 169 | * |
58b74842 | 170 | * 1 - partition root |
3881b861 WL |
171 | * |
172 | * -1 - invalid partition root | |
173 | * None of the cpus in cpus_allowed can be put into the parent's | |
174 | * subparts_cpus. In this case, the cpuset is not a real partition | |
175 | * root anymore. However, the CPU_EXCLUSIVE bit will still be set | |
176 | * and the cpuset can be restored back to a partition root if the | |
177 | * parent cpuset can give more CPUs back to this child cpuset. | |
58b74842 WL |
178 | */ |
179 | #define PRS_DISABLED 0 | |
180 | #define PRS_ENABLED 1 | |
3881b861 | 181 | #define PRS_ERROR -1 |
58b74842 WL |
182 | |
183 | /* | |
184 | * Temporary cpumasks for working with partitions that are passed among | |
185 | * functions to avoid memory allocation in inner functions. | |
186 | */ | |
187 | struct tmpmasks { | |
188 | cpumask_var_t addmask, delmask; /* For partition root */ | |
189 | cpumask_var_t new_cpus; /* For update_cpumasks_hier() */ | |
1da177e4 LT |
190 | }; |
191 | ||
a7c6d554 | 192 | static inline struct cpuset *css_cs(struct cgroup_subsys_state *css) |
8793d854 | 193 | { |
a7c6d554 | 194 | return css ? container_of(css, struct cpuset, css) : NULL; |
8793d854 PM |
195 | } |
196 | ||
197 | /* Retrieve the cpuset for a task */ | |
198 | static inline struct cpuset *task_cs(struct task_struct *task) | |
199 | { | |
073219e9 | 200 | return css_cs(task_css(task, cpuset_cgrp_id)); |
8793d854 | 201 | } |
8793d854 | 202 | |
c9710d80 | 203 | static inline struct cpuset *parent_cs(struct cpuset *cs) |
c431069f | 204 | { |
5c9d535b | 205 | return css_cs(cs->css.parent); |
c431069f TH |
206 | } |
207 | ||
1da177e4 LT |
208 | /* bits in struct cpuset flags field */ |
209 | typedef enum { | |
efeb77b2 | 210 | CS_ONLINE, |
1da177e4 LT |
211 | CS_CPU_EXCLUSIVE, |
212 | CS_MEM_EXCLUSIVE, | |
78608366 | 213 | CS_MEM_HARDWALL, |
45b07ef3 | 214 | CS_MEMORY_MIGRATE, |
029190c5 | 215 | CS_SCHED_LOAD_BALANCE, |
825a46af PJ |
216 | CS_SPREAD_PAGE, |
217 | CS_SPREAD_SLAB, | |
1da177e4 LT |
218 | } cpuset_flagbits_t; |
219 | ||
220 | /* convenient tests for these bits */ | |
41c25707 | 221 | static inline bool is_cpuset_online(struct cpuset *cs) |
efeb77b2 | 222 | { |
41c25707 | 223 | return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); |
efeb77b2 TH |
224 | } |
225 | ||
1da177e4 LT |
226 | static inline int is_cpu_exclusive(const struct cpuset *cs) |
227 | { | |
7b5b9ef0 | 228 | return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); |
1da177e4 LT |
229 | } |
230 | ||
231 | static inline int is_mem_exclusive(const struct cpuset *cs) | |
232 | { | |
7b5b9ef0 | 233 | return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); |
1da177e4 LT |
234 | } |
235 | ||
78608366 PM |
236 | static inline int is_mem_hardwall(const struct cpuset *cs) |
237 | { | |
238 | return test_bit(CS_MEM_HARDWALL, &cs->flags); | |
239 | } | |
240 | ||
029190c5 PJ |
241 | static inline int is_sched_load_balance(const struct cpuset *cs) |
242 | { | |
243 | return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); | |
244 | } | |
245 | ||
45b07ef3 PJ |
246 | static inline int is_memory_migrate(const struct cpuset *cs) |
247 | { | |
7b5b9ef0 | 248 | return test_bit(CS_MEMORY_MIGRATE, &cs->flags); |
45b07ef3 PJ |
249 | } |
250 | ||
825a46af PJ |
251 | static inline int is_spread_page(const struct cpuset *cs) |
252 | { | |
253 | return test_bit(CS_SPREAD_PAGE, &cs->flags); | |
254 | } | |
255 | ||
256 | static inline int is_spread_slab(const struct cpuset *cs) | |
257 | { | |
258 | return test_bit(CS_SPREAD_SLAB, &cs->flags); | |
259 | } | |
260 | ||
58b74842 WL |
261 | static inline int is_partition_root(const struct cpuset *cs) |
262 | { | |
3881b861 | 263 | return cs->partition_root_state > 0; |
58b74842 WL |
264 | } |
265 | ||
1da177e4 | 266 | static struct cpuset top_cpuset = { |
efeb77b2 TH |
267 | .flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) | |
268 | (1 << CS_MEM_EXCLUSIVE)), | |
58b74842 | 269 | .partition_root_state = PRS_ENABLED, |
1da177e4 LT |
270 | }; |
271 | ||
ae8086ce TH |
272 | /** |
273 | * cpuset_for_each_child - traverse online children of a cpuset | |
274 | * @child_cs: loop cursor pointing to the current child | |
492eb21b | 275 | * @pos_css: used for iteration |
ae8086ce TH |
276 | * @parent_cs: target cpuset to walk children of |
277 | * | |
278 | * Walk @child_cs through the online children of @parent_cs. Must be used | |
279 | * with RCU read locked. | |
280 | */ | |
492eb21b TH |
281 | #define cpuset_for_each_child(child_cs, pos_css, parent_cs) \ |
282 | css_for_each_child((pos_css), &(parent_cs)->css) \ | |
283 | if (is_cpuset_online(((child_cs) = css_cs((pos_css))))) | |
ae8086ce | 284 | |
fc560a26 TH |
285 | /** |
286 | * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants | |
287 | * @des_cs: loop cursor pointing to the current descendant | |
492eb21b | 288 | * @pos_css: used for iteration |
fc560a26 TH |
289 | * @root_cs: target cpuset to walk ancestor of |
290 | * | |
291 | * Walk @des_cs through the online descendants of @root_cs. Must be used | |
492eb21b | 292 | * with RCU read locked. The caller may modify @pos_css by calling |
bd8815a6 TH |
293 | * css_rightmost_descendant() to skip subtree. @root_cs is included in the |
294 | * iteration and the first node to be visited. | |
fc560a26 | 295 | */ |
492eb21b TH |
296 | #define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs) \ |
297 | css_for_each_descendant_pre((pos_css), &(root_cs)->css) \ | |
298 | if (is_cpuset_online(((des_cs) = css_cs((pos_css))))) | |
fc560a26 | 299 | |
1da177e4 | 300 | /* |
8447a0fe VD |
301 | * There are two global locks guarding cpuset structures - cpuset_mutex and |
302 | * callback_lock. We also require taking task_lock() when dereferencing a | |
303 | * task's cpuset pointer. See "The task_lock() exception", at the end of this | |
304 | * comment. | |
5d21cc2d | 305 | * |
8447a0fe | 306 | * A task must hold both locks to modify cpusets. If a task holds |
5d21cc2d | 307 | * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it |
8447a0fe | 308 | * is the only task able to also acquire callback_lock and be able to |
5d21cc2d TH |
309 | * modify cpusets. It can perform various checks on the cpuset structure |
310 | * first, knowing nothing will change. It can also allocate memory while | |
311 | * just holding cpuset_mutex. While it is performing these checks, various | |
8447a0fe VD |
312 | * callback routines can briefly acquire callback_lock to query cpusets. |
313 | * Once it is ready to make the changes, it takes callback_lock, blocking | |
5d21cc2d | 314 | * everyone else. |
053199ed PJ |
315 | * |
316 | * Calls to the kernel memory allocator can not be made while holding | |
8447a0fe | 317 | * callback_lock, as that would risk double tripping on callback_lock |
053199ed PJ |
318 | * from one of the callbacks into the cpuset code from within |
319 | * __alloc_pages(). | |
320 | * | |
8447a0fe | 321 | * If a task is only holding callback_lock, then it has read-only |
053199ed PJ |
322 | * access to cpusets. |
323 | * | |
58568d2a MX |
324 | * Now, the task_struct fields mems_allowed and mempolicy may be changed |
325 | * by other task, we use alloc_lock in the task_struct fields to protect | |
326 | * them. | |
053199ed | 327 | * |
8447a0fe | 328 | * The cpuset_common_file_read() handlers only hold callback_lock across |
053199ed PJ |
329 | * small pieces of code, such as when reading out possibly multi-word |
330 | * cpumasks and nodemasks. | |
331 | * | |
2df167a3 PM |
332 | * Accessing a task's cpuset should be done in accordance with the |
333 | * guidelines for accessing subsystem state in kernel/cgroup.c | |
1da177e4 LT |
334 | */ |
335 | ||
1243dc51 | 336 | DEFINE_STATIC_PERCPU_RWSEM(cpuset_rwsem); |
710da3c8 JL |
337 | |
338 | void cpuset_read_lock(void) | |
339 | { | |
340 | percpu_down_read(&cpuset_rwsem); | |
341 | } | |
342 | ||
343 | void cpuset_read_unlock(void) | |
344 | { | |
345 | percpu_up_read(&cpuset_rwsem); | |
346 | } | |
347 | ||
8447a0fe | 348 | static DEFINE_SPINLOCK(callback_lock); |
4247bdc6 | 349 | |
e93ad19d TH |
350 | static struct workqueue_struct *cpuset_migrate_mm_wq; |
351 | ||
3a5a6d0c TH |
352 | /* |
353 | * CPU / memory hotplug is handled asynchronously. | |
354 | */ | |
355 | static void cpuset_hotplug_workfn(struct work_struct *work); | |
3a5a6d0c TH |
356 | static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn); |
357 | ||
e44193d3 LZ |
358 | static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq); |
359 | ||
b8d1b8ee | 360 | /* |
0c05b9bd WL |
361 | * Cgroup v2 behavior is used on the "cpus" and "mems" control files when |
362 | * on default hierarchy or when the cpuset_v2_mode flag is set by mounting | |
363 | * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option. | |
364 | * With v2 behavior, "cpus" and "mems" are always what the users have | |
365 | * requested and won't be changed by hotplug events. Only the effective | |
366 | * cpus or mems will be affected. | |
b8d1b8ee WL |
367 | */ |
368 | static inline bool is_in_v2_mode(void) | |
369 | { | |
370 | return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || | |
371 | (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE); | |
372 | } | |
373 | ||
1da177e4 | 374 | /* |
300ed6cb | 375 | * Return in pmask the portion of a cpusets's cpus_allowed that |
1da177e4 | 376 | * are online. If none are online, walk up the cpuset hierarchy |
28b89b9e | 377 | * until we find one that does have some online cpus. |
1da177e4 LT |
378 | * |
379 | * One way or another, we guarantee to return some non-empty subset | |
5f054e31 | 380 | * of cpu_online_mask. |
1da177e4 | 381 | * |
8447a0fe | 382 | * Call with callback_lock or cpuset_mutex held. |
1da177e4 | 383 | */ |
c9710d80 | 384 | static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask) |
1da177e4 | 385 | { |
28b89b9e | 386 | while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) { |
c431069f | 387 | cs = parent_cs(cs); |
28b89b9e JP |
388 | if (unlikely(!cs)) { |
389 | /* | |
390 | * The top cpuset doesn't have any online cpu as a | |
391 | * consequence of a race between cpuset_hotplug_work | |
392 | * and cpu hotplug notifier. But we know the top | |
7b7b8a2c | 393 | * cpuset's effective_cpus is on its way to be |
28b89b9e JP |
394 | * identical to cpu_online_mask. |
395 | */ | |
396 | cpumask_copy(pmask, cpu_online_mask); | |
397 | return; | |
398 | } | |
399 | } | |
ae1c8023 | 400 | cpumask_and(pmask, cs->effective_cpus, cpu_online_mask); |
1da177e4 LT |
401 | } |
402 | ||
403 | /* | |
404 | * Return in *pmask the portion of a cpusets's mems_allowed that | |
0e1e7c7a CL |
405 | * are online, with memory. If none are online with memory, walk |
406 | * up the cpuset hierarchy until we find one that does have some | |
40df2deb | 407 | * online mems. The top cpuset always has some mems online. |
1da177e4 LT |
408 | * |
409 | * One way or another, we guarantee to return some non-empty subset | |
38d7bee9 | 410 | * of node_states[N_MEMORY]. |
1da177e4 | 411 | * |
8447a0fe | 412 | * Call with callback_lock or cpuset_mutex held. |
1da177e4 | 413 | */ |
c9710d80 | 414 | static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) |
1da177e4 | 415 | { |
ae1c8023 | 416 | while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) |
c431069f | 417 | cs = parent_cs(cs); |
ae1c8023 | 418 | nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); |
1da177e4 LT |
419 | } |
420 | ||
f3b39d47 MX |
421 | /* |
422 | * update task's spread flag if cpuset's page/slab spread flag is set | |
423 | * | |
8447a0fe | 424 | * Call with callback_lock or cpuset_mutex held. |
f3b39d47 MX |
425 | */ |
426 | static void cpuset_update_task_spread_flag(struct cpuset *cs, | |
427 | struct task_struct *tsk) | |
428 | { | |
429 | if (is_spread_page(cs)) | |
2ad654bc | 430 | task_set_spread_page(tsk); |
f3b39d47 | 431 | else |
2ad654bc ZL |
432 | task_clear_spread_page(tsk); |
433 | ||
f3b39d47 | 434 | if (is_spread_slab(cs)) |
2ad654bc | 435 | task_set_spread_slab(tsk); |
f3b39d47 | 436 | else |
2ad654bc | 437 | task_clear_spread_slab(tsk); |
f3b39d47 MX |
438 | } |
439 | ||
1da177e4 LT |
440 | /* |
441 | * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q? | |
442 | * | |
443 | * One cpuset is a subset of another if all its allowed CPUs and | |
444 | * Memory Nodes are a subset of the other, and its exclusive flags | |
5d21cc2d | 445 | * are only set if the other's are set. Call holding cpuset_mutex. |
1da177e4 LT |
446 | */ |
447 | ||
448 | static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) | |
449 | { | |
300ed6cb | 450 | return cpumask_subset(p->cpus_allowed, q->cpus_allowed) && |
1da177e4 LT |
451 | nodes_subset(p->mems_allowed, q->mems_allowed) && |
452 | is_cpu_exclusive(p) <= is_cpu_exclusive(q) && | |
453 | is_mem_exclusive(p) <= is_mem_exclusive(q); | |
454 | } | |
455 | ||
bf92370c WL |
456 | /** |
457 | * alloc_cpumasks - allocate three cpumasks for cpuset | |
458 | * @cs: the cpuset that have cpumasks to be allocated. | |
459 | * @tmp: the tmpmasks structure pointer | |
460 | * Return: 0 if successful, -ENOMEM otherwise. | |
461 | * | |
462 | * Only one of the two input arguments should be non-NULL. | |
463 | */ | |
464 | static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) | |
465 | { | |
466 | cpumask_var_t *pmask1, *pmask2, *pmask3; | |
467 | ||
468 | if (cs) { | |
469 | pmask1 = &cs->cpus_allowed; | |
470 | pmask2 = &cs->effective_cpus; | |
471 | pmask3 = &cs->subparts_cpus; | |
472 | } else { | |
473 | pmask1 = &tmp->new_cpus; | |
474 | pmask2 = &tmp->addmask; | |
475 | pmask3 = &tmp->delmask; | |
476 | } | |
477 | ||
478 | if (!zalloc_cpumask_var(pmask1, GFP_KERNEL)) | |
479 | return -ENOMEM; | |
480 | ||
481 | if (!zalloc_cpumask_var(pmask2, GFP_KERNEL)) | |
482 | goto free_one; | |
483 | ||
484 | if (!zalloc_cpumask_var(pmask3, GFP_KERNEL)) | |
485 | goto free_two; | |
486 | ||
487 | return 0; | |
488 | ||
489 | free_two: | |
490 | free_cpumask_var(*pmask2); | |
491 | free_one: | |
492 | free_cpumask_var(*pmask1); | |
493 | return -ENOMEM; | |
494 | } | |
495 | ||
496 | /** | |
497 | * free_cpumasks - free cpumasks in a tmpmasks structure | |
498 | * @cs: the cpuset that have cpumasks to be free. | |
499 | * @tmp: the tmpmasks structure pointer | |
500 | */ | |
501 | static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) | |
502 | { | |
503 | if (cs) { | |
504 | free_cpumask_var(cs->cpus_allowed); | |
505 | free_cpumask_var(cs->effective_cpus); | |
506 | free_cpumask_var(cs->subparts_cpus); | |
507 | } | |
508 | if (tmp) { | |
509 | free_cpumask_var(tmp->new_cpus); | |
510 | free_cpumask_var(tmp->addmask); | |
511 | free_cpumask_var(tmp->delmask); | |
512 | } | |
513 | } | |
514 | ||
645fcc9d LZ |
515 | /** |
516 | * alloc_trial_cpuset - allocate a trial cpuset | |
517 | * @cs: the cpuset that the trial cpuset duplicates | |
518 | */ | |
c9710d80 | 519 | static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) |
645fcc9d | 520 | { |
300ed6cb LZ |
521 | struct cpuset *trial; |
522 | ||
523 | trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); | |
524 | if (!trial) | |
525 | return NULL; | |
526 | ||
bf92370c WL |
527 | if (alloc_cpumasks(trial, NULL)) { |
528 | kfree(trial); | |
529 | return NULL; | |
530 | } | |
300ed6cb | 531 | |
e2b9a3d7 LZ |
532 | cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); |
533 | cpumask_copy(trial->effective_cpus, cs->effective_cpus); | |
300ed6cb | 534 | return trial; |
645fcc9d LZ |
535 | } |
536 | ||
537 | /** | |
bf92370c WL |
538 | * free_cpuset - free the cpuset |
539 | * @cs: the cpuset to be freed | |
645fcc9d | 540 | */ |
bf92370c | 541 | static inline void free_cpuset(struct cpuset *cs) |
645fcc9d | 542 | { |
bf92370c WL |
543 | free_cpumasks(cs, NULL); |
544 | kfree(cs); | |
645fcc9d LZ |
545 | } |
546 | ||
1da177e4 LT |
547 | /* |
548 | * validate_change() - Used to validate that any proposed cpuset change | |
549 | * follows the structural rules for cpusets. | |
550 | * | |
551 | * If we replaced the flag and mask values of the current cpuset | |
552 | * (cur) with those values in the trial cpuset (trial), would | |
553 | * our various subset and exclusive rules still be valid? Presumes | |
5d21cc2d | 554 | * cpuset_mutex held. |
1da177e4 LT |
555 | * |
556 | * 'cur' is the address of an actual, in-use cpuset. Operations | |
557 | * such as list traversal that depend on the actual address of the | |
558 | * cpuset in the list must use cur below, not trial. | |
559 | * | |
560 | * 'trial' is the address of bulk structure copy of cur, with | |
561 | * perhaps one or more of the fields cpus_allowed, mems_allowed, | |
562 | * or flags changed to new, trial values. | |
563 | * | |
564 | * Return 0 if valid, -errno if not. | |
565 | */ | |
566 | ||
c9710d80 | 567 | static int validate_change(struct cpuset *cur, struct cpuset *trial) |
1da177e4 | 568 | { |
492eb21b | 569 | struct cgroup_subsys_state *css; |
1da177e4 | 570 | struct cpuset *c, *par; |
ae8086ce TH |
571 | int ret; |
572 | ||
573 | rcu_read_lock(); | |
1da177e4 LT |
574 | |
575 | /* Each of our child cpusets must be a subset of us */ | |
ae8086ce | 576 | ret = -EBUSY; |
492eb21b | 577 | cpuset_for_each_child(c, css, cur) |
ae8086ce TH |
578 | if (!is_cpuset_subset(c, trial)) |
579 | goto out; | |
1da177e4 LT |
580 | |
581 | /* Remaining checks don't apply to root cpuset */ | |
ae8086ce | 582 | ret = 0; |
69604067 | 583 | if (cur == &top_cpuset) |
ae8086ce | 584 | goto out; |
1da177e4 | 585 | |
c431069f | 586 | par = parent_cs(cur); |
69604067 | 587 | |
7e88291b | 588 | /* On legacy hiearchy, we must be a subset of our parent cpuset. */ |
ae8086ce | 589 | ret = -EACCES; |
b8d1b8ee | 590 | if (!is_in_v2_mode() && !is_cpuset_subset(trial, par)) |
ae8086ce | 591 | goto out; |
1da177e4 | 592 | |
2df167a3 PM |
593 | /* |
594 | * If either I or some sibling (!= me) is exclusive, we can't | |
595 | * overlap | |
596 | */ | |
ae8086ce | 597 | ret = -EINVAL; |
492eb21b | 598 | cpuset_for_each_child(c, css, par) { |
1da177e4 LT |
599 | if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) && |
600 | c != cur && | |
300ed6cb | 601 | cpumask_intersects(trial->cpus_allowed, c->cpus_allowed)) |
ae8086ce | 602 | goto out; |
1da177e4 LT |
603 | if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) && |
604 | c != cur && | |
605 | nodes_intersects(trial->mems_allowed, c->mems_allowed)) | |
ae8086ce | 606 | goto out; |
1da177e4 LT |
607 | } |
608 | ||
452477fa TH |
609 | /* |
610 | * Cpusets with tasks - existing or newly being attached - can't | |
1c09b195 | 611 | * be changed to have empty cpus_allowed or mems_allowed. |
452477fa | 612 | */ |
ae8086ce | 613 | ret = -ENOSPC; |
27bd4dbb | 614 | if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) { |
1c09b195 LZ |
615 | if (!cpumask_empty(cur->cpus_allowed) && |
616 | cpumask_empty(trial->cpus_allowed)) | |
617 | goto out; | |
618 | if (!nodes_empty(cur->mems_allowed) && | |
619 | nodes_empty(trial->mems_allowed)) | |
620 | goto out; | |
621 | } | |
020958b6 | 622 | |
f82f8042 JL |
623 | /* |
624 | * We can't shrink if we won't have enough room for SCHED_DEADLINE | |
625 | * tasks. | |
626 | */ | |
627 | ret = -EBUSY; | |
628 | if (is_cpu_exclusive(cur) && | |
629 | !cpuset_cpumask_can_shrink(cur->cpus_allowed, | |
630 | trial->cpus_allowed)) | |
631 | goto out; | |
632 | ||
ae8086ce TH |
633 | ret = 0; |
634 | out: | |
635 | rcu_read_unlock(); | |
636 | return ret; | |
1da177e4 LT |
637 | } |
638 | ||
db7f47cf | 639 | #ifdef CONFIG_SMP |
029190c5 | 640 | /* |
cf417141 | 641 | * Helper routine for generate_sched_domains(). |
8b5f1c52 | 642 | * Do cpusets a, b have overlapping effective cpus_allowed masks? |
029190c5 | 643 | */ |
029190c5 PJ |
644 | static int cpusets_overlap(struct cpuset *a, struct cpuset *b) |
645 | { | |
8b5f1c52 | 646 | return cpumask_intersects(a->effective_cpus, b->effective_cpus); |
029190c5 PJ |
647 | } |
648 | ||
1d3504fc HS |
649 | static void |
650 | update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c) | |
651 | { | |
1d3504fc HS |
652 | if (dattr->relax_domain_level < c->relax_domain_level) |
653 | dattr->relax_domain_level = c->relax_domain_level; | |
654 | return; | |
655 | } | |
656 | ||
fc560a26 TH |
657 | static void update_domain_attr_tree(struct sched_domain_attr *dattr, |
658 | struct cpuset *root_cs) | |
f5393693 | 659 | { |
fc560a26 | 660 | struct cpuset *cp; |
492eb21b | 661 | struct cgroup_subsys_state *pos_css; |
f5393693 | 662 | |
fc560a26 | 663 | rcu_read_lock(); |
492eb21b | 664 | cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { |
fc560a26 TH |
665 | /* skip the whole subtree if @cp doesn't have any CPU */ |
666 | if (cpumask_empty(cp->cpus_allowed)) { | |
492eb21b | 667 | pos_css = css_rightmost_descendant(pos_css); |
f5393693 | 668 | continue; |
fc560a26 | 669 | } |
f5393693 LJ |
670 | |
671 | if (is_sched_load_balance(cp)) | |
672 | update_domain_attr(dattr, cp); | |
f5393693 | 673 | } |
fc560a26 | 674 | rcu_read_unlock(); |
f5393693 LJ |
675 | } |
676 | ||
be040bea PB |
677 | /* Must be called with cpuset_mutex held. */ |
678 | static inline int nr_cpusets(void) | |
679 | { | |
680 | /* jump label reference count + the top-level cpuset */ | |
681 | return static_key_count(&cpusets_enabled_key.key) + 1; | |
682 | } | |
683 | ||
029190c5 | 684 | /* |
cf417141 MK |
685 | * generate_sched_domains() |
686 | * | |
687 | * This function builds a partial partition of the systems CPUs | |
688 | * A 'partial partition' is a set of non-overlapping subsets whose | |
689 | * union is a subset of that set. | |
0a0fca9d | 690 | * The output of this function needs to be passed to kernel/sched/core.c |
cf417141 MK |
691 | * partition_sched_domains() routine, which will rebuild the scheduler's |
692 | * load balancing domains (sched domains) as specified by that partial | |
693 | * partition. | |
029190c5 | 694 | * |
da82c92f | 695 | * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst |
029190c5 PJ |
696 | * for a background explanation of this. |
697 | * | |
698 | * Does not return errors, on the theory that the callers of this | |
699 | * routine would rather not worry about failures to rebuild sched | |
700 | * domains when operating in the severe memory shortage situations | |
701 | * that could cause allocation failures below. | |
702 | * | |
5d21cc2d | 703 | * Must be called with cpuset_mutex held. |
029190c5 PJ |
704 | * |
705 | * The three key local variables below are: | |
b6fbbf31 JL |
706 | * cp - cpuset pointer, used (together with pos_css) to perform a |
707 | * top-down scan of all cpusets. For our purposes, rebuilding | |
708 | * the schedulers sched domains, we can ignore !is_sched_load_ | |
709 | * balance cpusets. | |
029190c5 PJ |
710 | * csa - (for CpuSet Array) Array of pointers to all the cpusets |
711 | * that need to be load balanced, for convenient iterative | |
712 | * access by the subsequent code that finds the best partition, | |
713 | * i.e the set of domains (subsets) of CPUs such that the | |
714 | * cpus_allowed of every cpuset marked is_sched_load_balance | |
715 | * is a subset of one of these domains, while there are as | |
716 | * many such domains as possible, each as small as possible. | |
717 | * doms - Conversion of 'csa' to an array of cpumasks, for passing to | |
0a0fca9d | 718 | * the kernel/sched/core.c routine partition_sched_domains() in a |
029190c5 PJ |
719 | * convenient format, that can be easily compared to the prior |
720 | * value to determine what partition elements (sched domains) | |
721 | * were changed (added or removed.) | |
722 | * | |
723 | * Finding the best partition (set of domains): | |
724 | * The triple nested loops below over i, j, k scan over the | |
725 | * load balanced cpusets (using the array of cpuset pointers in | |
726 | * csa[]) looking for pairs of cpusets that have overlapping | |
727 | * cpus_allowed, but which don't have the same 'pn' partition | |
728 | * number and gives them in the same partition number. It keeps | |
729 | * looping on the 'restart' label until it can no longer find | |
730 | * any such pairs. | |
731 | * | |
732 | * The union of the cpus_allowed masks from the set of | |
733 | * all cpusets having the same 'pn' value then form the one | |
734 | * element of the partition (one sched domain) to be passed to | |
735 | * partition_sched_domains(). | |
736 | */ | |
acc3f5d7 | 737 | static int generate_sched_domains(cpumask_var_t **domains, |
cf417141 | 738 | struct sched_domain_attr **attributes) |
029190c5 | 739 | { |
b6fbbf31 | 740 | struct cpuset *cp; /* top-down scan of cpusets */ |
029190c5 PJ |
741 | struct cpuset **csa; /* array of all cpuset ptrs */ |
742 | int csn; /* how many cpuset ptrs in csa so far */ | |
743 | int i, j, k; /* indices for partition finding loops */ | |
acc3f5d7 | 744 | cpumask_var_t *doms; /* resulting partition; i.e. sched domains */ |
1d3504fc | 745 | struct sched_domain_attr *dattr; /* attributes for custom domains */ |
1583715d | 746 | int ndoms = 0; /* number of sched domains in result */ |
6af866af | 747 | int nslot; /* next empty doms[] struct cpumask slot */ |
492eb21b | 748 | struct cgroup_subsys_state *pos_css; |
0ccea8fe | 749 | bool root_load_balance = is_sched_load_balance(&top_cpuset); |
029190c5 | 750 | |
029190c5 | 751 | doms = NULL; |
1d3504fc | 752 | dattr = NULL; |
cf417141 | 753 | csa = NULL; |
029190c5 PJ |
754 | |
755 | /* Special case for the 99% of systems with one, full, sched domain */ | |
0ccea8fe | 756 | if (root_load_balance && !top_cpuset.nr_subparts_cpus) { |
acc3f5d7 RR |
757 | ndoms = 1; |
758 | doms = alloc_sched_domains(ndoms); | |
029190c5 | 759 | if (!doms) |
cf417141 MK |
760 | goto done; |
761 | ||
1d3504fc HS |
762 | dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL); |
763 | if (dattr) { | |
764 | *dattr = SD_ATTR_INIT; | |
93a65575 | 765 | update_domain_attr_tree(dattr, &top_cpuset); |
1d3504fc | 766 | } |
47b8ea71 | 767 | cpumask_and(doms[0], top_cpuset.effective_cpus, |
edb93821 | 768 | housekeeping_cpumask(HK_FLAG_DOMAIN)); |
cf417141 | 769 | |
cf417141 | 770 | goto done; |
029190c5 PJ |
771 | } |
772 | ||
6da2ec56 | 773 | csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL); |
029190c5 PJ |
774 | if (!csa) |
775 | goto done; | |
776 | csn = 0; | |
777 | ||
fc560a26 | 778 | rcu_read_lock(); |
0ccea8fe WL |
779 | if (root_load_balance) |
780 | csa[csn++] = &top_cpuset; | |
492eb21b | 781 | cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) { |
bd8815a6 TH |
782 | if (cp == &top_cpuset) |
783 | continue; | |
f5393693 | 784 | /* |
fc560a26 TH |
785 | * Continue traversing beyond @cp iff @cp has some CPUs and |
786 | * isn't load balancing. The former is obvious. The | |
787 | * latter: All child cpusets contain a subset of the | |
788 | * parent's cpus, so just skip them, and then we call | |
789 | * update_domain_attr_tree() to calc relax_domain_level of | |
790 | * the corresponding sched domain. | |
0ccea8fe WL |
791 | * |
792 | * If root is load-balancing, we can skip @cp if it | |
793 | * is a subset of the root's effective_cpus. | |
f5393693 | 794 | */ |
fc560a26 | 795 | if (!cpumask_empty(cp->cpus_allowed) && |
47b8ea71 | 796 | !(is_sched_load_balance(cp) && |
edb93821 FW |
797 | cpumask_intersects(cp->cpus_allowed, |
798 | housekeeping_cpumask(HK_FLAG_DOMAIN)))) | |
f5393693 | 799 | continue; |
489a5393 | 800 | |
0ccea8fe WL |
801 | if (root_load_balance && |
802 | cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus)) | |
803 | continue; | |
804 | ||
cd1cb335 VS |
805 | if (is_sched_load_balance(cp) && |
806 | !cpumask_empty(cp->effective_cpus)) | |
fc560a26 TH |
807 | csa[csn++] = cp; |
808 | ||
0ccea8fe WL |
809 | /* skip @cp's subtree if not a partition root */ |
810 | if (!is_partition_root(cp)) | |
811 | pos_css = css_rightmost_descendant(pos_css); | |
fc560a26 TH |
812 | } |
813 | rcu_read_unlock(); | |
029190c5 PJ |
814 | |
815 | for (i = 0; i < csn; i++) | |
816 | csa[i]->pn = i; | |
817 | ndoms = csn; | |
818 | ||
819 | restart: | |
820 | /* Find the best partition (set of sched domains) */ | |
821 | for (i = 0; i < csn; i++) { | |
822 | struct cpuset *a = csa[i]; | |
823 | int apn = a->pn; | |
824 | ||
825 | for (j = 0; j < csn; j++) { | |
826 | struct cpuset *b = csa[j]; | |
827 | int bpn = b->pn; | |
828 | ||
829 | if (apn != bpn && cpusets_overlap(a, b)) { | |
830 | for (k = 0; k < csn; k++) { | |
831 | struct cpuset *c = csa[k]; | |
832 | ||
833 | if (c->pn == bpn) | |
834 | c->pn = apn; | |
835 | } | |
836 | ndoms--; /* one less element */ | |
837 | goto restart; | |
838 | } | |
839 | } | |
840 | } | |
841 | ||
cf417141 MK |
842 | /* |
843 | * Now we know how many domains to create. | |
844 | * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. | |
845 | */ | |
acc3f5d7 | 846 | doms = alloc_sched_domains(ndoms); |
700018e0 | 847 | if (!doms) |
cf417141 | 848 | goto done; |
cf417141 MK |
849 | |
850 | /* | |
851 | * The rest of the code, including the scheduler, can deal with | |
852 | * dattr==NULL case. No need to abort if alloc fails. | |
853 | */ | |
6da2ec56 KC |
854 | dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr), |
855 | GFP_KERNEL); | |
029190c5 PJ |
856 | |
857 | for (nslot = 0, i = 0; i < csn; i++) { | |
858 | struct cpuset *a = csa[i]; | |
6af866af | 859 | struct cpumask *dp; |
029190c5 PJ |
860 | int apn = a->pn; |
861 | ||
cf417141 MK |
862 | if (apn < 0) { |
863 | /* Skip completed partitions */ | |
864 | continue; | |
865 | } | |
866 | ||
acc3f5d7 | 867 | dp = doms[nslot]; |
cf417141 MK |
868 | |
869 | if (nslot == ndoms) { | |
870 | static int warnings = 10; | |
871 | if (warnings) { | |
12d3089c FF |
872 | pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n", |
873 | nslot, ndoms, csn, i, apn); | |
cf417141 | 874 | warnings--; |
029190c5 | 875 | } |
cf417141 MK |
876 | continue; |
877 | } | |
029190c5 | 878 | |
6af866af | 879 | cpumask_clear(dp); |
cf417141 MK |
880 | if (dattr) |
881 | *(dattr + nslot) = SD_ATTR_INIT; | |
882 | for (j = i; j < csn; j++) { | |
883 | struct cpuset *b = csa[j]; | |
884 | ||
885 | if (apn == b->pn) { | |
8b5f1c52 | 886 | cpumask_or(dp, dp, b->effective_cpus); |
edb93821 | 887 | cpumask_and(dp, dp, housekeeping_cpumask(HK_FLAG_DOMAIN)); |
cf417141 MK |
888 | if (dattr) |
889 | update_domain_attr_tree(dattr + nslot, b); | |
890 | ||
891 | /* Done with this partition */ | |
892 | b->pn = -1; | |
029190c5 | 893 | } |
029190c5 | 894 | } |
cf417141 | 895 | nslot++; |
029190c5 PJ |
896 | } |
897 | BUG_ON(nslot != ndoms); | |
898 | ||
cf417141 MK |
899 | done: |
900 | kfree(csa); | |
901 | ||
700018e0 LZ |
902 | /* |
903 | * Fallback to the default domain if kmalloc() failed. | |
904 | * See comments in partition_sched_domains(). | |
905 | */ | |
906 | if (doms == NULL) | |
907 | ndoms = 1; | |
908 | ||
cf417141 MK |
909 | *domains = doms; |
910 | *attributes = dattr; | |
911 | return ndoms; | |
912 | } | |
913 | ||
f9a25f77 MP |
914 | static void update_tasks_root_domain(struct cpuset *cs) |
915 | { | |
916 | struct css_task_iter it; | |
917 | struct task_struct *task; | |
918 | ||
919 | css_task_iter_start(&cs->css, 0, &it); | |
920 | ||
921 | while ((task = css_task_iter_next(&it))) | |
922 | dl_add_task_root_domain(task); | |
923 | ||
924 | css_task_iter_end(&it); | |
925 | } | |
926 | ||
927 | static void rebuild_root_domains(void) | |
928 | { | |
929 | struct cpuset *cs = NULL; | |
930 | struct cgroup_subsys_state *pos_css; | |
931 | ||
1243dc51 | 932 | percpu_rwsem_assert_held(&cpuset_rwsem); |
f9a25f77 MP |
933 | lockdep_assert_cpus_held(); |
934 | lockdep_assert_held(&sched_domains_mutex); | |
935 | ||
f9a25f77 MP |
936 | rcu_read_lock(); |
937 | ||
938 | /* | |
939 | * Clear default root domain DL accounting, it will be computed again | |
940 | * if a task belongs to it. | |
941 | */ | |
942 | dl_clear_root_domain(&def_root_domain); | |
943 | ||
944 | cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { | |
945 | ||
946 | if (cpumask_empty(cs->effective_cpus)) { | |
947 | pos_css = css_rightmost_descendant(pos_css); | |
948 | continue; | |
949 | } | |
950 | ||
951 | css_get(&cs->css); | |
952 | ||
953 | rcu_read_unlock(); | |
954 | ||
955 | update_tasks_root_domain(cs); | |
956 | ||
957 | rcu_read_lock(); | |
958 | css_put(&cs->css); | |
959 | } | |
960 | rcu_read_unlock(); | |
961 | } | |
962 | ||
963 | static void | |
964 | partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[], | |
965 | struct sched_domain_attr *dattr_new) | |
966 | { | |
967 | mutex_lock(&sched_domains_mutex); | |
968 | partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); | |
969 | rebuild_root_domains(); | |
970 | mutex_unlock(&sched_domains_mutex); | |
971 | } | |
972 | ||
cf417141 MK |
973 | /* |
974 | * Rebuild scheduler domains. | |
975 | * | |
699140ba TH |
976 | * If the flag 'sched_load_balance' of any cpuset with non-empty |
977 | * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset | |
978 | * which has that flag enabled, or if any cpuset with a non-empty | |
979 | * 'cpus' is removed, then call this routine to rebuild the | |
980 | * scheduler's dynamic sched domains. | |
cf417141 | 981 | * |
5d21cc2d | 982 | * Call with cpuset_mutex held. Takes get_online_cpus(). |
cf417141 | 983 | */ |
699140ba | 984 | static void rebuild_sched_domains_locked(void) |
cf417141 | 985 | { |
406100f3 | 986 | struct cgroup_subsys_state *pos_css; |
cf417141 | 987 | struct sched_domain_attr *attr; |
acc3f5d7 | 988 | cpumask_var_t *doms; |
406100f3 | 989 | struct cpuset *cs; |
cf417141 MK |
990 | int ndoms; |
991 | ||
d74b27d6 | 992 | lockdep_assert_cpus_held(); |
1243dc51 | 993 | percpu_rwsem_assert_held(&cpuset_rwsem); |
cf417141 | 994 | |
5b16c2a4 | 995 | /* |
406100f3 | 996 | * If we have raced with CPU hotplug, return early to avoid |
5b16c2a4 | 997 | * passing doms with offlined cpu to partition_sched_domains(). |
406100f3 DJ |
998 | * Anyways, cpuset_hotplug_workfn() will rebuild sched domains. |
999 | * | |
1000 | * With no CPUs in any subpartitions, top_cpuset's effective CPUs | |
1001 | * should be the same as the active CPUs, so checking only top_cpuset | |
1002 | * is enough to detect racing CPU offlines. | |
5b16c2a4 | 1003 | */ |
0ccea8fe WL |
1004 | if (!top_cpuset.nr_subparts_cpus && |
1005 | !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask)) | |
d74b27d6 | 1006 | return; |
0ccea8fe | 1007 | |
406100f3 DJ |
1008 | /* |
1009 | * With subpartition CPUs, however, the effective CPUs of a partition | |
1010 | * root should be only a subset of the active CPUs. Since a CPU in any | |
1011 | * partition root could be offlined, all must be checked. | |
1012 | */ | |
1013 | if (top_cpuset.nr_subparts_cpus) { | |
1014 | rcu_read_lock(); | |
1015 | cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { | |
1016 | if (!is_partition_root(cs)) { | |
1017 | pos_css = css_rightmost_descendant(pos_css); | |
1018 | continue; | |
1019 | } | |
1020 | if (!cpumask_subset(cs->effective_cpus, | |
1021 | cpu_active_mask)) { | |
1022 | rcu_read_unlock(); | |
1023 | return; | |
1024 | } | |
1025 | } | |
1026 | rcu_read_unlock(); | |
1027 | } | |
5b16c2a4 | 1028 | |
cf417141 | 1029 | /* Generate domain masks and attrs */ |
cf417141 | 1030 | ndoms = generate_sched_domains(&doms, &attr); |
cf417141 MK |
1031 | |
1032 | /* Have scheduler rebuild the domains */ | |
f9a25f77 | 1033 | partition_and_rebuild_sched_domains(ndoms, doms, attr); |
cf417141 | 1034 | } |
db7f47cf | 1035 | #else /* !CONFIG_SMP */ |
699140ba | 1036 | static void rebuild_sched_domains_locked(void) |
db7f47cf PM |
1037 | { |
1038 | } | |
db7f47cf | 1039 | #endif /* CONFIG_SMP */ |
029190c5 | 1040 | |
cf417141 MK |
1041 | void rebuild_sched_domains(void) |
1042 | { | |
d74b27d6 | 1043 | get_online_cpus(); |
1243dc51 | 1044 | percpu_down_write(&cpuset_rwsem); |
699140ba | 1045 | rebuild_sched_domains_locked(); |
1243dc51 | 1046 | percpu_up_write(&cpuset_rwsem); |
d74b27d6 | 1047 | put_online_cpus(); |
029190c5 PJ |
1048 | } |
1049 | ||
0b2f630a MX |
1050 | /** |
1051 | * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. | |
1052 | * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed | |
0b2f630a | 1053 | * |
d66393e5 TH |
1054 | * Iterate through each task of @cs updating its cpus_allowed to the |
1055 | * effective cpuset's. As this function is called with cpuset_mutex held, | |
1056 | * cpuset membership stays stable. | |
0b2f630a | 1057 | */ |
d66393e5 | 1058 | static void update_tasks_cpumask(struct cpuset *cs) |
0b2f630a | 1059 | { |
d66393e5 TH |
1060 | struct css_task_iter it; |
1061 | struct task_struct *task; | |
1062 | ||
bc2fb7ed | 1063 | css_task_iter_start(&cs->css, 0, &it); |
d66393e5 | 1064 | while ((task = css_task_iter_next(&it))) |
ae1c8023 | 1065 | set_cpus_allowed_ptr(task, cs->effective_cpus); |
d66393e5 | 1066 | css_task_iter_end(&it); |
0b2f630a MX |
1067 | } |
1068 | ||
ee8dde0c WL |
1069 | /** |
1070 | * compute_effective_cpumask - Compute the effective cpumask of the cpuset | |
1071 | * @new_cpus: the temp variable for the new effective_cpus mask | |
1072 | * @cs: the cpuset the need to recompute the new effective_cpus mask | |
1073 | * @parent: the parent cpuset | |
1074 | * | |
1075 | * If the parent has subpartition CPUs, include them in the list of | |
4b842da2 WL |
1076 | * allowable CPUs in computing the new effective_cpus mask. Since offlined |
1077 | * CPUs are not removed from subparts_cpus, we have to use cpu_active_mask | |
1078 | * to mask those out. | |
ee8dde0c WL |
1079 | */ |
1080 | static void compute_effective_cpumask(struct cpumask *new_cpus, | |
1081 | struct cpuset *cs, struct cpuset *parent) | |
1082 | { | |
1083 | if (parent->nr_subparts_cpus) { | |
1084 | cpumask_or(new_cpus, parent->effective_cpus, | |
1085 | parent->subparts_cpus); | |
1086 | cpumask_and(new_cpus, new_cpus, cs->cpus_allowed); | |
4b842da2 | 1087 | cpumask_and(new_cpus, new_cpus, cpu_active_mask); |
ee8dde0c WL |
1088 | } else { |
1089 | cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus); | |
1090 | } | |
1091 | } | |
1092 | ||
1093 | /* | |
1094 | * Commands for update_parent_subparts_cpumask | |
1095 | */ | |
1096 | enum subparts_cmd { | |
1097 | partcmd_enable, /* Enable partition root */ | |
1098 | partcmd_disable, /* Disable partition root */ | |
1099 | partcmd_update, /* Update parent's subparts_cpus */ | |
1100 | }; | |
1101 | ||
1102 | /** | |
1103 | * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset | |
1104 | * @cpuset: The cpuset that requests change in partition root state | |
1105 | * @cmd: Partition root state change command | |
1106 | * @newmask: Optional new cpumask for partcmd_update | |
1107 | * @tmp: Temporary addmask and delmask | |
1108 | * Return: 0, 1 or an error code | |
1109 | * | |
1110 | * For partcmd_enable, the cpuset is being transformed from a non-partition | |
1111 | * root to a partition root. The cpus_allowed mask of the given cpuset will | |
1112 | * be put into parent's subparts_cpus and taken away from parent's | |
1113 | * effective_cpus. The function will return 0 if all the CPUs listed in | |
1114 | * cpus_allowed can be granted or an error code will be returned. | |
1115 | * | |
1116 | * For partcmd_disable, the cpuset is being transofrmed from a partition | |
1117 | * root back to a non-partition root. any CPUs in cpus_allowed that are in | |
1118 | * parent's subparts_cpus will be taken away from that cpumask and put back | |
1119 | * into parent's effective_cpus. 0 should always be returned. | |
1120 | * | |
1121 | * For partcmd_update, if the optional newmask is specified, the cpu | |
1122 | * list is to be changed from cpus_allowed to newmask. Otherwise, | |
3881b861 WL |
1123 | * cpus_allowed is assumed to remain the same. The cpuset should either |
1124 | * be a partition root or an invalid partition root. The partition root | |
1125 | * state may change if newmask is NULL and none of the requested CPUs can | |
1126 | * be granted by the parent. The function will return 1 if changes to | |
1127 | * parent's subparts_cpus and effective_cpus happen or 0 otherwise. | |
1128 | * Error code should only be returned when newmask is non-NULL. | |
ee8dde0c WL |
1129 | * |
1130 | * The partcmd_enable and partcmd_disable commands are used by | |
1131 | * update_prstate(). The partcmd_update command is used by | |
1132 | * update_cpumasks_hier() with newmask NULL and update_cpumask() with | |
1133 | * newmask set. | |
1134 | * | |
1135 | * The checking is more strict when enabling partition root than the | |
1136 | * other two commands. | |
1137 | * | |
1138 | * Because of the implicit cpu exclusive nature of a partition root, | |
1139 | * cpumask changes that violates the cpu exclusivity rule will not be | |
1140 | * permitted when checked by validate_change(). The validate_change() | |
1141 | * function will also prevent any changes to the cpu list if it is not | |
1142 | * a superset of children's cpu lists. | |
1143 | */ | |
1144 | static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd, | |
1145 | struct cpumask *newmask, | |
1146 | struct tmpmasks *tmp) | |
1147 | { | |
1148 | struct cpuset *parent = parent_cs(cpuset); | |
1149 | int adding; /* Moving cpus from effective_cpus to subparts_cpus */ | |
1150 | int deleting; /* Moving cpus from subparts_cpus to effective_cpus */ | |
3881b861 | 1151 | bool part_error = false; /* Partition error? */ |
ee8dde0c | 1152 | |
1243dc51 | 1153 | percpu_rwsem_assert_held(&cpuset_rwsem); |
ee8dde0c WL |
1154 | |
1155 | /* | |
1156 | * The parent must be a partition root. | |
1157 | * The new cpumask, if present, or the current cpus_allowed must | |
1158 | * not be empty. | |
1159 | */ | |
1160 | if (!is_partition_root(parent) || | |
1161 | (newmask && cpumask_empty(newmask)) || | |
1162 | (!newmask && cpumask_empty(cpuset->cpus_allowed))) | |
1163 | return -EINVAL; | |
1164 | ||
1165 | /* | |
1166 | * Enabling/disabling partition root is not allowed if there are | |
1167 | * online children. | |
1168 | */ | |
1169 | if ((cmd != partcmd_update) && css_has_online_children(&cpuset->css)) | |
1170 | return -EBUSY; | |
1171 | ||
1172 | /* | |
1173 | * Enabling partition root is not allowed if not all the CPUs | |
1174 | * can be granted from parent's effective_cpus or at least one | |
1175 | * CPU will be left after that. | |
1176 | */ | |
1177 | if ((cmd == partcmd_enable) && | |
1178 | (!cpumask_subset(cpuset->cpus_allowed, parent->effective_cpus) || | |
1179 | cpumask_equal(cpuset->cpus_allowed, parent->effective_cpus))) | |
1180 | return -EINVAL; | |
1181 | ||
1182 | /* | |
1183 | * A cpumask update cannot make parent's effective_cpus become empty. | |
1184 | */ | |
1185 | adding = deleting = false; | |
1186 | if (cmd == partcmd_enable) { | |
1187 | cpumask_copy(tmp->addmask, cpuset->cpus_allowed); | |
1188 | adding = true; | |
1189 | } else if (cmd == partcmd_disable) { | |
1190 | deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, | |
1191 | parent->subparts_cpus); | |
1192 | } else if (newmask) { | |
1193 | /* | |
1194 | * partcmd_update with newmask: | |
1195 | * | |
1196 | * delmask = cpus_allowed & ~newmask & parent->subparts_cpus | |
1197 | * addmask = newmask & parent->effective_cpus | |
1198 | * & ~parent->subparts_cpus | |
1199 | */ | |
1200 | cpumask_andnot(tmp->delmask, cpuset->cpus_allowed, newmask); | |
1201 | deleting = cpumask_and(tmp->delmask, tmp->delmask, | |
1202 | parent->subparts_cpus); | |
1203 | ||
1204 | cpumask_and(tmp->addmask, newmask, parent->effective_cpus); | |
1205 | adding = cpumask_andnot(tmp->addmask, tmp->addmask, | |
1206 | parent->subparts_cpus); | |
1207 | /* | |
1208 | * Return error if the new effective_cpus could become empty. | |
1209 | */ | |
4b842da2 WL |
1210 | if (adding && |
1211 | cpumask_equal(parent->effective_cpus, tmp->addmask)) { | |
1212 | if (!deleting) | |
1213 | return -EINVAL; | |
1214 | /* | |
1215 | * As some of the CPUs in subparts_cpus might have | |
1216 | * been offlined, we need to compute the real delmask | |
1217 | * to confirm that. | |
1218 | */ | |
1219 | if (!cpumask_and(tmp->addmask, tmp->delmask, | |
1220 | cpu_active_mask)) | |
1221 | return -EINVAL; | |
1222 | cpumask_copy(tmp->addmask, parent->effective_cpus); | |
1223 | } | |
ee8dde0c WL |
1224 | } else { |
1225 | /* | |
1226 | * partcmd_update w/o newmask: | |
1227 | * | |
1228 | * addmask = cpus_allowed & parent->effectiveb_cpus | |
1229 | * | |
1230 | * Note that parent's subparts_cpus may have been | |
3881b861 WL |
1231 | * pre-shrunk in case there is a change in the cpu list. |
1232 | * So no deletion is needed. | |
ee8dde0c WL |
1233 | */ |
1234 | adding = cpumask_and(tmp->addmask, cpuset->cpus_allowed, | |
1235 | parent->effective_cpus); | |
3881b861 WL |
1236 | part_error = cpumask_equal(tmp->addmask, |
1237 | parent->effective_cpus); | |
1238 | } | |
1239 | ||
1240 | if (cmd == partcmd_update) { | |
1241 | int prev_prs = cpuset->partition_root_state; | |
1242 | ||
1243 | /* | |
1244 | * Check for possible transition between PRS_ENABLED | |
1245 | * and PRS_ERROR. | |
1246 | */ | |
1247 | switch (cpuset->partition_root_state) { | |
1248 | case PRS_ENABLED: | |
1249 | if (part_error) | |
1250 | cpuset->partition_root_state = PRS_ERROR; | |
1251 | break; | |
1252 | case PRS_ERROR: | |
1253 | if (!part_error) | |
1254 | cpuset->partition_root_state = PRS_ENABLED; | |
1255 | break; | |
1256 | } | |
1257 | /* | |
1258 | * Set part_error if previously in invalid state. | |
1259 | */ | |
1260 | part_error = (prev_prs == PRS_ERROR); | |
1261 | } | |
1262 | ||
1263 | if (!part_error && (cpuset->partition_root_state == PRS_ERROR)) | |
1264 | return 0; /* Nothing need to be done */ | |
1265 | ||
1266 | if (cpuset->partition_root_state == PRS_ERROR) { | |
1267 | /* | |
1268 | * Remove all its cpus from parent's subparts_cpus. | |
1269 | */ | |
1270 | adding = false; | |
1271 | deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, | |
1272 | parent->subparts_cpus); | |
ee8dde0c WL |
1273 | } |
1274 | ||
1275 | if (!adding && !deleting) | |
1276 | return 0; | |
1277 | ||
1278 | /* | |
1279 | * Change the parent's subparts_cpus. | |
1280 | * Newly added CPUs will be removed from effective_cpus and | |
1281 | * newly deleted ones will be added back to effective_cpus. | |
1282 | */ | |
1283 | spin_lock_irq(&callback_lock); | |
1284 | if (adding) { | |
1285 | cpumask_or(parent->subparts_cpus, | |
1286 | parent->subparts_cpus, tmp->addmask); | |
1287 | cpumask_andnot(parent->effective_cpus, | |
1288 | parent->effective_cpus, tmp->addmask); | |
1289 | } | |
1290 | if (deleting) { | |
1291 | cpumask_andnot(parent->subparts_cpus, | |
1292 | parent->subparts_cpus, tmp->delmask); | |
4b842da2 WL |
1293 | /* |
1294 | * Some of the CPUs in subparts_cpus might have been offlined. | |
1295 | */ | |
1296 | cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask); | |
ee8dde0c WL |
1297 | cpumask_or(parent->effective_cpus, |
1298 | parent->effective_cpus, tmp->delmask); | |
1299 | } | |
1300 | ||
1301 | parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus); | |
1302 | spin_unlock_irq(&callback_lock); | |
1303 | ||
1304 | return cmd == partcmd_update; | |
1305 | } | |
1306 | ||
5c5cc623 | 1307 | /* |
734d4513 | 1308 | * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree |
ee8dde0c WL |
1309 | * @cs: the cpuset to consider |
1310 | * @tmp: temp variables for calculating effective_cpus & partition setup | |
734d4513 LZ |
1311 | * |
1312 | * When congifured cpumask is changed, the effective cpumasks of this cpuset | |
1313 | * and all its descendants need to be updated. | |
5c5cc623 | 1314 | * |
734d4513 | 1315 | * On legacy hierachy, effective_cpus will be the same with cpu_allowed. |
5c5cc623 LZ |
1316 | * |
1317 | * Called with cpuset_mutex held | |
1318 | */ | |
ee8dde0c | 1319 | static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp) |
5c5cc623 LZ |
1320 | { |
1321 | struct cpuset *cp; | |
492eb21b | 1322 | struct cgroup_subsys_state *pos_css; |
8b5f1c52 | 1323 | bool need_rebuild_sched_domains = false; |
5c5cc623 LZ |
1324 | |
1325 | rcu_read_lock(); | |
734d4513 LZ |
1326 | cpuset_for_each_descendant_pre(cp, pos_css, cs) { |
1327 | struct cpuset *parent = parent_cs(cp); | |
1328 | ||
ee8dde0c | 1329 | compute_effective_cpumask(tmp->new_cpus, cp, parent); |
734d4513 | 1330 | |
554b0d1c LZ |
1331 | /* |
1332 | * If it becomes empty, inherit the effective mask of the | |
1333 | * parent, which is guaranteed to have some CPUs. | |
1334 | */ | |
4716909c | 1335 | if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) { |
ee8dde0c | 1336 | cpumask_copy(tmp->new_cpus, parent->effective_cpus); |
4716909c WL |
1337 | if (!cp->use_parent_ecpus) { |
1338 | cp->use_parent_ecpus = true; | |
1339 | parent->child_ecpus_count++; | |
1340 | } | |
1341 | } else if (cp->use_parent_ecpus) { | |
1342 | cp->use_parent_ecpus = false; | |
1343 | WARN_ON_ONCE(!parent->child_ecpus_count); | |
1344 | parent->child_ecpus_count--; | |
1345 | } | |
554b0d1c | 1346 | |
ee8dde0c WL |
1347 | /* |
1348 | * Skip the whole subtree if the cpumask remains the same | |
1349 | * and has no partition root state. | |
1350 | */ | |
3881b861 | 1351 | if (!cp->partition_root_state && |
ee8dde0c | 1352 | cpumask_equal(tmp->new_cpus, cp->effective_cpus)) { |
734d4513 LZ |
1353 | pos_css = css_rightmost_descendant(pos_css); |
1354 | continue; | |
5c5cc623 | 1355 | } |
734d4513 | 1356 | |
ee8dde0c WL |
1357 | /* |
1358 | * update_parent_subparts_cpumask() should have been called | |
1359 | * for cs already in update_cpumask(). We should also call | |
1360 | * update_tasks_cpumask() again for tasks in the parent | |
1361 | * cpuset if the parent's subparts_cpus changes. | |
1362 | */ | |
3881b861 WL |
1363 | if ((cp != cs) && cp->partition_root_state) { |
1364 | switch (parent->partition_root_state) { | |
1365 | case PRS_DISABLED: | |
1366 | /* | |
1367 | * If parent is not a partition root or an | |
1368 | * invalid partition root, clear its state | |
1369 | * and the CS_CPU_EXCLUSIVE flag. | |
1370 | */ | |
1371 | WARN_ON_ONCE(cp->partition_root_state | |
1372 | != PRS_ERROR); | |
1373 | cp->partition_root_state = 0; | |
1374 | ||
1375 | /* | |
1376 | * clear_bit() is an atomic operation and | |
1377 | * readers aren't interested in the state | |
1378 | * of CS_CPU_EXCLUSIVE anyway. So we can | |
1379 | * just update the flag without holding | |
1380 | * the callback_lock. | |
1381 | */ | |
1382 | clear_bit(CS_CPU_EXCLUSIVE, &cp->flags); | |
1383 | break; | |
1384 | ||
1385 | case PRS_ENABLED: | |
1386 | if (update_parent_subparts_cpumask(cp, partcmd_update, NULL, tmp)) | |
1387 | update_tasks_cpumask(parent); | |
1388 | break; | |
1389 | ||
1390 | case PRS_ERROR: | |
1391 | /* | |
1392 | * When the parent is invalid, the child has to be invalid too. | |
1393 | */ | |
1394 | cp->partition_root_state = PRS_ERROR; | |
1395 | if (cp->nr_subparts_cpus) { | |
1396 | cp->nr_subparts_cpus = 0; | |
1397 | cpumask_clear(cp->subparts_cpus); | |
1398 | } | |
1399 | break; | |
1400 | } | |
ee8dde0c WL |
1401 | } |
1402 | ||
ec903c0c | 1403 | if (!css_tryget_online(&cp->css)) |
5c5cc623 LZ |
1404 | continue; |
1405 | rcu_read_unlock(); | |
1406 | ||
8447a0fe | 1407 | spin_lock_irq(&callback_lock); |
ee8dde0c WL |
1408 | |
1409 | cpumask_copy(cp->effective_cpus, tmp->new_cpus); | |
3881b861 WL |
1410 | if (cp->nr_subparts_cpus && |
1411 | (cp->partition_root_state != PRS_ENABLED)) { | |
1412 | cp->nr_subparts_cpus = 0; | |
1413 | cpumask_clear(cp->subparts_cpus); | |
1414 | } else if (cp->nr_subparts_cpus) { | |
ee8dde0c WL |
1415 | /* |
1416 | * Make sure that effective_cpus & subparts_cpus | |
1417 | * are mutually exclusive. | |
3881b861 WL |
1418 | * |
1419 | * In the unlikely event that effective_cpus | |
1420 | * becomes empty, we clear cp->nr_subparts_cpus and | |
1421 | * let its child partition roots compete for | |
1422 | * CPUs again. | |
ee8dde0c WL |
1423 | */ |
1424 | cpumask_andnot(cp->effective_cpus, cp->effective_cpus, | |
1425 | cp->subparts_cpus); | |
3881b861 WL |
1426 | if (cpumask_empty(cp->effective_cpus)) { |
1427 | cpumask_copy(cp->effective_cpus, tmp->new_cpus); | |
1428 | cpumask_clear(cp->subparts_cpus); | |
1429 | cp->nr_subparts_cpus = 0; | |
1430 | } else if (!cpumask_subset(cp->subparts_cpus, | |
1431 | tmp->new_cpus)) { | |
1432 | cpumask_andnot(cp->subparts_cpus, | |
1433 | cp->subparts_cpus, tmp->new_cpus); | |
1434 | cp->nr_subparts_cpus | |
1435 | = cpumask_weight(cp->subparts_cpus); | |
1436 | } | |
ee8dde0c | 1437 | } |
8447a0fe | 1438 | spin_unlock_irq(&callback_lock); |
734d4513 | 1439 | |
b8d1b8ee | 1440 | WARN_ON(!is_in_v2_mode() && |
734d4513 LZ |
1441 | !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); |
1442 | ||
d66393e5 | 1443 | update_tasks_cpumask(cp); |
5c5cc623 | 1444 | |
8b5f1c52 | 1445 | /* |
0ccea8fe WL |
1446 | * On legacy hierarchy, if the effective cpumask of any non- |
1447 | * empty cpuset is changed, we need to rebuild sched domains. | |
1448 | * On default hierarchy, the cpuset needs to be a partition | |
1449 | * root as well. | |
8b5f1c52 LZ |
1450 | */ |
1451 | if (!cpumask_empty(cp->cpus_allowed) && | |
0ccea8fe WL |
1452 | is_sched_load_balance(cp) && |
1453 | (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || | |
1454 | is_partition_root(cp))) | |
8b5f1c52 LZ |
1455 | need_rebuild_sched_domains = true; |
1456 | ||
5c5cc623 LZ |
1457 | rcu_read_lock(); |
1458 | css_put(&cp->css); | |
1459 | } | |
1460 | rcu_read_unlock(); | |
8b5f1c52 LZ |
1461 | |
1462 | if (need_rebuild_sched_domains) | |
1463 | rebuild_sched_domains_locked(); | |
5c5cc623 LZ |
1464 | } |
1465 | ||
4716909c WL |
1466 | /** |
1467 | * update_sibling_cpumasks - Update siblings cpumasks | |
1468 | * @parent: Parent cpuset | |
1469 | * @cs: Current cpuset | |
1470 | * @tmp: Temp variables | |
1471 | */ | |
1472 | static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, | |
1473 | struct tmpmasks *tmp) | |
1474 | { | |
1475 | struct cpuset *sibling; | |
1476 | struct cgroup_subsys_state *pos_css; | |
1477 | ||
1478 | /* | |
1479 | * Check all its siblings and call update_cpumasks_hier() | |
1480 | * if their use_parent_ecpus flag is set in order for them | |
1481 | * to use the right effective_cpus value. | |
1482 | */ | |
1483 | rcu_read_lock(); | |
1484 | cpuset_for_each_child(sibling, pos_css, parent) { | |
1485 | if (sibling == cs) | |
1486 | continue; | |
1487 | if (!sibling->use_parent_ecpus) | |
1488 | continue; | |
1489 | ||
1490 | update_cpumasks_hier(sibling, tmp); | |
1491 | } | |
1492 | rcu_read_unlock(); | |
1493 | } | |
1494 | ||
58f4790b CW |
1495 | /** |
1496 | * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it | |
1497 | * @cs: the cpuset to consider | |
fc34ac1d | 1498 | * @trialcs: trial cpuset |
58f4790b CW |
1499 | * @buf: buffer of cpu numbers written to this cpuset |
1500 | */ | |
645fcc9d LZ |
1501 | static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, |
1502 | const char *buf) | |
1da177e4 | 1503 | { |
58f4790b | 1504 | int retval; |
ee8dde0c | 1505 | struct tmpmasks tmp; |
1da177e4 | 1506 | |
5f054e31 | 1507 | /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */ |
4c4d50f7 PJ |
1508 | if (cs == &top_cpuset) |
1509 | return -EACCES; | |
1510 | ||
6f7f02e7 | 1511 | /* |
c8d9c90c | 1512 | * An empty cpus_allowed is ok only if the cpuset has no tasks. |
020958b6 PJ |
1513 | * Since cpulist_parse() fails on an empty mask, we special case |
1514 | * that parsing. The validate_change() call ensures that cpusets | |
1515 | * with tasks have cpus. | |
6f7f02e7 | 1516 | */ |
020958b6 | 1517 | if (!*buf) { |
300ed6cb | 1518 | cpumask_clear(trialcs->cpus_allowed); |
6f7f02e7 | 1519 | } else { |
300ed6cb | 1520 | retval = cpulist_parse(buf, trialcs->cpus_allowed); |
6f7f02e7 DR |
1521 | if (retval < 0) |
1522 | return retval; | |
37340746 | 1523 | |
5d8ba82c LZ |
1524 | if (!cpumask_subset(trialcs->cpus_allowed, |
1525 | top_cpuset.cpus_allowed)) | |
37340746 | 1526 | return -EINVAL; |
6f7f02e7 | 1527 | } |
029190c5 | 1528 | |
8707d8b8 | 1529 | /* Nothing to do if the cpus didn't change */ |
300ed6cb | 1530 | if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) |
8707d8b8 | 1531 | return 0; |
58f4790b | 1532 | |
a73456f3 LZ |
1533 | retval = validate_change(cs, trialcs); |
1534 | if (retval < 0) | |
1535 | return retval; | |
1536 | ||
ee8dde0c WL |
1537 | #ifdef CONFIG_CPUMASK_OFFSTACK |
1538 | /* | |
1539 | * Use the cpumasks in trialcs for tmpmasks when they are pointers | |
1540 | * to allocated cpumasks. | |
1541 | */ | |
1542 | tmp.addmask = trialcs->subparts_cpus; | |
1543 | tmp.delmask = trialcs->effective_cpus; | |
1544 | tmp.new_cpus = trialcs->cpus_allowed; | |
1545 | #endif | |
1546 | ||
1547 | if (cs->partition_root_state) { | |
1548 | /* Cpumask of a partition root cannot be empty */ | |
1549 | if (cpumask_empty(trialcs->cpus_allowed)) | |
1550 | return -EINVAL; | |
1551 | if (update_parent_subparts_cpumask(cs, partcmd_update, | |
1552 | trialcs->cpus_allowed, &tmp) < 0) | |
1553 | return -EINVAL; | |
1554 | } | |
1555 | ||
8447a0fe | 1556 | spin_lock_irq(&callback_lock); |
300ed6cb | 1557 | cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); |
ee8dde0c WL |
1558 | |
1559 | /* | |
1560 | * Make sure that subparts_cpus is a subset of cpus_allowed. | |
1561 | */ | |
1562 | if (cs->nr_subparts_cpus) { | |
1563 | cpumask_andnot(cs->subparts_cpus, cs->subparts_cpus, | |
1564 | cs->cpus_allowed); | |
1565 | cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); | |
1566 | } | |
8447a0fe | 1567 | spin_unlock_irq(&callback_lock); |
029190c5 | 1568 | |
ee8dde0c | 1569 | update_cpumasks_hier(cs, &tmp); |
4716909c WL |
1570 | |
1571 | if (cs->partition_root_state) { | |
1572 | struct cpuset *parent = parent_cs(cs); | |
1573 | ||
1574 | /* | |
1575 | * For partition root, update the cpumasks of sibling | |
1576 | * cpusets if they use parent's effective_cpus. | |
1577 | */ | |
1578 | if (parent->child_ecpus_count) | |
1579 | update_sibling_cpumasks(parent, cs, &tmp); | |
1580 | } | |
85d7b949 | 1581 | return 0; |
1da177e4 LT |
1582 | } |
1583 | ||
e4e364e8 | 1584 | /* |
e93ad19d TH |
1585 | * Migrate memory region from one set of nodes to another. This is |
1586 | * performed asynchronously as it can be called from process migration path | |
1587 | * holding locks involved in process management. All mm migrations are | |
1588 | * performed in the queued order and can be waited for by flushing | |
1589 | * cpuset_migrate_mm_wq. | |
e4e364e8 PJ |
1590 | */ |
1591 | ||
e93ad19d TH |
1592 | struct cpuset_migrate_mm_work { |
1593 | struct work_struct work; | |
1594 | struct mm_struct *mm; | |
1595 | nodemask_t from; | |
1596 | nodemask_t to; | |
1597 | }; | |
1598 | ||
1599 | static void cpuset_migrate_mm_workfn(struct work_struct *work) | |
1600 | { | |
1601 | struct cpuset_migrate_mm_work *mwork = | |
1602 | container_of(work, struct cpuset_migrate_mm_work, work); | |
1603 | ||
1604 | /* on a wq worker, no need to worry about %current's mems_allowed */ | |
1605 | do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); | |
1606 | mmput(mwork->mm); | |
1607 | kfree(mwork); | |
1608 | } | |
1609 | ||
e4e364e8 PJ |
1610 | static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, |
1611 | const nodemask_t *to) | |
1612 | { | |
e93ad19d | 1613 | struct cpuset_migrate_mm_work *mwork; |
e4e364e8 | 1614 | |
e93ad19d TH |
1615 | mwork = kzalloc(sizeof(*mwork), GFP_KERNEL); |
1616 | if (mwork) { | |
1617 | mwork->mm = mm; | |
1618 | mwork->from = *from; | |
1619 | mwork->to = *to; | |
1620 | INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); | |
1621 | queue_work(cpuset_migrate_mm_wq, &mwork->work); | |
1622 | } else { | |
1623 | mmput(mm); | |
1624 | } | |
1625 | } | |
e4e364e8 | 1626 | |
5cf1cacb | 1627 | static void cpuset_post_attach(void) |
e93ad19d TH |
1628 | { |
1629 | flush_workqueue(cpuset_migrate_mm_wq); | |
e4e364e8 PJ |
1630 | } |
1631 | ||
3b6766fe | 1632 | /* |
58568d2a MX |
1633 | * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy |
1634 | * @tsk: the task to change | |
1635 | * @newmems: new nodes that the task will be set | |
1636 | * | |
5f155f27 VB |
1637 | * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed |
1638 | * and rebind an eventual tasks' mempolicy. If the task is allocating in | |
1639 | * parallel, it might temporarily see an empty intersection, which results in | |
1640 | * a seqlock check and retry before OOM or allocation failure. | |
58568d2a MX |
1641 | */ |
1642 | static void cpuset_change_task_nodemask(struct task_struct *tsk, | |
1643 | nodemask_t *newmems) | |
1644 | { | |
c0ff7453 | 1645 | task_lock(tsk); |
c0ff7453 | 1646 | |
5f155f27 VB |
1647 | local_irq_disable(); |
1648 | write_seqcount_begin(&tsk->mems_allowed_seq); | |
c0ff7453 | 1649 | |
cc9a6c87 | 1650 | nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); |
213980c0 | 1651 | mpol_rebind_task(tsk, newmems); |
58568d2a | 1652 | tsk->mems_allowed = *newmems; |
cc9a6c87 | 1653 | |
5f155f27 VB |
1654 | write_seqcount_end(&tsk->mems_allowed_seq); |
1655 | local_irq_enable(); | |
cc9a6c87 | 1656 | |
c0ff7453 | 1657 | task_unlock(tsk); |
58568d2a MX |
1658 | } |
1659 | ||
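The matching reader side of this seqcount does not live in this file; it is provided by read_mems_allowed_begin() and read_mems_allowed_retry() in include/linux/cpuset.h. Below is a minimal sketch of the retry pattern a reader such as the page allocator follows; alloc_respecting_mems_allowed() and try_alloc_from() are made-up placeholders for illustration, not kernel functions.

	/*
	 * Sketch only: pairs with the write_seqcount_begin()/end() above.
	 * try_alloc_from() stands in for the real allocation attempt.
	 */
	static struct page *alloc_respecting_mems_allowed(gfp_t gfp)
	{
		struct page *page;
		unsigned int seq;

		do {
			seq = read_mems_allowed_begin();
			page = try_alloc_from(&current->mems_allowed, gfp);
		} while (!page && read_mems_allowed_retry(seq));

		return page;
	}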
8793d854 PM |
1660 | static void *cpuset_being_rebound; |
1661 | ||
0b2f630a MX |
1662 | /** |
1663 | * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. | |
1664 | * @cs: the cpuset in which each task's mems_allowed mask needs to be changed | |
0b2f630a | 1665 | * |
d66393e5 TH |
1666 | * Iterate through each task of @cs updating its mems_allowed to the |
1667 | * effective cpuset's. As this function is called with cpuset_mutex held, | |
1668 | * cpuset membership stays stable. | |
0b2f630a | 1669 | */ |
d66393e5 | 1670 | static void update_tasks_nodemask(struct cpuset *cs) |
1da177e4 | 1671 | { |
33ad801d | 1672 | static nodemask_t newmems; /* protected by cpuset_mutex */ |
d66393e5 TH |
1673 | struct css_task_iter it; |
1674 | struct task_struct *task; | |
59dac16f | 1675 | |
846a16bf | 1676 | cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ |
4225399a | 1677 | |
ae1c8023 | 1678 | guarantee_online_mems(cs, &newmems); |
33ad801d | 1679 | |
4225399a | 1680 | /* |
c1e8d7c6 | 1681 | * The mpol_rebind_mm() call takes mmap_lock, which we couldn't |
3b6766fe LZ |
1682 | * take while holding tasklist_lock. Forks can happen - the |
1683 | * mpol_dup() cpuset_being_rebound check will catch such forks, | |
1684 | * and rebind their vma mempolicies too. Because we still hold | |
5d21cc2d | 1685 | * the global cpuset_mutex, we know that no other rebind effort |
3b6766fe | 1686 | * will be contending for the global variable cpuset_being_rebound. |
4225399a | 1687 | * It's ok if we rebind the same mm twice; mpol_rebind_mm() |
04c19fa6 | 1688 | * is idempotent. Also migrate pages in each mm to new nodes. |
4225399a | 1689 | */ |
bc2fb7ed | 1690 | css_task_iter_start(&cs->css, 0, &it); |
d66393e5 TH |
1691 | while ((task = css_task_iter_next(&it))) { |
1692 | struct mm_struct *mm; | |
1693 | bool migrate; | |
1694 | ||
1695 | cpuset_change_task_nodemask(task, &newmems); | |
1696 | ||
1697 | mm = get_task_mm(task); | |
1698 | if (!mm) | |
1699 | continue; | |
1700 | ||
1701 | migrate = is_memory_migrate(cs); | |
1702 | ||
1703 | mpol_rebind_mm(mm, &cs->mems_allowed); | |
1704 | if (migrate) | |
1705 | cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); | |
e93ad19d TH |
1706 | else |
1707 | mmput(mm); | |
d66393e5 TH |
1708 | } |
1709 | css_task_iter_end(&it); | |
4225399a | 1710 | |
33ad801d LZ |
1711 | /* |
1712 | * All the tasks' nodemasks have been updated, update | |
1713 | * cs->old_mems_allowed. | |
1714 | */ | |
1715 | cs->old_mems_allowed = newmems; | |
1716 | ||
2df167a3 | 1717 | /* We're done rebinding vmas to this cpuset's new mems_allowed. */ |
8793d854 | 1718 | cpuset_being_rebound = NULL; |
1da177e4 LT |
1719 | } |
1720 | ||
5c5cc623 | 1721 | /* |
734d4513 LZ |
1722 | * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree |
1723 | * @cs: the cpuset to consider | |
1724 | * @new_mems: a temp variable for calculating new effective_mems | |
5c5cc623 | 1725 | * |
734d4513 LZ |
1726 | * When the configured nodemask is changed, the effective nodemasks of this cpuset |
1727 | * and all its descendants need to be updated. | |
5c5cc623 | 1728 | * |
734d4513 | 1729 | * On legacy hierarchy, effective_mems will be the same as mems_allowed. |
5c5cc623 LZ |
1730 | * |
1731 | * Called with cpuset_mutex held | |
1732 | */ | |
734d4513 | 1733 | static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) |
5c5cc623 LZ |
1734 | { |
1735 | struct cpuset *cp; | |
492eb21b | 1736 | struct cgroup_subsys_state *pos_css; |
5c5cc623 LZ |
1737 | |
1738 | rcu_read_lock(); | |
734d4513 LZ |
1739 | cpuset_for_each_descendant_pre(cp, pos_css, cs) { |
1740 | struct cpuset *parent = parent_cs(cp); | |
1741 | ||
1742 | nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); | |
1743 | ||
554b0d1c LZ |
1744 | /* |
1745 | * If it becomes empty, inherit the effective mask of the | |
1746 | * parent, which is guaranteed to have some MEMs. | |
1747 | */ | |
b8d1b8ee | 1748 | if (is_in_v2_mode() && nodes_empty(*new_mems)) |
554b0d1c LZ |
1749 | *new_mems = parent->effective_mems; |
1750 | ||
734d4513 LZ |
1751 | /* Skip the whole subtree if the nodemask remains the same. */ |
1752 | if (nodes_equal(*new_mems, cp->effective_mems)) { | |
1753 | pos_css = css_rightmost_descendant(pos_css); | |
1754 | continue; | |
5c5cc623 | 1755 | } |
734d4513 | 1756 | |
ec903c0c | 1757 | if (!css_tryget_online(&cp->css)) |
5c5cc623 LZ |
1758 | continue; |
1759 | rcu_read_unlock(); | |
1760 | ||
8447a0fe | 1761 | spin_lock_irq(&callback_lock); |
734d4513 | 1762 | cp->effective_mems = *new_mems; |
8447a0fe | 1763 | spin_unlock_irq(&callback_lock); |
734d4513 | 1764 | |
b8d1b8ee | 1765 | WARN_ON(!is_in_v2_mode() && |
a1381268 | 1766 | !nodes_equal(cp->mems_allowed, cp->effective_mems)); |
734d4513 | 1767 | |
d66393e5 | 1768 | update_tasks_nodemask(cp); |
5c5cc623 LZ |
1769 | |
1770 | rcu_read_lock(); | |
1771 | css_put(&cp->css); | |
1772 | } | |
1773 | rcu_read_unlock(); | |
1774 | } | |
1775 | ||
0b2f630a MX |
1776 | /* |
1777 | * Handle user request to change the 'mems' memory placement | |
1778 | * of a cpuset. Needs to validate the request, update the | |
58568d2a MX |
1779 | * cpusets mems_allowed, and for each task in the cpuset, |
1780 | * update mems_allowed and rebind task's mempolicy and any vma | |
1781 | * mempolicies and if the cpuset is marked 'memory_migrate', | |
1782 | * migrate the tasks pages to the new memory. | |
0b2f630a | 1783 | * |
8447a0fe | 1784 | * Call with cpuset_mutex held. May take callback_lock during call. |
0b2f630a | 1785 | * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, |
c1e8d7c6 | 1786 | * lock each such tasks mm->mmap_lock, scan its vma's and rebind |
0b2f630a MX |
1787 | * their mempolicies to the cpusets new mems_allowed. |
1788 | */ | |
645fcc9d LZ |
1789 | static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, |
1790 | const char *buf) | |
0b2f630a | 1791 | { |
0b2f630a MX |
1792 | int retval; |
1793 | ||
1794 | /* | |
38d7bee9 | 1795 | * top_cpuset.mems_allowed tracks node_states[N_MEMORY]; |
0b2f630a MX |
1796 | * it's read-only |
1797 | */ | |
53feb297 MX |
1798 | if (cs == &top_cpuset) { |
1799 | retval = -EACCES; | |
1800 | goto done; | |
1801 | } | |
0b2f630a | 1802 | |
0b2f630a MX |
1803 | /* |
1804 | * An empty mems_allowed is ok iff there are no tasks in the cpuset. | |
1805 | * Since nodelist_parse() fails on an empty mask, we special case | |
1806 | * that parsing. The validate_change() call ensures that cpusets | |
1807 | * with tasks have memory. | |
1808 | */ | |
1809 | if (!*buf) { | |
645fcc9d | 1810 | nodes_clear(trialcs->mems_allowed); |
0b2f630a | 1811 | } else { |
645fcc9d | 1812 | retval = nodelist_parse(buf, trialcs->mems_allowed); |
0b2f630a MX |
1813 | if (retval < 0) |
1814 | goto done; | |
1815 | ||
645fcc9d | 1816 | if (!nodes_subset(trialcs->mems_allowed, |
5d8ba82c LZ |
1817 | top_cpuset.mems_allowed)) { |
1818 | retval = -EINVAL; | |
53feb297 MX |
1819 | goto done; |
1820 | } | |
0b2f630a | 1821 | } |
33ad801d LZ |
1822 | |
1823 | if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { | |
0b2f630a MX |
1824 | retval = 0; /* Too easy - nothing to do */ |
1825 | goto done; | |
1826 | } | |
645fcc9d | 1827 | retval = validate_change(cs, trialcs); |
0b2f630a MX |
1828 | if (retval < 0) |
1829 | goto done; | |
1830 | ||
8447a0fe | 1831 | spin_lock_irq(&callback_lock); |
645fcc9d | 1832 | cs->mems_allowed = trialcs->mems_allowed; |
8447a0fe | 1833 | spin_unlock_irq(&callback_lock); |
0b2f630a | 1834 | |
734d4513 | 1835 | /* use trialcs->mems_allowed as a temp variable */ |
24ee3cf8 | 1836 | update_nodemasks_hier(cs, &trialcs->mems_allowed); |
0b2f630a MX |
1837 | done: |
1838 | return retval; | |
1839 | } | |
1840 | ||
77ef80c6 | 1841 | bool current_cpuset_is_being_rebound(void) |
8793d854 | 1842 | { |
77ef80c6 | 1843 | bool ret; |
391acf97 GZ |
1844 | |
1845 | rcu_read_lock(); | |
1846 | ret = task_cs(current) == cpuset_being_rebound; | |
1847 | rcu_read_unlock(); | |
1848 | ||
1849 | return ret; | |
8793d854 PM |
1850 | } |
1851 | ||
5be7a479 | 1852 | static int update_relax_domain_level(struct cpuset *cs, s64 val) |
1d3504fc | 1853 | { |
db7f47cf | 1854 | #ifdef CONFIG_SMP |
60495e77 | 1855 | if (val < -1 || val >= sched_domain_level_max) |
30e0e178 | 1856 | return -EINVAL; |
db7f47cf | 1857 | #endif |
1d3504fc HS |
1858 | |
1859 | if (val != cs->relax_domain_level) { | |
1860 | cs->relax_domain_level = val; | |
300ed6cb LZ |
1861 | if (!cpumask_empty(cs->cpus_allowed) && |
1862 | is_sched_load_balance(cs)) | |
699140ba | 1863 | rebuild_sched_domains_locked(); |
1d3504fc HS |
1864 | } |
1865 | ||
1866 | return 0; | |
1867 | } | |
1868 | ||
72ec7029 | 1869 | /** |
950592f7 MX |
1870 | * update_tasks_flags - update the spread flags of tasks in the cpuset. |
1871 | * @cs: the cpuset in which each task's spread flags needs to be changed | |
950592f7 | 1872 | * |
d66393e5 TH |
1873 | * Iterate through each task of @cs updating its spread flags. As this |
1874 | * function is called with cpuset_mutex held, cpuset membership stays | |
1875 | * stable. | |
950592f7 | 1876 | */ |
d66393e5 | 1877 | static void update_tasks_flags(struct cpuset *cs) |
950592f7 | 1878 | { |
d66393e5 TH |
1879 | struct css_task_iter it; |
1880 | struct task_struct *task; | |
1881 | ||
bc2fb7ed | 1882 | css_task_iter_start(&cs->css, 0, &it); |
d66393e5 TH |
1883 | while ((task = css_task_iter_next(&it))) |
1884 | cpuset_update_task_spread_flag(cs, task); | |
1885 | css_task_iter_end(&it); | |
950592f7 MX |
1886 | } |
1887 | ||
1da177e4 LT |
1888 | /* |
1889 | * update_flag - read a 0 or a 1 in a file and update associated flag | |
78608366 PM |
1890 | * bit: the bit to update (see cpuset_flagbits_t) |
1891 | * cs: the cpuset to update | |
1892 | * turning_on: whether the flag is being set or cleared | |
053199ed | 1893 | * |
5d21cc2d | 1894 | * Call with cpuset_mutex held. |
1da177e4 LT |
1895 | */ |
1896 | ||
700fe1ab PM |
1897 | static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, |
1898 | int turning_on) | |
1da177e4 | 1899 | { |
645fcc9d | 1900 | struct cpuset *trialcs; |
40b6a762 | 1901 | int balance_flag_changed; |
950592f7 | 1902 | int spread_flag_changed; |
950592f7 | 1903 | int err; |
1da177e4 | 1904 | |
645fcc9d LZ |
1905 | trialcs = alloc_trial_cpuset(cs); |
1906 | if (!trialcs) | |
1907 | return -ENOMEM; | |
1908 | ||
1da177e4 | 1909 | if (turning_on) |
645fcc9d | 1910 | set_bit(bit, &trialcs->flags); |
1da177e4 | 1911 | else |
645fcc9d | 1912 | clear_bit(bit, &trialcs->flags); |
1da177e4 | 1913 | |
645fcc9d | 1914 | err = validate_change(cs, trialcs); |
85d7b949 | 1915 | if (err < 0) |
645fcc9d | 1916 | goto out; |
029190c5 | 1917 | |
029190c5 | 1918 | balance_flag_changed = (is_sched_load_balance(cs) != |
645fcc9d | 1919 | is_sched_load_balance(trialcs)); |
029190c5 | 1920 | |
950592f7 MX |
1921 | spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) |
1922 | || (is_spread_page(cs) != is_spread_page(trialcs))); | |
1923 | ||
8447a0fe | 1924 | spin_lock_irq(&callback_lock); |
645fcc9d | 1925 | cs->flags = trialcs->flags; |
8447a0fe | 1926 | spin_unlock_irq(&callback_lock); |
85d7b949 | 1927 | |
300ed6cb | 1928 | if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) |
699140ba | 1929 | rebuild_sched_domains_locked(); |
029190c5 | 1930 | |
950592f7 | 1931 | if (spread_flag_changed) |
d66393e5 | 1932 | update_tasks_flags(cs); |
645fcc9d | 1933 | out: |
bf92370c | 1934 | free_cpuset(trialcs); |
645fcc9d | 1935 | return err; |
1da177e4 LT |
1936 | } |
1937 | ||
ee8dde0c WL |
1938 | /* |
1939 | * update_prstate - update partition_root_state | |
1940 | * cs: the cpuset to update | |
1941 | * val: 0 - disabled, 1 - enabled | |
1942 | * | |
1943 | * Call with cpuset_mutex held. | |
1944 | */ | |
1945 | static int update_prstate(struct cpuset *cs, int val) | |
1946 | { | |
1947 | int err; | |
1948 | struct cpuset *parent = parent_cs(cs); | |
1949 | struct tmpmasks tmp; | |
1950 | ||
1951 | if ((val != 0) && (val != 1)) | |
1952 | return -EINVAL; | |
1953 | if (val == cs->partition_root_state) | |
1954 | return 0; | |
1955 | ||
1956 | /* | |
3881b861 | 1957 | * Cannot force a partial or invalid partition root to a full |
ee8dde0c WL |
1958 | * partition root. |
1959 | */ | |
1960 | if (val && cs->partition_root_state) | |
1961 | return -EINVAL; | |
1962 | ||
1963 | if (alloc_cpumasks(NULL, &tmp)) | |
1964 | return -ENOMEM; | |
1965 | ||
1966 | err = -EINVAL; | |
1967 | if (!cs->partition_root_state) { | |
1968 | /* | |
1969 | * Turning on partition root requires setting the | |
1970 | * CS_CPU_EXCLUSIVE bit implicitly as well and cpus_allowed | |
1971 | * cannot be empty. | |
1972 | */ | |
1973 | if (cpumask_empty(cs->cpus_allowed)) | |
1974 | goto out; | |
1975 | ||
1976 | err = update_flag(CS_CPU_EXCLUSIVE, cs, 1); | |
1977 | if (err) | |
1978 | goto out; | |
1979 | ||
1980 | err = update_parent_subparts_cpumask(cs, partcmd_enable, | |
1981 | NULL, &tmp); | |
1982 | if (err) { | |
1983 | update_flag(CS_CPU_EXCLUSIVE, cs, 0); | |
1984 | goto out; | |
1985 | } | |
1986 | cs->partition_root_state = PRS_ENABLED; | |
1987 | } else { | |
3881b861 WL |
1988 | /* |
1989 | * Turning off partition root will clear the | |
1990 | * CS_CPU_EXCLUSIVE bit. | |
1991 | */ | |
1992 | if (cs->partition_root_state == PRS_ERROR) { | |
1993 | cs->partition_root_state = 0; | |
1994 | update_flag(CS_CPU_EXCLUSIVE, cs, 0); | |
1995 | err = 0; | |
1996 | goto out; | |
1997 | } | |
1998 | ||
ee8dde0c WL |
1999 | err = update_parent_subparts_cpumask(cs, partcmd_disable, |
2000 | NULL, &tmp); | |
2001 | if (err) | |
2002 | goto out; | |
2003 | ||
2004 | cs->partition_root_state = 0; | |
2005 | ||
2006 | /* Turning off CS_CPU_EXCLUSIVE will not return error */ | |
2007 | update_flag(CS_CPU_EXCLUSIVE, cs, 0); | |
2008 | } | |
2009 | ||
2010 | /* | |
2011 | * Update cpumask of parent's tasks except when it is the top | |
2012 | * cpuset as some system daemons cannot be mapped to other CPUs. | |
2013 | */ | |
2014 | if (parent != &top_cpuset) | |
2015 | update_tasks_cpumask(parent); | |
2016 | ||
4716909c WL |
2017 | if (parent->child_ecpus_count) |
2018 | update_sibling_cpumasks(parent, cs, &tmp); | |
2019 | ||
ee8dde0c WL |
2020 | rebuild_sched_domains_locked(); |
2021 | out: | |
2022 | free_cpumasks(NULL, &tmp); | |
645fcc9d | 2023 | return err; |
1da177e4 LT |
2024 | } |
2025 | ||
3e0d98b9 | 2026 | /* |
80f7228b | 2027 | * Frequency meter - How fast is some event occurring? |
3e0d98b9 PJ |
2028 | * |
2029 | * These routines manage a digitally filtered, constant time based, | |
2030 | * event frequency meter. There are four routines: | |
2031 | * fmeter_init() - initialize a frequency meter. | |
2032 | * fmeter_markevent() - called each time the event happens. | |
2033 | * fmeter_getrate() - returns the recent rate of such events. | |
2034 | * fmeter_update() - internal routine used to update fmeter. | |
2035 | * | |
2036 | * A common data structure is passed to each of these routines, | |
2037 | * which is used to keep track of the state required to manage the | |
2038 | * frequency meter and its digital filter. | |
2039 | * | |
2040 | * The filter works on the number of events marked per unit time. | |
2041 | * The filter is single-pole low-pass recursive (IIR). The time unit | |
2042 | * is 1 second. Arithmetic is done using 32-bit integers scaled to | |
2043 | * simulate 3 decimal digits of precision (multiplied by 1000). | |
2044 | * | |
2045 | * With an FM_COEF of 933, and a time base of 1 second, the filter | |
2046 | * has a half-life of 10 seconds, meaning that if the events quit | |
2047 | * happening, then the rate returned from the fmeter_getrate() | |
2048 | * will be cut in half each 10 seconds, until it converges to zero. | |
2049 | * | |
2050 | * It is not worth doing a real infinitely recursive filter. If more | |
2051 | * than FM_MAXTICKS ticks have elapsed since the last filter event, | |
2052 | * just compute FM_MAXTICKS ticks worth, by which point the level | |
2053 | * will be stable. | |
2054 | * | |
2055 | * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid | |
2056 | * arithmetic overflow in the fmeter_update() routine. | |
2057 | * | |
2058 | * Given the simple 32 bit integer arithmetic used, this meter works | |
2059 | * best for reporting rates between one per millisecond (msec) and | |
2060 | * one per 32 (approx) seconds. At constant rates faster than one | |
2061 | * per msec it maxes out at values just under 1,000,000. At constant | |
2062 | * rates between one per msec, and one per second it will stabilize | |
2063 | * to a value N*1000, where N is the rate of events per second. | |
2064 | * At constant rates between one per second and one per 32 seconds, | |
2065 | * it will be choppy, moving up on the seconds that have an event, | |
2066 | * and then decaying until the next event. At rates slower than | |
2067 | * about one in 32 seconds, it decays all the way back to zero between | |
2068 | * each event. | |
2069 | */ | |
2070 | ||
2071 | #define FM_COEF 933 /* coefficient for half-life of 10 secs */ | |
d2b43658 | 2072 | #define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */ |
3e0d98b9 PJ |
2073 | #define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */ |
2074 | #define FM_SCALE 1000 /* faux fixed point scale */ | |
2075 | ||
2076 | /* Initialize a frequency meter */ | |
2077 | static void fmeter_init(struct fmeter *fmp) | |
2078 | { | |
2079 | fmp->cnt = 0; | |
2080 | fmp->val = 0; | |
2081 | fmp->time = 0; | |
2082 | spin_lock_init(&fmp->lock); | |
2083 | } | |
2084 | ||
2085 | /* Internal meter update - process cnt events and update value */ | |
2086 | static void fmeter_update(struct fmeter *fmp) | |
2087 | { | |
d2b43658 AB |
2088 | time64_t now; |
2089 | u32 ticks; | |
2090 | ||
2091 | now = ktime_get_seconds(); | |
2092 | ticks = now - fmp->time; | |
3e0d98b9 PJ |
2093 | |
2094 | if (ticks == 0) | |
2095 | return; | |
2096 | ||
2097 | ticks = min(FM_MAXTICKS, ticks); | |
2098 | while (ticks-- > 0) | |
2099 | fmp->val = (FM_COEF * fmp->val) / FM_SCALE; | |
2100 | fmp->time = now; | |
2101 | ||
2102 | fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; | |
2103 | fmp->cnt = 0; | |
2104 | } | |
2105 | ||
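A quick, illustration-only check of the 10 second half-life described in the comment block above: each idle tick scales val by FM_COEF/FM_SCALE = 0.933, and 0.933^10 is about 0.50. The standalone program below (not kernel code) simply repeats the decay step from fmeter_update() ten times on a saturated value.

	#include <stdio.h>

	int main(void)
	{
		int val = 1000000;	/* rate pinned at ~1 event per msec */
		int ticks;

		for (ticks = 0; ticks < 10; ticks++)
			val = (933 * val) / 1000;	/* FM_COEF * val / FM_SCALE */

		printf("%d\n", val);	/* prints 499820 -- roughly half */
		return 0;
	}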
2106 | /* Process any previous ticks, then bump cnt by one (times scale). */ | |
2107 | static void fmeter_markevent(struct fmeter *fmp) | |
2108 | { | |
2109 | spin_lock(&fmp->lock); | |
2110 | fmeter_update(fmp); | |
2111 | fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); | |
2112 | spin_unlock(&fmp->lock); | |
2113 | } | |
2114 | ||
2115 | /* Process any previous ticks, then return current value. */ | |
2116 | static int fmeter_getrate(struct fmeter *fmp) | |
2117 | { | |
2118 | int val; | |
2119 | ||
2120 | spin_lock(&fmp->lock); | |
2121 | fmeter_update(fmp); | |
2122 | val = fmp->val; | |
2123 | spin_unlock(&fmp->lock); | |
2124 | return val; | |
2125 | } | |
2126 | ||
57fce0a6 TH |
2127 | static struct cpuset *cpuset_attach_old_cs; |
2128 | ||
5d21cc2d | 2129 | /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ |
1f7dd3e5 | 2130 | static int cpuset_can_attach(struct cgroup_taskset *tset) |
f780bdb7 | 2131 | { |
1f7dd3e5 TH |
2132 | struct cgroup_subsys_state *css; |
2133 | struct cpuset *cs; | |
bb9d97b6 TH |
2134 | struct task_struct *task; |
2135 | int ret; | |
1da177e4 | 2136 | |
57fce0a6 | 2137 | /* used later by cpuset_attach() */ |
1f7dd3e5 TH |
2138 | cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css)); |
2139 | cs = css_cs(css); | |
57fce0a6 | 2140 | |
1243dc51 | 2141 | percpu_down_write(&cpuset_rwsem); |
5d21cc2d | 2142 | |
aa6ec29b | 2143 | /* allow moving tasks into an empty cpuset if on default hierarchy */ |
5d21cc2d | 2144 | ret = -ENOSPC; |
b8d1b8ee | 2145 | if (!is_in_v2_mode() && |
88fa523b | 2146 | (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) |
5d21cc2d | 2147 | goto out_unlock; |
9985b0ba | 2148 | |
1f7dd3e5 | 2149 | cgroup_taskset_for_each(task, css, tset) { |
7f51412a JL |
2150 | ret = task_can_attach(task, cs->cpus_allowed); |
2151 | if (ret) | |
5d21cc2d TH |
2152 | goto out_unlock; |
2153 | ret = security_task_setscheduler(task); | |
2154 | if (ret) | |
2155 | goto out_unlock; | |
bb9d97b6 | 2156 | } |
f780bdb7 | 2157 | |
452477fa TH |
2158 | /* |
2159 | * Mark attach is in progress. This makes validate_change() fail | |
2160 | * changes which zero cpus/mems_allowed. | |
2161 | */ | |
2162 | cs->attach_in_progress++; | |
5d21cc2d TH |
2163 | ret = 0; |
2164 | out_unlock: | |
1243dc51 | 2165 | percpu_up_write(&cpuset_rwsem); |
5d21cc2d | 2166 | return ret; |
8793d854 | 2167 | } |
f780bdb7 | 2168 | |
1f7dd3e5 | 2169 | static void cpuset_cancel_attach(struct cgroup_taskset *tset) |
452477fa | 2170 | { |
1f7dd3e5 | 2171 | struct cgroup_subsys_state *css; |
1f7dd3e5 TH |
2172 | |
2173 | cgroup_taskset_first(tset, &css); | |
1f7dd3e5 | 2174 | |
1243dc51 | 2175 | percpu_down_write(&cpuset_rwsem); |
eb95419b | 2176 | css_cs(css)->attach_in_progress--; |
1243dc51 | 2177 | percpu_up_write(&cpuset_rwsem); |
8793d854 | 2178 | } |
1da177e4 | 2179 | |
4e4c9a14 | 2180 | /* |
5d21cc2d | 2181 | * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach() |
4e4c9a14 TH |
2182 | * but we can't allocate it dynamically there. Define it global and |
2183 | * allocate from cpuset_init(). | |
2184 | */ | |
2185 | static cpumask_var_t cpus_attach; | |
2186 | ||
1f7dd3e5 | 2187 | static void cpuset_attach(struct cgroup_taskset *tset) |
8793d854 | 2188 | { |
67bd2c59 | 2189 | /* static buf protected by cpuset_mutex */ |
4e4c9a14 | 2190 | static nodemask_t cpuset_attach_nodemask_to; |
bb9d97b6 | 2191 | struct task_struct *task; |
4530eddb | 2192 | struct task_struct *leader; |
1f7dd3e5 TH |
2193 | struct cgroup_subsys_state *css; |
2194 | struct cpuset *cs; | |
57fce0a6 | 2195 | struct cpuset *oldcs = cpuset_attach_old_cs; |
22fb52dd | 2196 | |
1f7dd3e5 TH |
2197 | cgroup_taskset_first(tset, &css); |
2198 | cs = css_cs(css); | |
2199 | ||
1243dc51 | 2200 | percpu_down_write(&cpuset_rwsem); |
5d21cc2d | 2201 | |
4e4c9a14 TH |
2202 | /* prepare for attach */ |
2203 | if (cs == &top_cpuset) | |
2204 | cpumask_copy(cpus_attach, cpu_possible_mask); | |
2205 | else | |
ae1c8023 | 2206 | guarantee_online_cpus(cs, cpus_attach); |
4e4c9a14 | 2207 | |
ae1c8023 | 2208 | guarantee_online_mems(cs, &cpuset_attach_nodemask_to); |
4e4c9a14 | 2209 | |
1f7dd3e5 | 2210 | cgroup_taskset_for_each(task, css, tset) { |
bb9d97b6 TH |
2211 | /* |
2212 | * can_attach beforehand should guarantee that this doesn't | |
2213 | * fail. TODO: have a better way to handle failure here | |
2214 | */ | |
2215 | WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach)); | |
2216 | ||
2217 | cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to); | |
2218 | cpuset_update_task_spread_flag(cs, task); | |
2219 | } | |
22fb52dd | 2220 | |
f780bdb7 | 2221 | /* |
4530eddb TH |
2222 | * Change mm for all threadgroup leaders. This is expensive and may |
2223 | * sleep and should be moved outside migration path proper. | |
f780bdb7 | 2224 | */ |
ae1c8023 | 2225 | cpuset_attach_nodemask_to = cs->effective_mems; |
1f7dd3e5 | 2226 | cgroup_taskset_for_each_leader(leader, css, tset) { |
3df9ca0a TH |
2227 | struct mm_struct *mm = get_task_mm(leader); |
2228 | ||
2229 | if (mm) { | |
2230 | mpol_rebind_mm(mm, &cpuset_attach_nodemask_to); | |
2231 | ||
2232 | /* | |
2233 | * old_mems_allowed is the same as mems_allowed | |
2234 | * here, except if this task is being moved | |
2235 | * automatically due to hotplug. In that case | |
2236 | * @mems_allowed has been updated and is empty, so | |
2237 | * @old_mems_allowed is the right nodesets that we | |
2238 | * migrate mm from. | |
2239 | */ | |
e93ad19d | 2240 | if (is_memory_migrate(cs)) |
3df9ca0a TH |
2241 | cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, |
2242 | &cpuset_attach_nodemask_to); | |
e93ad19d TH |
2243 | else |
2244 | mmput(mm); | |
f047cecf | 2245 | } |
4225399a | 2246 | } |
452477fa | 2247 | |
33ad801d | 2248 | cs->old_mems_allowed = cpuset_attach_nodemask_to; |
02bb5863 | 2249 | |
452477fa | 2250 | cs->attach_in_progress--; |
e44193d3 LZ |
2251 | if (!cs->attach_in_progress) |
2252 | wake_up(&cpuset_attach_wq); | |
5d21cc2d | 2253 | |
1243dc51 | 2254 | percpu_up_write(&cpuset_rwsem); |
1da177e4 LT |
2255 | } |
2256 | ||
2257 | /* The various types of files and directories in a cpuset file system */ | |
2258 | ||
2259 | typedef enum { | |
45b07ef3 | 2260 | FILE_MEMORY_MIGRATE, |
1da177e4 LT |
2261 | FILE_CPULIST, |
2262 | FILE_MEMLIST, | |
afd1a8b3 LZ |
2263 | FILE_EFFECTIVE_CPULIST, |
2264 | FILE_EFFECTIVE_MEMLIST, | |
5cf8114d | 2265 | FILE_SUBPARTS_CPULIST, |
1da177e4 LT |
2266 | FILE_CPU_EXCLUSIVE, |
2267 | FILE_MEM_EXCLUSIVE, | |
78608366 | 2268 | FILE_MEM_HARDWALL, |
029190c5 | 2269 | FILE_SCHED_LOAD_BALANCE, |
ee8dde0c | 2270 | FILE_PARTITION_ROOT, |
1d3504fc | 2271 | FILE_SCHED_RELAX_DOMAIN_LEVEL, |
3e0d98b9 PJ |
2272 | FILE_MEMORY_PRESSURE_ENABLED, |
2273 | FILE_MEMORY_PRESSURE, | |
825a46af PJ |
2274 | FILE_SPREAD_PAGE, |
2275 | FILE_SPREAD_SLAB, | |
1da177e4 LT |
2276 | } cpuset_filetype_t; |
2277 | ||
182446d0 TH |
2278 | static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, |
2279 | u64 val) | |
700fe1ab | 2280 | { |
182446d0 | 2281 | struct cpuset *cs = css_cs(css); |
700fe1ab | 2282 | cpuset_filetype_t type = cft->private; |
a903f086 | 2283 | int retval = 0; |
700fe1ab | 2284 | |
d74b27d6 | 2285 | get_online_cpus(); |
1243dc51 | 2286 | percpu_down_write(&cpuset_rwsem); |
a903f086 LZ |
2287 | if (!is_cpuset_online(cs)) { |
2288 | retval = -ENODEV; | |
5d21cc2d | 2289 | goto out_unlock; |
a903f086 | 2290 | } |
700fe1ab PM |
2291 | |
2292 | switch (type) { | |
1da177e4 | 2293 | case FILE_CPU_EXCLUSIVE: |
700fe1ab | 2294 | retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); |
1da177e4 LT |
2295 | break; |
2296 | case FILE_MEM_EXCLUSIVE: | |
700fe1ab | 2297 | retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); |
1da177e4 | 2298 | break; |
78608366 PM |
2299 | case FILE_MEM_HARDWALL: |
2300 | retval = update_flag(CS_MEM_HARDWALL, cs, val); | |
2301 | break; | |
029190c5 | 2302 | case FILE_SCHED_LOAD_BALANCE: |
700fe1ab | 2303 | retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); |
1d3504fc | 2304 | break; |
45b07ef3 | 2305 | case FILE_MEMORY_MIGRATE: |
700fe1ab | 2306 | retval = update_flag(CS_MEMORY_MIGRATE, cs, val); |
45b07ef3 | 2307 | break; |
3e0d98b9 | 2308 | case FILE_MEMORY_PRESSURE_ENABLED: |
700fe1ab | 2309 | cpuset_memory_pressure_enabled = !!val; |
3e0d98b9 | 2310 | break; |
825a46af | 2311 | case FILE_SPREAD_PAGE: |
700fe1ab | 2312 | retval = update_flag(CS_SPREAD_PAGE, cs, val); |
825a46af PJ |
2313 | break; |
2314 | case FILE_SPREAD_SLAB: | |
700fe1ab | 2315 | retval = update_flag(CS_SPREAD_SLAB, cs, val); |
825a46af | 2316 | break; |
1da177e4 LT |
2317 | default: |
2318 | retval = -EINVAL; | |
700fe1ab | 2319 | break; |
1da177e4 | 2320 | } |
5d21cc2d | 2321 | out_unlock: |
1243dc51 | 2322 | percpu_up_write(&cpuset_rwsem); |
d74b27d6 | 2323 | put_online_cpus(); |
1da177e4 LT |
2324 | return retval; |
2325 | } | |
2326 | ||
182446d0 TH |
2327 | static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, |
2328 | s64 val) | |
5be7a479 | 2329 | { |
182446d0 | 2330 | struct cpuset *cs = css_cs(css); |
5be7a479 | 2331 | cpuset_filetype_t type = cft->private; |
5d21cc2d | 2332 | int retval = -ENODEV; |
5be7a479 | 2333 | |
d74b27d6 | 2334 | get_online_cpus(); |
1243dc51 | 2335 | percpu_down_write(&cpuset_rwsem); |
5d21cc2d TH |
2336 | if (!is_cpuset_online(cs)) |
2337 | goto out_unlock; | |
e3712395 | 2338 | |
5be7a479 PM |
2339 | switch (type) { |
2340 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: | |
2341 | retval = update_relax_domain_level(cs, val); | |
2342 | break; | |
2343 | default: | |
2344 | retval = -EINVAL; | |
2345 | break; | |
2346 | } | |
5d21cc2d | 2347 | out_unlock: |
1243dc51 | 2348 | percpu_up_write(&cpuset_rwsem); |
d74b27d6 | 2349 | put_online_cpus(); |
5be7a479 PM |
2350 | return retval; |
2351 | } | |
2352 | ||
e3712395 PM |
2353 | /* |
2354 | * Common handling for a write to a "cpus" or "mems" file. | |
2355 | */ | |
451af504 TH |
2356 | static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, |
2357 | char *buf, size_t nbytes, loff_t off) | |
e3712395 | 2358 | { |
451af504 | 2359 | struct cpuset *cs = css_cs(of_css(of)); |
645fcc9d | 2360 | struct cpuset *trialcs; |
5d21cc2d | 2361 | int retval = -ENODEV; |
e3712395 | 2362 | |
451af504 TH |
2363 | buf = strstrip(buf); |
2364 | ||
3a5a6d0c TH |
2365 | /* |
2366 | * CPU or memory hotunplug may leave @cs w/o any execution | |
2367 | * resources, in which case the hotplug code asynchronously updates | |
2368 | * configuration and transfers all tasks to the nearest ancestor | |
2369 | * which can execute. | |
2370 | * | |
2371 | * As writes to "cpus" or "mems" may restore @cs's execution | |
2372 | * resources, wait for the previously scheduled operations before | |
2373 | * proceeding, so that we don't keep removing tasks added | |
2374 | * after execution capability is restored. | |
76bb5ab8 TH |
2375 | * |
2376 | * cpuset_hotplug_work calls back into cgroup core via | |
2377 | * cgroup_transfer_tasks() and waiting for it from a cgroupfs | |
2378 | * operation like this one can lead to a deadlock through kernfs | |
2379 | * active_ref protection. Let's break the protection. Losing the | |
2380 | * protection is okay as we check whether @cs is online after | |
2381 | * grabbing cpuset_mutex anyway. This only happens on the legacy | |
2382 | * hierarchies. | |
3a5a6d0c | 2383 | */ |
76bb5ab8 TH |
2384 | css_get(&cs->css); |
2385 | kernfs_break_active_protection(of->kn); | |
3a5a6d0c TH |
2386 | flush_work(&cpuset_hotplug_work); |
2387 | ||
d74b27d6 | 2388 | get_online_cpus(); |
1243dc51 | 2389 | percpu_down_write(&cpuset_rwsem); |
5d21cc2d TH |
2390 | if (!is_cpuset_online(cs)) |
2391 | goto out_unlock; | |
e3712395 | 2392 | |
645fcc9d | 2393 | trialcs = alloc_trial_cpuset(cs); |
b75f38d6 LZ |
2394 | if (!trialcs) { |
2395 | retval = -ENOMEM; | |
5d21cc2d | 2396 | goto out_unlock; |
b75f38d6 | 2397 | } |
645fcc9d | 2398 | |
451af504 | 2399 | switch (of_cft(of)->private) { |
e3712395 | 2400 | case FILE_CPULIST: |
645fcc9d | 2401 | retval = update_cpumask(cs, trialcs, buf); |
e3712395 PM |
2402 | break; |
2403 | case FILE_MEMLIST: | |
645fcc9d | 2404 | retval = update_nodemask(cs, trialcs, buf); |
e3712395 PM |
2405 | break; |
2406 | default: | |
2407 | retval = -EINVAL; | |
2408 | break; | |
2409 | } | |
645fcc9d | 2410 | |
bf92370c | 2411 | free_cpuset(trialcs); |
5d21cc2d | 2412 | out_unlock: |
1243dc51 | 2413 | percpu_up_write(&cpuset_rwsem); |
d74b27d6 | 2414 | put_online_cpus(); |
76bb5ab8 TH |
2415 | kernfs_unbreak_active_protection(of->kn); |
2416 | css_put(&cs->css); | |
e93ad19d | 2417 | flush_workqueue(cpuset_migrate_mm_wq); |
451af504 | 2418 | return retval ?: nbytes; |
e3712395 PM |
2419 | } |
2420 | ||
1da177e4 LT |
2421 | /* |
2422 | * These ascii lists should be read in a single call, by using a user | |
2423 | * buffer large enough to hold the entire map. If read in smaller | |
2424 | * chunks, there is no guarantee of atomicity. Since the display format | |
2425 | * used, list of ranges of sequential numbers, is variable length, | |
2426 | * and since these maps can change value dynamically, one could read | |
2427 | * gibberish by doing partial reads while a list was changing. | |
1da177e4 | 2428 | */ |
2da8ca82 | 2429 | static int cpuset_common_seq_show(struct seq_file *sf, void *v) |
1da177e4 | 2430 | { |
2da8ca82 TH |
2431 | struct cpuset *cs = css_cs(seq_css(sf)); |
2432 | cpuset_filetype_t type = seq_cft(sf)->private; | |
51ffe411 | 2433 | int ret = 0; |
1da177e4 | 2434 | |
8447a0fe | 2435 | spin_lock_irq(&callback_lock); |
1da177e4 LT |
2436 | |
2437 | switch (type) { | |
2438 | case FILE_CPULIST: | |
e8e6d97c | 2439 | seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); |
1da177e4 LT |
2440 | break; |
2441 | case FILE_MEMLIST: | |
e8e6d97c | 2442 | seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); |
1da177e4 | 2443 | break; |
afd1a8b3 | 2444 | case FILE_EFFECTIVE_CPULIST: |
e8e6d97c | 2445 | seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); |
afd1a8b3 LZ |
2446 | break; |
2447 | case FILE_EFFECTIVE_MEMLIST: | |
e8e6d97c | 2448 | seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); |
afd1a8b3 | 2449 | break; |
5cf8114d WL |
2450 | case FILE_SUBPARTS_CPULIST: |
2451 | seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus)); | |
2452 | break; | |
1da177e4 | 2453 | default: |
51ffe411 | 2454 | ret = -EINVAL; |
1da177e4 | 2455 | } |
1da177e4 | 2456 | |
8447a0fe | 2457 | spin_unlock_irq(&callback_lock); |
51ffe411 | 2458 | return ret; |
1da177e4 LT |
2459 | } |
2460 | ||
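The single-call advice in the comment above cpuset_common_seq_show() is aimed at userspace readers; a minimal userspace sketch is shown below (the mount point /sys/fs/cgroup/cpuset and the buffer size are assumptions for a v1 cpuset hierarchy, not mandated by this file). It reads the whole map with one read(2) so the snapshot stays consistent.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];		/* large enough for the entire cpu list */
		ssize_t n;
		int fd = open("/sys/fs/cgroup/cpuset/cpuset.cpus", O_RDONLY);

		if (fd < 0)
			return 1;
		n = read(fd, buf, sizeof(buf) - 1);	/* one read => one consistent snapshot */
		close(fd);
		if (n <= 0)
			return 1;
		buf[n] = '\0';
		printf("%s", buf);
		return 0;
	}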
182446d0 | 2461 | static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft) |
700fe1ab | 2462 | { |
182446d0 | 2463 | struct cpuset *cs = css_cs(css); |
700fe1ab PM |
2464 | cpuset_filetype_t type = cft->private; |
2465 | switch (type) { | |
2466 | case FILE_CPU_EXCLUSIVE: | |
2467 | return is_cpu_exclusive(cs); | |
2468 | case FILE_MEM_EXCLUSIVE: | |
2469 | return is_mem_exclusive(cs); | |
78608366 PM |
2470 | case FILE_MEM_HARDWALL: |
2471 | return is_mem_hardwall(cs); | |
700fe1ab PM |
2472 | case FILE_SCHED_LOAD_BALANCE: |
2473 | return is_sched_load_balance(cs); | |
2474 | case FILE_MEMORY_MIGRATE: | |
2475 | return is_memory_migrate(cs); | |
2476 | case FILE_MEMORY_PRESSURE_ENABLED: | |
2477 | return cpuset_memory_pressure_enabled; | |
2478 | case FILE_MEMORY_PRESSURE: | |
2479 | return fmeter_getrate(&cs->fmeter); | |
2480 | case FILE_SPREAD_PAGE: | |
2481 | return is_spread_page(cs); | |
2482 | case FILE_SPREAD_SLAB: | |
2483 | return is_spread_slab(cs); | |
2484 | default: | |
2485 | BUG(); | |
2486 | } | |
cf417141 MK |
2487 | |
2488 | /* Unreachable but makes gcc happy */ | |
2489 | return 0; | |
700fe1ab | 2490 | } |
1da177e4 | 2491 | |
182446d0 | 2492 | static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) |
5be7a479 | 2493 | { |
182446d0 | 2494 | struct cpuset *cs = css_cs(css); |
5be7a479 PM |
2495 | cpuset_filetype_t type = cft->private; |
2496 | switch (type) { | |
2497 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: | |
2498 | return cs->relax_domain_level; | |
2499 | default: | |
2500 | BUG(); | |
2501 | } | |
cf417141 MK |
2502 | |
2503 | /* Unreachable but makes gcc happy */ | |
2504 | return 0; | |
5be7a479 PM |
2505 | } |
2506 | ||
bb5b553c WL |
2507 | static int sched_partition_show(struct seq_file *seq, void *v) |
2508 | { | |
2509 | struct cpuset *cs = css_cs(seq_css(seq)); | |
2510 | ||
2511 | switch (cs->partition_root_state) { | |
2512 | case PRS_ENABLED: | |
2513 | seq_puts(seq, "root\n"); | |
2514 | break; | |
2515 | case PRS_DISABLED: | |
2516 | seq_puts(seq, "member\n"); | |
2517 | break; | |
2518 | case PRS_ERROR: | |
2519 | seq_puts(seq, "root invalid\n"); | |
2520 | break; | |
2521 | } | |
2522 | return 0; | |
2523 | } | |
2524 | ||
2525 | static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf, | |
2526 | size_t nbytes, loff_t off) | |
2527 | { | |
2528 | struct cpuset *cs = css_cs(of_css(of)); | |
2529 | int val; | |
2530 | int retval = -ENODEV; | |
2531 | ||
2532 | buf = strstrip(buf); | |
2533 | ||
2534 | /* | |
b1e3aeb1 | 2535 | * Convert "root" to ENABLED, and convert "member" to DISABLED. |
bb5b553c | 2536 | */ |
b1e3aeb1 | 2537 | if (!strcmp(buf, "root")) |
bb5b553c | 2538 | val = PRS_ENABLED; |
b1e3aeb1 | 2539 | else if (!strcmp(buf, "member")) |
bb5b553c WL |
2540 | val = PRS_DISABLED; |
2541 | else | |
2542 | return -EINVAL; | |
2543 | ||
2544 | css_get(&cs->css); | |
d74b27d6 | 2545 | get_online_cpus(); |
1243dc51 | 2546 | percpu_down_write(&cpuset_rwsem); |
bb5b553c WL |
2547 | if (!is_cpuset_online(cs)) |
2548 | goto out_unlock; | |
2549 | ||
2550 | retval = update_prstate(cs, val); | |
2551 | out_unlock: | |
1243dc51 | 2552 | percpu_up_write(&cpuset_rwsem); |
d74b27d6 | 2553 | put_online_cpus(); |
bb5b553c WL |
2554 | css_put(&cs->css); |
2555 | return retval ?: nbytes; | |
2556 | } | |
1da177e4 LT |
2557 | |
2558 | /* | |
2559 | * for the common functions, 'private' gives the type of file | |
2560 | */ | |
2561 | ||
4ec22e9c | 2562 | static struct cftype legacy_files[] = { |
addf2c73 PM |
2563 | { |
2564 | .name = "cpus", | |
2da8ca82 | 2565 | .seq_show = cpuset_common_seq_show, |
451af504 | 2566 | .write = cpuset_write_resmask, |
e3712395 | 2567 | .max_write_len = (100U + 6 * NR_CPUS), |
addf2c73 PM |
2568 | .private = FILE_CPULIST, |
2569 | }, | |
2570 | ||
2571 | { | |
2572 | .name = "mems", | |
2da8ca82 | 2573 | .seq_show = cpuset_common_seq_show, |
451af504 | 2574 | .write = cpuset_write_resmask, |
e3712395 | 2575 | .max_write_len = (100U + 6 * MAX_NUMNODES), |
addf2c73 PM |
2576 | .private = FILE_MEMLIST, |
2577 | }, | |
2578 | ||
afd1a8b3 LZ |
2579 | { |
2580 | .name = "effective_cpus", | |
2581 | .seq_show = cpuset_common_seq_show, | |
2582 | .private = FILE_EFFECTIVE_CPULIST, | |
2583 | }, | |
2584 | ||
2585 | { | |
2586 | .name = "effective_mems", | |
2587 | .seq_show = cpuset_common_seq_show, | |
2588 | .private = FILE_EFFECTIVE_MEMLIST, | |
2589 | }, | |
2590 | ||
addf2c73 PM |
2591 | { |
2592 | .name = "cpu_exclusive", | |
2593 | .read_u64 = cpuset_read_u64, | |
2594 | .write_u64 = cpuset_write_u64, | |
2595 | .private = FILE_CPU_EXCLUSIVE, | |
2596 | }, | |
2597 | ||
2598 | { | |
2599 | .name = "mem_exclusive", | |
2600 | .read_u64 = cpuset_read_u64, | |
2601 | .write_u64 = cpuset_write_u64, | |
2602 | .private = FILE_MEM_EXCLUSIVE, | |
2603 | }, | |
2604 | ||
78608366 PM |
2605 | { |
2606 | .name = "mem_hardwall", | |
2607 | .read_u64 = cpuset_read_u64, | |
2608 | .write_u64 = cpuset_write_u64, | |
2609 | .private = FILE_MEM_HARDWALL, | |
2610 | }, | |
2611 | ||
addf2c73 PM |
2612 | { |
2613 | .name = "sched_load_balance", | |
2614 | .read_u64 = cpuset_read_u64, | |
2615 | .write_u64 = cpuset_write_u64, | |
2616 | .private = FILE_SCHED_LOAD_BALANCE, | |
2617 | }, | |
2618 | ||
2619 | { | |
2620 | .name = "sched_relax_domain_level", | |
5be7a479 PM |
2621 | .read_s64 = cpuset_read_s64, |
2622 | .write_s64 = cpuset_write_s64, | |
addf2c73 PM |
2623 | .private = FILE_SCHED_RELAX_DOMAIN_LEVEL, |
2624 | }, | |
2625 | ||
2626 | { | |
2627 | .name = "memory_migrate", | |
2628 | .read_u64 = cpuset_read_u64, | |
2629 | .write_u64 = cpuset_write_u64, | |
2630 | .private = FILE_MEMORY_MIGRATE, | |
2631 | }, | |
2632 | ||
2633 | { | |
2634 | .name = "memory_pressure", | |
2635 | .read_u64 = cpuset_read_u64, | |
1c08c22c | 2636 | .private = FILE_MEMORY_PRESSURE, |
addf2c73 PM |
2637 | }, |
2638 | ||
2639 | { | |
2640 | .name = "memory_spread_page", | |
2641 | .read_u64 = cpuset_read_u64, | |
2642 | .write_u64 = cpuset_write_u64, | |
2643 | .private = FILE_SPREAD_PAGE, | |
2644 | }, | |
2645 | ||
2646 | { | |
2647 | .name = "memory_spread_slab", | |
2648 | .read_u64 = cpuset_read_u64, | |
2649 | .write_u64 = cpuset_write_u64, | |
2650 | .private = FILE_SPREAD_SLAB, | |
2651 | }, | |
3e0d98b9 | 2652 | |
4baf6e33 TH |
2653 | { |
2654 | .name = "memory_pressure_enabled", | |
2655 | .flags = CFTYPE_ONLY_ON_ROOT, | |
2656 | .read_u64 = cpuset_read_u64, | |
2657 | .write_u64 = cpuset_write_u64, | |
2658 | .private = FILE_MEMORY_PRESSURE_ENABLED, | |
2659 | }, | |
1da177e4 | 2660 | |
4baf6e33 TH |
2661 | { } /* terminate */ |
2662 | }; | |
1da177e4 | 2663 | |
4ec22e9c WL |
2664 | /* |
2665 | * This is currently a minimal set for the default hierarchy. It can be | |
2666 | * expanded later on by migrating more features and control files from v1. | |
2667 | */ | |
2668 | static struct cftype dfl_files[] = { | |
2669 | { | |
2670 | .name = "cpus", | |
2671 | .seq_show = cpuset_common_seq_show, | |
2672 | .write = cpuset_write_resmask, | |
2673 | .max_write_len = (100U + 6 * NR_CPUS), | |
2674 | .private = FILE_CPULIST, | |
2675 | .flags = CFTYPE_NOT_ON_ROOT, | |
2676 | }, | |
2677 | ||
2678 | { | |
2679 | .name = "mems", | |
2680 | .seq_show = cpuset_common_seq_show, | |
2681 | .write = cpuset_write_resmask, | |
2682 | .max_write_len = (100U + 6 * MAX_NUMNODES), | |
2683 | .private = FILE_MEMLIST, | |
2684 | .flags = CFTYPE_NOT_ON_ROOT, | |
2685 | }, | |
2686 | ||
2687 | { | |
2688 | .name = "cpus.effective", | |
2689 | .seq_show = cpuset_common_seq_show, | |
2690 | .private = FILE_EFFECTIVE_CPULIST, | |
4ec22e9c WL |
2691 | }, |
2692 | ||
2693 | { | |
2694 | .name = "mems.effective", | |
2695 | .seq_show = cpuset_common_seq_show, | |
2696 | .private = FILE_EFFECTIVE_MEMLIST, | |
4ec22e9c WL |
2697 | }, |
2698 | ||
ee8dde0c | 2699 | { |
b1e3aeb1 | 2700 | .name = "cpus.partition", |
bb5b553c WL |
2701 | .seq_show = sched_partition_show, |
2702 | .write = sched_partition_write, | |
ee8dde0c WL |
2703 | .private = FILE_PARTITION_ROOT, |
2704 | .flags = CFTYPE_NOT_ON_ROOT, | |
2705 | }, | |
2706 | ||
5cf8114d WL |
2707 | { |
2708 | .name = "cpus.subpartitions", | |
2709 | .seq_show = cpuset_common_seq_show, | |
2710 | .private = FILE_SUBPARTS_CPULIST, | |
2711 | .flags = CFTYPE_DEBUG, | |
2712 | }, | |
2713 | ||
4ec22e9c WL |
2714 | { } /* terminate */ |
2715 | }; | |
2716 | ||
2717 | ||
1da177e4 | 2718 | /* |
92fb9748 | 2719 | * cpuset_css_alloc - allocate a cpuset css |
c9e5fe66 | 2720 | * cgrp: control group that the new cpuset will be part of |
1da177e4 LT |
2721 | */ |
2722 | ||
eb95419b TH |
2723 | static struct cgroup_subsys_state * |
2724 | cpuset_css_alloc(struct cgroup_subsys_state *parent_css) | |
1da177e4 | 2725 | { |
c8f699bb | 2726 | struct cpuset *cs; |
1da177e4 | 2727 | |
eb95419b | 2728 | if (!parent_css) |
8793d854 | 2729 | return &top_cpuset.css; |
033fa1c5 | 2730 | |
c8f699bb | 2731 | cs = kzalloc(sizeof(*cs), GFP_KERNEL); |
1da177e4 | 2732 | if (!cs) |
8793d854 | 2733 | return ERR_PTR(-ENOMEM); |
bf92370c WL |
2734 | |
2735 | if (alloc_cpumasks(cs, NULL)) { | |
2736 | kfree(cs); | |
2737 | return ERR_PTR(-ENOMEM); | |
2738 | } | |
1da177e4 | 2739 | |
029190c5 | 2740 | set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); |
f9a86fcb | 2741 | nodes_clear(cs->mems_allowed); |
e2b9a3d7 | 2742 | nodes_clear(cs->effective_mems); |
3e0d98b9 | 2743 | fmeter_init(&cs->fmeter); |
1d3504fc | 2744 | cs->relax_domain_level = -1; |
1da177e4 | 2745 | |
c8f699bb TH |
2746 | return &cs->css; |
2747 | } | |
2748 | ||
eb95419b | 2749 | static int cpuset_css_online(struct cgroup_subsys_state *css) |
c8f699bb | 2750 | { |
eb95419b | 2751 | struct cpuset *cs = css_cs(css); |
c431069f | 2752 | struct cpuset *parent = parent_cs(cs); |
ae8086ce | 2753 | struct cpuset *tmp_cs; |
492eb21b | 2754 | struct cgroup_subsys_state *pos_css; |
c8f699bb TH |
2755 | |
2756 | if (!parent) | |
2757 | return 0; | |
2758 | ||
d74b27d6 | 2759 | get_online_cpus(); |
1243dc51 | 2760 | percpu_down_write(&cpuset_rwsem); |
5d21cc2d | 2761 | |
efeb77b2 | 2762 | set_bit(CS_ONLINE, &cs->flags); |
c8f699bb TH |
2763 | if (is_spread_page(parent)) |
2764 | set_bit(CS_SPREAD_PAGE, &cs->flags); | |
2765 | if (is_spread_slab(parent)) | |
2766 | set_bit(CS_SPREAD_SLAB, &cs->flags); | |
1da177e4 | 2767 | |
664eedde | 2768 | cpuset_inc(); |
033fa1c5 | 2769 | |
8447a0fe | 2770 | spin_lock_irq(&callback_lock); |
b8d1b8ee | 2771 | if (is_in_v2_mode()) { |
e2b9a3d7 LZ |
2772 | cpumask_copy(cs->effective_cpus, parent->effective_cpus); |
2773 | cs->effective_mems = parent->effective_mems; | |
4716909c WL |
2774 | cs->use_parent_ecpus = true; |
2775 | parent->child_ecpus_count++; | |
e2b9a3d7 | 2776 | } |
8447a0fe | 2777 | spin_unlock_irq(&callback_lock); |
e2b9a3d7 | 2778 | |
eb95419b | 2779 | if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) |
5d21cc2d | 2780 | goto out_unlock; |
033fa1c5 TH |
2781 | |
2782 | /* | |
2783 | * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is | |
2784 | * set. This flag handling is implemented in cgroup core for | |
2785 | * historical reasons - the flag may be specified during mount. | |
2786 | * | |
2787 | * Currently, if any sibling cpusets have exclusive cpus or mem, we | |
2788 | * refuse to clone the configuration - thereby refusing the task to | |
2789 | * be entered, and as a result refusing the sys_unshare() or | |
2790 | * clone() which initiated it. If this becomes a problem for some | |
2791 | * users who wish to allow that scenario, then this could be | |
2792 | * changed to grant parent->cpus_allowed-sibling_cpus_exclusive | |
2793 | * (and likewise for mems) to the new cgroup. | |
2794 | */ | |
ae8086ce | 2795 | rcu_read_lock(); |
492eb21b | 2796 | cpuset_for_each_child(tmp_cs, pos_css, parent) { |
ae8086ce TH |
2797 | if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) { |
2798 | rcu_read_unlock(); | |
5d21cc2d | 2799 | goto out_unlock; |
ae8086ce | 2800 | } |
033fa1c5 | 2801 | } |
ae8086ce | 2802 | rcu_read_unlock(); |
033fa1c5 | 2803 | |
8447a0fe | 2804 | spin_lock_irq(&callback_lock); |
033fa1c5 | 2805 | cs->mems_allowed = parent->mems_allowed; |
790317e1 | 2806 | cs->effective_mems = parent->mems_allowed; |
033fa1c5 | 2807 | cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); |
790317e1 | 2808 | cpumask_copy(cs->effective_cpus, parent->cpus_allowed); |
cea74465 | 2809 | spin_unlock_irq(&callback_lock); |
5d21cc2d | 2810 | out_unlock: |
1243dc51 | 2811 | percpu_up_write(&cpuset_rwsem); |
d74b27d6 | 2812 | put_online_cpus(); |
c8f699bb TH |
2813 | return 0; |
2814 | } | |
2815 | ||
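For reference, a minimal user-space sketch of the clone-children handling implemented above. It assumes a cgroup v1 cpuset hierarchy mounted at /sys/fs/cgroup/cpuset, no cpu- or mem-exclusive siblings, and an illustrative child directory named "demo"; the paths and names are not taken from this file.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

#define CPUSET_ROOT "/sys/fs/cgroup/cpuset"

/* best-effort write of a short string to a cgroup control file */
static void write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return;
	if (write(fd, val, strlen(val)) < 0)
		perror(path);
	close(fd);
}

int main(void)
{
	char buf[256] = "";
	ssize_t n;
	int fd;

	/* ask the cpuset controller to copy cpus/mems into new children */
	write_str(CPUSET_ROOT "/cgroup.clone_children", "1");
	mkdir(CPUSET_ROOT "/demo", 0755);

	/* without the flag, a fresh child's cpuset.cpus starts out empty */
	fd = open(CPUSET_ROOT "/demo/cpuset.cpus", O_RDONLY);
	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0)
			buf[n] = '\0';
		close(fd);
	}
	printf("cloned cpuset.cpus: %s", buf);
	return 0;
}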
0b9e6965 ZH |
2816 | /* |
2817 | * If the cpuset being removed has its flag 'sched_load_balance' | |
2818 | * enabled, then simulate turning sched_load_balance off, which | |
ee8dde0c WL |
2819 | * will call rebuild_sched_domains_locked(). That is not needed |
2820 | * in the default hierarchy where only changes in partition | |
2821 | * will cause repartitioning. | |
2822 | * | |
2823 | * If the cpuset has the 'sched.partition' flag enabled, simulate | |
2824 | * turning 'sched.partition' off. | |
0b9e6965 ZH |
2825 | */ |
2826 | ||
eb95419b | 2827 | static void cpuset_css_offline(struct cgroup_subsys_state *css) |
c8f699bb | 2828 | { |
eb95419b | 2829 | struct cpuset *cs = css_cs(css); |
c8f699bb | 2830 | |
d74b27d6 | 2831 | get_online_cpus(); |
1243dc51 | 2832 | percpu_down_write(&cpuset_rwsem); |
c8f699bb | 2833 | |
ee8dde0c WL |
2834 | if (is_partition_root(cs)) |
2835 | update_prstate(cs, 0); | |
2836 | ||
2837 | if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && | |
2838 | is_sched_load_balance(cs)) | |
c8f699bb TH |
2839 | update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); |
2840 | ||
4716909c WL |
2841 | if (cs->use_parent_ecpus) { |
2842 | struct cpuset *parent = parent_cs(cs); | |
2843 | ||
2844 | cs->use_parent_ecpus = false; | |
2845 | parent->child_ecpus_count--; | |
2846 | } | |
2847 | ||
664eedde | 2848 | cpuset_dec(); |
efeb77b2 | 2849 | clear_bit(CS_ONLINE, &cs->flags); |
c8f699bb | 2850 | |
1243dc51 | 2851 | percpu_up_write(&cpuset_rwsem); |
d74b27d6 | 2852 | put_online_cpus(); |
1da177e4 LT |
2853 | } |
2854 | ||
eb95419b | 2855 | static void cpuset_css_free(struct cgroup_subsys_state *css) |
1da177e4 | 2856 | { |
eb95419b | 2857 | struct cpuset *cs = css_cs(css); |
1da177e4 | 2858 | |
bf92370c | 2859 | free_cpuset(cs); |
1da177e4 LT |
2860 | } |
2861 | ||
39bd0d15 LZ |
2862 | static void cpuset_bind(struct cgroup_subsys_state *root_css) |
2863 | { | |
1243dc51 | 2864 | percpu_down_write(&cpuset_rwsem); |
8447a0fe | 2865 | spin_lock_irq(&callback_lock); |
39bd0d15 | 2866 | |
b8d1b8ee | 2867 | if (is_in_v2_mode()) { |
39bd0d15 LZ |
2868 | cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); |
2869 | top_cpuset.mems_allowed = node_possible_map; | |
2870 | } else { | |
2871 | cpumask_copy(top_cpuset.cpus_allowed, | |
2872 | top_cpuset.effective_cpus); | |
2873 | top_cpuset.mems_allowed = top_cpuset.effective_mems; | |
2874 | } | |
2875 | ||
8447a0fe | 2876 | spin_unlock_irq(&callback_lock); |
1243dc51 | 2877 | percpu_up_write(&cpuset_rwsem); |
39bd0d15 LZ |
2878 | } |
2879 | ||
06f4e948 ZL |
2880 | /* |
2881 | * Make sure the new task conforms to the current state of its parent, | |
2882 | * which could have been changed by cpuset just after it inherits the | |
2883 | * state from the parent and before it sits on the cgroup's task list. | |
2884 | */ | |
8a15b817 | 2885 | static void cpuset_fork(struct task_struct *task) |
06f4e948 ZL |
2886 | { |
2887 | if (task_css_is_root(task, cpuset_cgrp_id)) | |
2888 | return; | |
2889 | ||
3bd37062 | 2890 | set_cpus_allowed_ptr(task, current->cpus_ptr); |
06f4e948 ZL |
2891 | task->mems_allowed = current->mems_allowed; |
2892 | } | |
2893 | ||
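A small user-space illustration of the inheritance that cpuset_fork() preserves: a forked child starts out with its parent's CPU affinity (and, analogously, its mems_allowed). This is a sketch, not code from this file.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	cpu_set_t set;

	/* the affinity seen here is derived from the caller's cpuset */
	sched_getaffinity(0, sizeof(set), &set);
	printf("parent: %d allowed CPUs\n", CPU_COUNT(&set));

	if (fork() == 0) {
		/* the child starts with the same mask its parent had */
		sched_getaffinity(0, sizeof(set), &set);
		printf("child:  %d allowed CPUs\n", CPU_COUNT(&set));
		_exit(0);
	}
	wait(NULL);
	return 0;
}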
073219e9 | 2894 | struct cgroup_subsys cpuset_cgrp_subsys = { |
39bd0d15 LZ |
2895 | .css_alloc = cpuset_css_alloc, |
2896 | .css_online = cpuset_css_online, | |
2897 | .css_offline = cpuset_css_offline, | |
2898 | .css_free = cpuset_css_free, | |
2899 | .can_attach = cpuset_can_attach, | |
2900 | .cancel_attach = cpuset_cancel_attach, | |
2901 | .attach = cpuset_attach, | |
5cf1cacb | 2902 | .post_attach = cpuset_post_attach, |
39bd0d15 | 2903 | .bind = cpuset_bind, |
06f4e948 | 2904 | .fork = cpuset_fork, |
4ec22e9c WL |
2905 | .legacy_cftypes = legacy_files, |
2906 | .dfl_cftypes = dfl_files, | |
b38e42e9 | 2907 | .early_init = true, |
4ec22e9c | 2908 | .threaded = true, |
8793d854 PM |
2909 | }; |
2910 | ||
1da177e4 LT |
2911 | /** |
2912 | * cpuset_init - initialize cpusets at system boot | |
2913 | * | |
d5f68d33 | 2914 | * Description: Initialize top_cpuset |
1da177e4 LT |
2915 | **/ |
2916 | ||
2917 | int __init cpuset_init(void) | |
2918 | { | |
1243dc51 JL |
2919 | BUG_ON(percpu_init_rwsem(&cpuset_rwsem)); |
2920 | ||
75fa8e5d NMG |
2921 | BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL)); |
2922 | BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL)); | |
bf92370c | 2923 | BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL)); |
58568d2a | 2924 | |
300ed6cb | 2925 | cpumask_setall(top_cpuset.cpus_allowed); |
f9a86fcb | 2926 | nodes_setall(top_cpuset.mems_allowed); |
e2b9a3d7 LZ |
2927 | cpumask_setall(top_cpuset.effective_cpus); |
2928 | nodes_setall(top_cpuset.effective_mems); | |
1da177e4 | 2929 | |
3e0d98b9 | 2930 | fmeter_init(&top_cpuset.fmeter); |
029190c5 | 2931 | set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags); |
1d3504fc | 2932 | top_cpuset.relax_domain_level = -1; |
1da177e4 | 2933 | |
75fa8e5d | 2934 | BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL)); |
2341d1b6 | 2935 | |
8793d854 | 2936 | return 0; |
1da177e4 LT |
2937 | } |
2938 | ||
b1aac8bb | 2939 | /* |
cf417141 | 2940 | * If CPU and/or memory hotplug handlers, below, unplug any CPUs |
b1aac8bb PJ |
2941 | * or memory nodes, we need to walk over the cpuset hierarchy, |
2942 | * removing that CPU or node from all cpusets. If this removes the | |
956db3ca CW |
2943 | * last CPU or node from a cpuset, then move the tasks in the empty |
2944 | * cpuset to its next-highest non-empty parent. | |
b1aac8bb | 2945 | */ |
956db3ca CW |
2946 | static void remove_tasks_in_empty_cpuset(struct cpuset *cs) |
2947 | { | |
2948 | struct cpuset *parent; | |
2949 | ||
956db3ca CW |
2950 | /* |
2951 | * Find its next-highest non-empty parent (the top cpuset | |
2952 | * has online cpus, so can't be empty). | |
2953 | */ | |
c431069f | 2954 | parent = parent_cs(cs); |
300ed6cb | 2955 | while (cpumask_empty(parent->cpus_allowed) || |
b4501295 | 2956 | nodes_empty(parent->mems_allowed)) |
c431069f | 2957 | parent = parent_cs(parent); |
956db3ca | 2958 | |
8cc99345 | 2959 | if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { |
12d3089c | 2960 | pr_err("cpuset: failed to transfer tasks out of empty cpuset "); |
e61734c5 TH |
2961 | pr_cont_cgroup_name(cs->css.cgroup); |
2962 | pr_cont("\n"); | |
8cc99345 | 2963 | } |
956db3ca CW |
2964 | } |
2965 | ||
be4c9dd7 LZ |
2966 | static void |
2967 | hotplug_update_tasks_legacy(struct cpuset *cs, | |
2968 | struct cpumask *new_cpus, nodemask_t *new_mems, | |
2969 | bool cpus_updated, bool mems_updated) | |
390a36aa LZ |
2970 | { |
2971 | bool is_empty; | |
2972 | ||
8447a0fe | 2973 | spin_lock_irq(&callback_lock); |
be4c9dd7 LZ |
2974 | cpumask_copy(cs->cpus_allowed, new_cpus); |
2975 | cpumask_copy(cs->effective_cpus, new_cpus); | |
2976 | cs->mems_allowed = *new_mems; | |
2977 | cs->effective_mems = *new_mems; | |
8447a0fe | 2978 | spin_unlock_irq(&callback_lock); |
390a36aa LZ |
2979 | |
2980 | /* | |
2981 | * Don't call update_tasks_cpumask() if the cpuset becomes empty, | |
2982 | * as the tasks will be migrated to an ancestor. | |
2983 | */ | |
be4c9dd7 | 2984 | if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) |
390a36aa | 2985 | update_tasks_cpumask(cs); |
be4c9dd7 | 2986 | if (mems_updated && !nodes_empty(cs->mems_allowed)) |
390a36aa LZ |
2987 | update_tasks_nodemask(cs); |
2988 | ||
2989 | is_empty = cpumask_empty(cs->cpus_allowed) || | |
2990 | nodes_empty(cs->mems_allowed); | |
2991 | ||
1243dc51 | 2992 | percpu_up_write(&cpuset_rwsem); |
390a36aa LZ |
2993 | |
2994 | /* | |
2995 | * Move tasks to the nearest ancestor with execution resources. | |
2996 | * This is a full cgroup operation which will also call back into | |
2997 | * cpuset. Should be done outside any lock. | |
2998 | */ | |
2999 | if (is_empty) | |
3000 | remove_tasks_in_empty_cpuset(cs); | |
3001 | ||
1243dc51 | 3002 | percpu_down_write(&cpuset_rwsem); |
390a36aa LZ |
3003 | } |
3004 | ||
be4c9dd7 LZ |
3005 | static void |
3006 | hotplug_update_tasks(struct cpuset *cs, | |
3007 | struct cpumask *new_cpus, nodemask_t *new_mems, | |
3008 | bool cpus_updated, bool mems_updated) | |
390a36aa | 3009 | { |
be4c9dd7 LZ |
3010 | if (cpumask_empty(new_cpus)) |
3011 | cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); | |
3012 | if (nodes_empty(*new_mems)) | |
3013 | *new_mems = parent_cs(cs)->effective_mems; | |
3014 | ||
8447a0fe | 3015 | spin_lock_irq(&callback_lock); |
be4c9dd7 LZ |
3016 | cpumask_copy(cs->effective_cpus, new_cpus); |
3017 | cs->effective_mems = *new_mems; | |
8447a0fe | 3018 | spin_unlock_irq(&callback_lock); |
390a36aa | 3019 | |
be4c9dd7 | 3020 | if (cpus_updated) |
390a36aa | 3021 | update_tasks_cpumask(cs); |
be4c9dd7 | 3022 | if (mems_updated) |
390a36aa LZ |
3023 | update_tasks_nodemask(cs); |
3024 | } | |
3025 | ||
4b842da2 WL |
3026 | static bool force_rebuild; |
3027 | ||
3028 | void cpuset_force_rebuild(void) | |
3029 | { | |
3030 | force_rebuild = true; | |
3031 | } | |
3032 | ||
deb7aa30 | 3033 | /** |
388afd85 | 3034 | * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug |
deb7aa30 | 3035 | * @cs: cpuset in interest |
4b842da2 | 3036 | * @tmp: the tmpmasks structure pointer |
956db3ca | 3037 | * |
deb7aa30 TH |
3038 | * Compare @cs's cpu and mem masks against top_cpuset and if some have gone |
3039 | * offline, update @cs accordingly. If @cs ends up with no CPU or memory, | |
3040 | * all its tasks are moved to the nearest ancestor with both resources. | |
80d1fa64 | 3041 | */ |
4b842da2 | 3042 | static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) |
80d1fa64 | 3043 | { |
be4c9dd7 LZ |
3044 | static cpumask_t new_cpus; |
3045 | static nodemask_t new_mems; | |
3046 | bool cpus_updated; | |
3047 | bool mems_updated; | |
4b842da2 | 3048 | struct cpuset *parent; |
e44193d3 LZ |
3049 | retry: |
3050 | wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); | |
80d1fa64 | 3051 | |
1243dc51 | 3052 | percpu_down_write(&cpuset_rwsem); |
7ddf96b0 | 3053 | |
e44193d3 LZ |
3054 | /* |
3055 | * We have raced with task attaching. We wait until attaching | |
3056 | * is finished, so we won't attach a task to an empty cpuset. | |
3057 | */ | |
3058 | if (cs->attach_in_progress) { | |
1243dc51 | 3059 | percpu_up_write(&cpuset_rwsem); |
e44193d3 LZ |
3060 | goto retry; |
3061 | } | |
3062 | ||
4b842da2 WL |
3063 | parent = parent_cs(cs); |
3064 | compute_effective_cpumask(&new_cpus, cs, parent); | |
3065 | nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); | |
3066 | ||
3067 | if (cs->nr_subparts_cpus) | |
3068 | /* | |
3069 | * Make sure that CPUs allocated to child partitions | |
3070 | * do not show up in effective_cpus. | |
3071 | */ | |
3072 | cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus); | |
3073 | ||
3074 | if (!tmp || !cs->partition_root_state) | |
3075 | goto update_tasks; | |
80d1fa64 | 3076 | |
4b842da2 WL |
3077 | /* |
3078 | * In the unlikely event that a partition root has empty | |
3079 | * effective_cpus or its parent becomes erroneous, we have to | |
3080 | * transition it to the erroneous state. | |
3081 | */ | |
3082 | if (is_partition_root(cs) && (cpumask_empty(&new_cpus) || | |
3083 | (parent->partition_root_state == PRS_ERROR))) { | |
3084 | if (cs->nr_subparts_cpus) { | |
3085 | cs->nr_subparts_cpus = 0; | |
3086 | cpumask_clear(cs->subparts_cpus); | |
3087 | compute_effective_cpumask(&new_cpus, cs, parent); | |
3088 | } | |
80d1fa64 | 3089 | |
4b842da2 WL |
3090 | /* |
3091 | * If the effective_cpus is empty because the child | |
3092 | * partitions take away all the CPUs, we can keep | |
3093 | * the current partition and let the child partitions | |
3094 | * fight for available CPUs. | |
3095 | */ | |
3096 | if ((parent->partition_root_state == PRS_ERROR) || | |
3097 | cpumask_empty(&new_cpus)) { | |
3098 | update_parent_subparts_cpumask(cs, partcmd_disable, | |
3099 | NULL, tmp); | |
3100 | cs->partition_root_state = PRS_ERROR; | |
3101 | } | |
3102 | cpuset_force_rebuild(); | |
3103 | } | |
3104 | ||
3105 | /* | |
3106 | * On the other hand, an erroneous partition root may be transitioned | |
3107 | * back to a regular one or a partition root with no CPU allocated | |
3108 | * from the parent may change to erroneous. | |
3109 | */ | |
3110 | if (is_partition_root(parent) && | |
3111 | ((cs->partition_root_state == PRS_ERROR) || | |
3112 | !cpumask_intersects(&new_cpus, parent->subparts_cpus)) && | |
3113 | update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp)) | |
3114 | cpuset_force_rebuild(); | |
3115 | ||
3116 | update_tasks: | |
be4c9dd7 LZ |
3117 | cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); |
3118 | mems_updated = !nodes_equal(new_mems, cs->effective_mems); | |
deb7aa30 | 3119 | |
b8d1b8ee | 3120 | if (is_in_v2_mode()) |
be4c9dd7 LZ |
3121 | hotplug_update_tasks(cs, &new_cpus, &new_mems, |
3122 | cpus_updated, mems_updated); | |
390a36aa | 3123 | else |
be4c9dd7 LZ |
3124 | hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, |
3125 | cpus_updated, mems_updated); | |
8d033948 | 3126 | |
1243dc51 | 3127 | percpu_up_write(&cpuset_rwsem); |
b1aac8bb PJ |
3128 | } |
3129 | ||
deb7aa30 | 3130 | /** |
2b729fe7 | 3131 | * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset |
956db3ca | 3132 | * |
deb7aa30 TH |
3133 | * This function is called after either CPU or memory configuration has |
3134 | * changed and updates cpuset accordingly. The top_cpuset is always | |
3135 | * synchronized to cpu_active_mask and N_MEMORY, which is necessary in | |
3136 | * order to make cpusets transparent (of no effect) on systems that are | |
3137 | * actively using CPU hotplug but making no active use of cpusets. | |
956db3ca | 3138 | * |
deb7aa30 | 3139 | * Non-root cpusets are only affected by offlining. If any CPUs or memory |
388afd85 LZ |
3140 | * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on |
3141 | * all descendants. | |
956db3ca | 3142 | * |
deb7aa30 TH |
3143 | * Note that CPU offlining during suspend is ignored. We don't modify |
3144 | * cpusets across suspend/resume cycles at all. | |
956db3ca | 3145 | */ |
2b729fe7 | 3146 | static void cpuset_hotplug_workfn(struct work_struct *work) |
b1aac8bb | 3147 | { |
5c5cc623 LZ |
3148 | static cpumask_t new_cpus; |
3149 | static nodemask_t new_mems; | |
deb7aa30 | 3150 | bool cpus_updated, mems_updated; |
b8d1b8ee | 3151 | bool on_dfl = is_in_v2_mode(); |
4b842da2 WL |
3152 | struct tmpmasks tmp, *ptmp = NULL; |
3153 | ||
3154 | if (on_dfl && !alloc_cpumasks(NULL, &tmp)) | |
3155 | ptmp = &tmp; | |
b1aac8bb | 3156 | |
1243dc51 | 3157 | percpu_down_write(&cpuset_rwsem); |
956db3ca | 3158 | |
deb7aa30 TH |
3159 | /* fetch the available cpus/mems and find out which changed how */ |
3160 | cpumask_copy(&new_cpus, cpu_active_mask); | |
3161 | new_mems = node_states[N_MEMORY]; | |
7ddf96b0 | 3162 | |
4b842da2 WL |
3163 | /* |
3164 | * If subparts_cpus is populated, it is likely that the check below | |
3165 | * will produce a false positive on cpus_updated when the cpu list | |
3166 | * isn't changed. It is extra work, but it is better to be safe. | |
3167 | */ | |
7e88291b LZ |
3168 | cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus); |
3169 | mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems); | |
7ddf96b0 | 3170 | |
deb7aa30 TH |
3171 | /* synchronize cpus_allowed to cpu_active_mask */ |
3172 | if (cpus_updated) { | |
8447a0fe | 3173 | spin_lock_irq(&callback_lock); |
7e88291b LZ |
3174 | if (!on_dfl) |
3175 | cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); | |
4b842da2 WL |
3176 | /* |
3177 | * Make sure that CPUs allocated to child partitions | |
3178 | * do not show up in effective_cpus. If no CPU is left, | |
3179 | * we clear the subparts_cpus & let the child partitions | |
3180 | * fight for the CPUs again. | |
3181 | */ | |
3182 | if (top_cpuset.nr_subparts_cpus) { | |
3183 | if (cpumask_subset(&new_cpus, | |
3184 | top_cpuset.subparts_cpus)) { | |
3185 | top_cpuset.nr_subparts_cpus = 0; | |
3186 | cpumask_clear(top_cpuset.subparts_cpus); | |
3187 | } else { | |
3188 | cpumask_andnot(&new_cpus, &new_cpus, | |
3189 | top_cpuset.subparts_cpus); | |
3190 | } | |
3191 | } | |
1344ab9c | 3192 | cpumask_copy(top_cpuset.effective_cpus, &new_cpus); |
8447a0fe | 3193 | spin_unlock_irq(&callback_lock); |
deb7aa30 TH |
3194 | /* we don't mess with cpumasks of tasks in top_cpuset */ |
3195 | } | |
b4501295 | 3196 | |
deb7aa30 TH |
3197 | /* synchronize mems_allowed to N_MEMORY */ |
3198 | if (mems_updated) { | |
8447a0fe | 3199 | spin_lock_irq(&callback_lock); |
7e88291b LZ |
3200 | if (!on_dfl) |
3201 | top_cpuset.mems_allowed = new_mems; | |
1344ab9c | 3202 | top_cpuset.effective_mems = new_mems; |
8447a0fe | 3203 | spin_unlock_irq(&callback_lock); |
d66393e5 | 3204 | update_tasks_nodemask(&top_cpuset); |
deb7aa30 | 3205 | } |
b4501295 | 3206 | |
1243dc51 | 3207 | percpu_up_write(&cpuset_rwsem); |
388afd85 | 3208 | |
5c5cc623 LZ |
3209 | /* if cpus or mems changed, we need to propagate to descendants */ |
3210 | if (cpus_updated || mems_updated) { | |
deb7aa30 | 3211 | struct cpuset *cs; |
492eb21b | 3212 | struct cgroup_subsys_state *pos_css; |
f9b4fb8d | 3213 | |
fc560a26 | 3214 | rcu_read_lock(); |
492eb21b | 3215 | cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { |
ec903c0c | 3216 | if (cs == &top_cpuset || !css_tryget_online(&cs->css)) |
388afd85 LZ |
3217 | continue; |
3218 | rcu_read_unlock(); | |
7ddf96b0 | 3219 | |
4b842da2 | 3220 | cpuset_hotplug_update_tasks(cs, ptmp); |
b4501295 | 3221 | |
388afd85 LZ |
3222 | rcu_read_lock(); |
3223 | css_put(&cs->css); | |
3224 | } | |
3225 | rcu_read_unlock(); | |
3226 | } | |
8d033948 | 3227 | |
deb7aa30 | 3228 | /* rebuild sched domains if cpus_allowed has changed */ |
50e76632 PZ |
3229 | if (cpus_updated || force_rebuild) { |
3230 | force_rebuild = false; | |
2b729fe7 | 3231 | rebuild_sched_domains(); |
50e76632 | 3232 | } |
4b842da2 WL |
3233 | |
3234 | free_cpumasks(NULL, ptmp); | |
b1aac8bb PJ |
3235 | } |
3236 | ||
2b729fe7 | 3237 | void cpuset_update_active_cpus(void) |
4c4d50f7 | 3238 | { |
2b729fe7 TH |
3239 | /* |
3240 | * We're inside cpu hotplug critical region which usually nests | |
3241 | * inside cgroup synchronization. Bounce actual hotplug processing | |
3242 | * to a work item to avoid reverse locking order. | |
3243 | */ | |
3244 | schedule_work(&cpuset_hotplug_work); | |
4c4d50f7 | 3245 | } |
4c4d50f7 | 3246 | |
2b729fe7 | 3247 | void cpuset_wait_for_hotplug(void) |
50e76632 | 3248 | { |
2b729fe7 | 3249 | flush_work(&cpuset_hotplug_work); |
50e76632 PZ |
3250 | } |
3251 | ||
38837fc7 | 3252 | /* |
38d7bee9 LJ |
3253 | * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY]. |
3254 | * Call this routine anytime after node_states[N_MEMORY] changes. | |
a1cd2b13 | 3255 | * See cpuset_update_active_cpus() for CPU hotplug handling. |
38837fc7 | 3256 | */ |
f481891f MX |
3257 | static int cpuset_track_online_nodes(struct notifier_block *self, |
3258 | unsigned long action, void *arg) | |
38837fc7 | 3259 | { |
3a5a6d0c | 3260 | schedule_work(&cpuset_hotplug_work); |
f481891f | 3261 | return NOTIFY_OK; |
38837fc7 | 3262 | } |
d8f10cb3 AM |
3263 | |
3264 | static struct notifier_block cpuset_track_online_nodes_nb = { | |
3265 | .notifier_call = cpuset_track_online_nodes, | |
3266 | .priority = 10, /* ??! */ | |
3267 | }; | |
38837fc7 | 3268 | |
1da177e4 LT |
3269 | /** |
3270 | * cpuset_init_smp - initialize cpus_allowed | |
3271 | * | |
3272 | * Description: Finish top cpuset after cpu, node maps are initialized | |
d8f10cb3 | 3273 | */ |
1da177e4 LT |
3274 | void __init cpuset_init_smp(void) |
3275 | { | |
6ad4c188 | 3276 | cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); |
38d7bee9 | 3277 | top_cpuset.mems_allowed = node_states[N_MEMORY]; |
33ad801d | 3278 | top_cpuset.old_mems_allowed = top_cpuset.mems_allowed; |
4c4d50f7 | 3279 | |
e2b9a3d7 LZ |
3280 | cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask); |
3281 | top_cpuset.effective_mems = node_states[N_MEMORY]; | |
3282 | ||
d8f10cb3 | 3283 | register_hotmemory_notifier(&cpuset_track_online_nodes_nb); |
e93ad19d TH |
3284 | |
3285 | cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0); | |
3286 | BUG_ON(!cpuset_migrate_mm_wq); | |
1da177e4 LT |
3287 | } |
3288 | ||
3289 | /** | |
1da177e4 LT |
3290 | * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset. |
3291 | * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. | |
6af866af | 3292 | * @pmask: pointer to struct cpumask variable to receive cpus_allowed set. |
1da177e4 | 3293 | * |
300ed6cb | 3294 | * Description: Returns the cpumask_var_t cpus_allowed of the cpuset |
1da177e4 | 3295 | * attached to the specified @tsk. Guaranteed to return some non-empty |
5f054e31 | 3296 | * subset of cpu_online_mask, even if this means going outside the |
1da177e4 LT |
3297 | * task's cpuset. |
3298 | **/ | |
3299 | ||
6af866af | 3300 | void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) |
1da177e4 | 3301 | { |
8447a0fe VD |
3302 | unsigned long flags; |
3303 | ||
3304 | spin_lock_irqsave(&callback_lock, flags); | |
b8dadcb5 | 3305 | rcu_read_lock(); |
ae1c8023 | 3306 | guarantee_online_cpus(task_cs(tsk), pmask); |
b8dadcb5 | 3307 | rcu_read_unlock(); |
8447a0fe | 3308 | spin_unlock_irqrestore(&callback_lock, flags); |
1da177e4 LT |
3309 | } |
3310 | ||
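A hypothetical in-kernel caller of cpuset_cpus_allowed(), shown only as a sketch (example_log_cpuset_cpus is an invented name, not part of this file): allocate a cpumask, query the cpuset-derived affinity of a task, and log it.

/* hypothetical caller sketch */
static void example_log_cpuset_cpus(struct task_struct *tsk)
{
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;

	cpuset_cpus_allowed(tsk, mask);
	pr_info("pid %d: cpuset allows CPUs %*pbl\n",
		task_pid_nr(tsk), cpumask_pr_args(mask));
	free_cpumask_var(mask);
}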
d477f8c2 JS |
3311 | /** |
3312 | * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe. | |
3313 | * @tsk: pointer to task_struct with which the scheduler is struggling | |
3314 | * | |
3315 | * Description: In the case that the scheduler cannot find an allowed cpu in | |
3316 | * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy | |
3317 | * mode however, this value is the same as task_cs(tsk)->effective_cpus, | |
3318 | * which will not contain a sane cpumask during cases such as cpu hotplugging. | |
3319 | * This is the absolute last resort for the scheduler and it is only used if | |
3320 | * _every_ other avenue has been traveled. | |
3321 | **/ | |
3322 | ||
2baab4e9 | 3323 | void cpuset_cpus_allowed_fallback(struct task_struct *tsk) |
9084bb82 | 3324 | { |
9084bb82 | 3325 | rcu_read_lock(); |
d477f8c2 JS |
3326 | do_set_cpus_allowed(tsk, is_in_v2_mode() ? |
3327 | task_cs(tsk)->cpus_allowed : cpu_possible_mask); | |
9084bb82 ON |
3328 | rcu_read_unlock(); |
3329 | ||
3330 | /* | |
3331 | * We own tsk->cpus_allowed, nobody can change it under us. | |
3332 | * | |
3333 | * But we used cs && cs->cpus_allowed lockless and thus can | |
3334 | * race with cgroup_attach_task() or update_cpumask() and get | |
3335 | * the wrong tsk->cpus_allowed. However, both cases imply the | |
3336 | * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr() | |
3337 | * which takes task_rq_lock(). | |
3338 | * | |
3339 | * If we are called after it dropped the lock we must see all | |
3340 | * changes in tsk_cs()->cpus_allowed. Otherwise we can temporarily | |
3341 | * set any mask even if it is not right from task_cs() pov, | |
3342 | * the pending set_cpus_allowed_ptr() will fix things. | |
2baab4e9 PZ |
3343 | * |
3344 | * select_fallback_rq() will fix things up and set cpu_possible_mask | |
3345 | * if required. | |
9084bb82 | 3346 | */ |
9084bb82 ON |
3347 | } |
3348 | ||
8f4ab07f | 3349 | void __init cpuset_init_current_mems_allowed(void) |
1da177e4 | 3350 | { |
f9a86fcb | 3351 | nodes_setall(current->mems_allowed); |
1da177e4 LT |
3352 | } |
3353 | ||
909d75a3 PJ |
3354 | /** |
3355 | * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset. | |
3356 | * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed. | |
3357 | * | |
3358 | * Description: Returns the nodemask_t mems_allowed of the cpuset | |
3359 | * attached to the specified @tsk. Guaranteed to return some non-empty | |
38d7bee9 | 3360 | * subset of node_states[N_MEMORY], even if this means going outside the |
909d75a3 PJ |
3361 | * task's cpuset. |
3362 | **/ | |
3363 | ||
3364 | nodemask_t cpuset_mems_allowed(struct task_struct *tsk) | |
3365 | { | |
3366 | nodemask_t mask; | |
8447a0fe | 3367 | unsigned long flags; |
909d75a3 | 3368 | |
8447a0fe | 3369 | spin_lock_irqsave(&callback_lock, flags); |
b8dadcb5 | 3370 | rcu_read_lock(); |
ae1c8023 | 3371 | guarantee_online_mems(task_cs(tsk), &mask); |
b8dadcb5 | 3372 | rcu_read_unlock(); |
8447a0fe | 3373 | spin_unlock_irqrestore(&callback_lock, flags); |
909d75a3 PJ |
3374 | |
3375 | return mask; | |
3376 | } | |
3377 | ||
d9fd8a6d | 3378 | /** |
19770b32 MG |
3379 | * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed |
3380 | * @nodemask: the nodemask to be checked | |
d9fd8a6d | 3381 | * |
19770b32 | 3382 | * Are any of the nodes in the nodemask allowed in current->mems_allowed? |
1da177e4 | 3383 | */ |
19770b32 | 3384 | int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) |
1da177e4 | 3385 | { |
19770b32 | 3386 | return nodes_intersects(*nodemask, current->mems_allowed); |
1da177e4 LT |
3387 | } |
3388 | ||
9bf2229f | 3389 | /* |
78608366 PM |
3390 | * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or |
3391 | * mem_hardwall ancestor to the specified cpuset. Call holding | |
8447a0fe | 3392 | * callback_lock. If no ancestor is mem_exclusive or mem_hardwall |
78608366 | 3393 | * (an unusual configuration), then returns the root cpuset. |
9bf2229f | 3394 | */ |
c9710d80 | 3395 | static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) |
9bf2229f | 3396 | { |
c431069f TH |
3397 | while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) |
3398 | cs = parent_cs(cs); | |
9bf2229f PJ |
3399 | return cs; |
3400 | } | |
3401 | ||
d9fd8a6d | 3402 | /** |
344736f2 | 3403 | * cpuset_node_allowed - Can we allocate on a memory node? |
a1bc5a4e | 3404 | * @node: is this an allowed node? |
02a0e53d | 3405 | * @gfp_mask: memory allocation flags |
d9fd8a6d | 3406 | * |
6e276d2a DR |
3407 | * If we're in interrupt, yes, we can always allocate. If @node is set in |
3408 | * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this | |
3409 | * node is set in the nearest hardwalled cpuset ancestor to current's cpuset, | |
da99ecf1 | 3410 | * yes. If current has access to memory reserves as an oom victim, yes. |
9bf2229f PJ |
3411 | * Otherwise, no. |
3412 | * | |
3413 | * GFP_USER allocations are marked with the __GFP_HARDWALL bit, | |
c596d9f3 | 3414 | * and do not allow allocations outside the current task's cpuset |
da99ecf1 | 3415 | * unless the task has been OOM killed. |
9bf2229f | 3416 | * GFP_KERNEL allocations are not so marked, so can escape to the |
78608366 | 3417 | * nearest enclosing hardwalled ancestor cpuset. |
9bf2229f | 3418 | * |
8447a0fe | 3419 | * Scanning up parent cpusets requires callback_lock. The |
02a0e53d PJ |
3420 | * __alloc_pages() routine only calls here with __GFP_HARDWALL bit |
3421 | * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the | |
3423 | * current task's mems_allowed came up empty on the first pass over | |
3423 | * the zonelist. So only GFP_KERNEL allocations, if all nodes in the | |
8447a0fe | 3424 | * cpuset are short of memory, might require taking the callback_lock. |
9bf2229f | 3425 | * |
36be57ff | 3426 | * The first call here from mm/page_alloc:get_page_from_freelist() |
02a0e53d PJ |
3427 | * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, |
3428 | * so no allocation on a node outside the cpuset is allowed (unless | |
3429 | * in interrupt, of course). | |
36be57ff PJ |
3430 | * |
3431 | * The second pass through get_page_from_freelist() doesn't even call | |
3432 | * here for GFP_ATOMIC calls. For those calls, the __alloc_pages() | |
3433 | * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set | |
3434 | * in alloc_flags. That logic and the checks below have the combined | |
3435 | * effect that: | |
9bf2229f PJ |
3436 | * in_interrupt - any node ok (current task context irrelevant) |
3437 | * GFP_ATOMIC - any node ok | |
da99ecf1 | 3438 | * tsk_is_oom_victim - any node ok |
78608366 | 3439 | * GFP_KERNEL - any node in enclosing hardwalled cpuset ok |
9bf2229f | 3440 | * GFP_USER - only nodes in the current task's mems_allowed ok. |
02a0e53d | 3441 | */ |
002f2906 | 3442 | bool __cpuset_node_allowed(int node, gfp_t gfp_mask) |
1da177e4 | 3443 | { |
c9710d80 | 3444 | struct cpuset *cs; /* current cpuset ancestors */ |
29afd49b | 3445 | int allowed; /* is allocation on this node allowed? */ |
8447a0fe | 3446 | unsigned long flags; |
9bf2229f | 3447 | |
6e276d2a | 3448 | if (in_interrupt()) |
002f2906 | 3449 | return true; |
9bf2229f | 3450 | if (node_isset(node, current->mems_allowed)) |
002f2906 | 3451 | return true; |
c596d9f3 DR |
3452 | /* |
3453 | * Allow tasks that have access to memory reserves because they have | |
3454 | * been OOM killed to get memory anywhere. | |
3455 | */ | |
da99ecf1 | 3456 | if (unlikely(tsk_is_oom_victim(current))) |
002f2906 | 3457 | return true; |
9bf2229f | 3458 | if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ |
002f2906 | 3459 | return false; |
9bf2229f | 3460 | |
5563e770 | 3461 | if (current->flags & PF_EXITING) /* Let dying task have memory */ |
002f2906 | 3462 | return true; |
5563e770 | 3463 | |
9bf2229f | 3464 | /* Not hardwall and node outside mems_allowed: scan up cpusets */ |
8447a0fe | 3465 | spin_lock_irqsave(&callback_lock, flags); |
053199ed | 3466 | |
b8dadcb5 | 3467 | rcu_read_lock(); |
78608366 | 3468 | cs = nearest_hardwall_ancestor(task_cs(current)); |
99afb0fd | 3469 | allowed = node_isset(node, cs->mems_allowed); |
b8dadcb5 | 3470 | rcu_read_unlock(); |
053199ed | 3471 | |
8447a0fe | 3472 | spin_unlock_irqrestore(&callback_lock, flags); |
9bf2229f | 3473 | return allowed; |
1da177e4 LT |
3474 | } |
3475 | ||
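A hypothetical allocator-side sketch (example_first_allowed_node is an invented helper, not part of this file): walk the online nodes and return the first one the current task's cpuset permits for the given gfp_mask; the real page allocator applies the equivalent per-zone test when ALLOC_CPUSET is set.

/* hypothetical helper sketch */
static int example_first_allowed_node(gfp_t gfp_mask)
{
	int nid;

	for_each_online_node(nid)
		if (__cpuset_node_allowed(nid, gfp_mask))
			return nid;

	return NUMA_NO_NODE;
}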
825a46af | 3476 | /** |
6adef3eb JS |
3477 | * cpuset_mem_spread_node() - On which node to begin search for a file page |
3478 | * cpuset_slab_spread_node() - On which node to begin search for a slab page | |
825a46af PJ |
3479 | * |
3480 | * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for | |
3481 | * tasks in a cpuset with is_spread_page or is_spread_slab set), | |
3482 | * and if the memory allocation used cpuset_mem_spread_node() | |
3483 | * to determine on which node to start looking, as it will for | |
3484 | * certain page cache or slab cache pages such as used for file | |
3485 | * system buffers and inode caches, then instead of starting on the | |
3486 | * local node to look for a free page, rather spread the starting | |
3487 | * node around the task's mems_allowed nodes. | |
3488 | * | |
3489 | * We don't have to worry about the returned node being offline | |
3490 | * because "it can't happen", and even if it did, it would be ok. | |
3491 | * | |
3492 | * The routines calling guarantee_online_mems() are careful to | |
3493 | * only set nodes in task->mems_allowed that are online. So it | |
3494 | * should not be possible for the following code to return an | |
3495 | * offline node. But if it did, that would be ok, as this routine | |
3496 | * is not returning the node where the allocation must be, only | |
3497 | * the node where the search should start. The zonelist passed to | |
3498 | * __alloc_pages() will include all nodes. If the slab allocator | |
3499 | * is passed an offline node, it will fall back to the local node. | |
3500 | * See kmem_cache_alloc_node(). | |
3501 | */ | |
3502 | ||
6adef3eb | 3503 | static int cpuset_spread_node(int *rotor) |
825a46af | 3504 | { |
0edaf86c | 3505 | return *rotor = next_node_in(*rotor, current->mems_allowed); |
825a46af | 3506 | } |
6adef3eb JS |
3507 | |
3508 | int cpuset_mem_spread_node(void) | |
3509 | { | |
778d3b0f MH |
3510 | if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE) |
3511 | current->cpuset_mem_spread_rotor = | |
3512 | node_random(¤t->mems_allowed); | |
3513 | ||
6adef3eb JS |
3514 | return cpuset_spread_node(¤t->cpuset_mem_spread_rotor); |
3515 | } | |
3516 | ||
3517 | int cpuset_slab_spread_node(void) | |
3518 | { | |
778d3b0f MH |
3519 | if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE) |
3520 | current->cpuset_slab_spread_rotor = | |
3521 | node_random(¤t->mems_allowed); | |
3522 | ||
6adef3eb JS |
3523 | return cpuset_spread_node(¤t->cpuset_slab_spread_rotor); |
3524 | } | |
3525 | ||
825a46af PJ |
3526 | EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); |
3527 | ||
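A stand-alone user-space analog of the spread rotor above, for illustration only: each call advances round-robin to the next set bit in an "allowed nodes" mask, wrapping at the end, much as cpuset_spread_node() advances the rotor with next_node_in() over current->mems_allowed.

#include <stdio.h>

#define MAX_NODES 8

/* round-robin successor of @prev within @allowed, wrapping at the end */
static int next_allowed_node(int prev, unsigned int allowed)
{
	for (int i = 1; i <= MAX_NODES; i++) {
		int node = (prev + i) % MAX_NODES;

		if (allowed & (1u << node))
			return node;
	}
	return -1;	/* empty mask */
}

int main(void)
{
	unsigned int allowed = 0x2c;	/* nodes 2, 3 and 5 allowed */
	int rotor = 0;

	for (int i = 0; i < 6; i++) {
		rotor = next_allowed_node(rotor, allowed);
		printf("allocation %d starts on node %d\n", i, rotor);
	}
	return 0;	/* visits nodes 2 3 5 2 3 5 */
}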
ef08e3b4 | 3528 | /** |
bbe373f2 DR |
3529 | * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's? |
3530 | * @tsk1: pointer to task_struct of some task. | |
3531 | * @tsk2: pointer to task_struct of some other task. | |
3532 | * | |
3533 | * Description: Return true if @tsk1's mems_allowed intersects the | |
3534 | * mems_allowed of @tsk2. Used by the OOM killer to determine if | |
3535 | * one of the task's memory usage might impact the memory available | |
3536 | * to the other. | |
ef08e3b4 PJ |
3537 | **/ |
3538 | ||
bbe373f2 DR |
3539 | int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, |
3540 | const struct task_struct *tsk2) | |
ef08e3b4 | 3541 | { |
bbe373f2 | 3542 | return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); |
ef08e3b4 PJ |
3543 | } |
3544 | ||
75aa1994 | 3545 | /** |
da39da3a | 3546 | * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed |
75aa1994 | 3547 | * |
da39da3a | 3548 | * Description: Prints current's name, cpuset name, and a cached copy of its |
b8dadcb5 | 3549 | * mems_allowed to the kernel log. |
75aa1994 | 3550 | */ |
da39da3a | 3551 | void cpuset_print_current_mems_allowed(void) |
75aa1994 | 3552 | { |
b8dadcb5 | 3553 | struct cgroup *cgrp; |
75aa1994 | 3554 | |
b8dadcb5 | 3555 | rcu_read_lock(); |
63f43f55 | 3556 | |
da39da3a | 3557 | cgrp = task_cs(current)->css.cgroup; |
ef8444ea | 3558 | pr_cont(",cpuset="); |
e61734c5 | 3559 | pr_cont_cgroup_name(cgrp); |
ef8444ea | 3560 | pr_cont(",mems_allowed=%*pbl", |
da39da3a | 3561 | nodemask_pr_args(¤t->mems_allowed)); |
f440d98f | 3562 | |
cfb5966b | 3563 | rcu_read_unlock(); |
75aa1994 DR |
3564 | } |
3565 | ||
3e0d98b9 PJ |
3566 | /* |
3567 | * Collection of memory_pressure is suppressed unless | |
3568 | * this flag is enabled by writing "1" to the special | |
3569 | * cpuset file 'memory_pressure_enabled' in the root cpuset. | |
3570 | */ | |
3571 | ||
c5b2aff8 | 3572 | int cpuset_memory_pressure_enabled __read_mostly; |
3e0d98b9 PJ |
3573 | |
3574 | /** | |
3575 | * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims. | |
3576 | * | |
3577 | * Keep a running average of the rate of synchronous (direct) | |
3578 | * page reclaim efforts initiated by tasks in each cpuset. | |
3579 | * | |
3580 | * This represents the rate at which some task in the cpuset | |
3581 | * ran low on memory on all nodes it was allowed to use, and | |
3582 | * had to enter the kernels page reclaim code in an effort to | |
3583 | * create more free memory by tossing clean pages or swapping | |
3584 | * or writing dirty pages. | |
3585 | * | |
3586 | * Display to user space in the per-cpuset read-only file | |
3587 | * "memory_pressure". Value displayed is an integer | |
3588 | * representing the recent rate of entry into the synchronous | |
3589 | * (direct) page reclaim by any task attached to the cpuset. | |
3590 | **/ | |
3591 | ||
3592 | void __cpuset_memory_pressure_bump(void) | |
3593 | { | |
b8dadcb5 | 3594 | rcu_read_lock(); |
8793d854 | 3595 | fmeter_markevent(&task_cs(current)->fmeter); |
b8dadcb5 | 3596 | rcu_read_unlock(); |
3e0d98b9 PJ |
3597 | } |
3598 | ||
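A user-space sketch of the interface described above (paths are illustrative and assume a cgroup v1 cpuset mount at /sys/fs/cgroup/cpuset with a child named "demo"): enable collection once via the root-only memory_pressure_enabled file, then sample a cpuset's recent direct-reclaim rate from its read-only memory_pressure file.

#include <stdio.h>

int main(void)
{
	FILE *f;
	int rate;

	/* collection is off by default; this root-only file turns it on */
	f = fopen("/sys/fs/cgroup/cpuset/cpuset.memory_pressure_enabled", "w");
	if (f) {
		fputs("1\n", f);
		fclose(f);
	}

	/* read the fmeter-decayed reclaim rate of one cpuset */
	f = fopen("/sys/fs/cgroup/cpuset/demo/cpuset.memory_pressure", "r");
	if (f) {
		if (fscanf(f, "%d", &rate) == 1)
			printf("recent direct-reclaim rate: %d\n", rate);
		fclose(f);
	}
	return 0;
}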
8793d854 | 3599 | #ifdef CONFIG_PROC_PID_CPUSET |
1da177e4 LT |
3600 | /* |
3601 | * proc_cpuset_show() | |
3602 | * - Print task's cpuset path into seq_file. | |
3603 | * - Used for /proc/<pid>/cpuset. | |
053199ed PJ |
3604 | * - No need to task_lock(tsk) on this tsk->cpuset reference, as it |
3605 | * doesn't really matter if tsk->cpuset changes after we read it, | |
5d21cc2d | 3606 | * and we take cpuset_mutex, keeping cpuset_attach() from changing it |
2df167a3 | 3607 | * anyway. |
1da177e4 | 3608 | */ |
52de4779 ZL |
3609 | int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, |
3610 | struct pid *pid, struct task_struct *tsk) | |
1da177e4 | 3611 | { |
4c737b41 | 3612 | char *buf; |
8793d854 | 3613 | struct cgroup_subsys_state *css; |
99f89551 | 3614 | int retval; |
1da177e4 | 3615 | |
99f89551 | 3616 | retval = -ENOMEM; |
e61734c5 | 3617 | buf = kmalloc(PATH_MAX, GFP_KERNEL); |
1da177e4 | 3618 | if (!buf) |
99f89551 EB |
3619 | goto out; |
3620 | ||
a79a908f | 3621 | css = task_get_css(tsk, cpuset_cgrp_id); |
4c737b41 TH |
3622 | retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX, |
3623 | current->nsproxy->cgroup_ns); | |
a79a908f | 3624 | css_put(css); |
4c737b41 | 3625 | if (retval >= PATH_MAX) |
679a5e3f TH |
3626 | retval = -ENAMETOOLONG; |
3627 | if (retval < 0) | |
52de4779 | 3628 | goto out_free; |
4c737b41 | 3629 | seq_puts(m, buf); |
1da177e4 | 3630 | seq_putc(m, '\n'); |
e61734c5 | 3631 | retval = 0; |
99f89551 | 3632 | out_free: |
1da177e4 | 3633 | kfree(buf); |
99f89551 | 3634 | out: |
1da177e4 LT |
3635 | return retval; |
3636 | } | |
8793d854 | 3637 | #endif /* CONFIG_PROC_PID_CPUSET */ |
1da177e4 | 3638 | |
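A user-space sketch of consuming what proc_cpuset_show() emits: read /proc/self/cpuset to obtain the calling task's cpuset path relative to its cgroup namespace root.

#include <stdio.h>

int main(void)
{
	char path[4096];
	FILE *f = fopen("/proc/self/cpuset", "r");

	if (!f) {
		perror("/proc/self/cpuset");
		return 1;
	}
	if (fgets(path, sizeof(path), f))
		printf("cpuset: %s", path);	/* e.g. "/" for the root cpuset */
	fclose(f);
	return 0;
}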
d01d4827 | 3639 | /* Display task mems_allowed in /proc/<pid>/status file. */ |
df5f8314 EB |
3640 | void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) |
3641 | { | |
e8e6d97c TH |
3642 | seq_printf(m, "Mems_allowed:\t%*pb\n", |
3643 | nodemask_pr_args(&task->mems_allowed)); | |
3644 | seq_printf(m, "Mems_allowed_list:\t%*pbl\n", | |
3645 | nodemask_pr_args(&task->mems_allowed)); | |
1da177e4 | 3646 | } |