Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * kernel/cpuset.c | |
3 | * | |
4 | * Processor and Memory placement constraints for sets of tasks. | |
5 | * | |
6 | * Copyright (C) 2003 BULL SA. | |
029190c5 | 7 | * Copyright (C) 2004-2007 Silicon Graphics, Inc. |
8793d854 | 8 | * Copyright (C) 2006 Google, Inc |
1da177e4 LT |
9 | * |
10 | * Portions derived from Patrick Mochel's sysfs code. | |
11 | * sysfs is Copyright (c) 2001-3 Patrick Mochel | |
1da177e4 | 12 | * |
825a46af | 13 | * 2003-10-10 Written by Simon Derr. |
1da177e4 | 14 | * 2003-10-22 Updates by Stephen Hemminger. |
825a46af | 15 | * 2004 May-July Rework by Paul Jackson. |
8793d854 | 16 | * 2006 Rework by Paul Menage to use generic cgroups |
cf417141 MK |
17 | * 2008 Rework of the scheduler domains and CPU hotplug handling |
18 | * by Max Krasnyansky | |
1da177e4 LT |
19 | * |
20 | * This file is subject to the terms and conditions of the GNU General Public | |
21 | * License. See the file COPYING in the main directory of the Linux | |
22 | * distribution for more details. | |
23 | */ | |
24 | ||
1da177e4 LT |
25 | #include <linux/cpu.h> |
26 | #include <linux/cpumask.h> | |
27 | #include <linux/cpuset.h> | |
28 | #include <linux/err.h> | |
29 | #include <linux/errno.h> | |
30 | #include <linux/file.h> | |
31 | #include <linux/fs.h> | |
32 | #include <linux/init.h> | |
33 | #include <linux/interrupt.h> | |
34 | #include <linux/kernel.h> | |
35 | #include <linux/kmod.h> | |
36 | #include <linux/list.h> | |
68860ec1 | 37 | #include <linux/mempolicy.h> |
1da177e4 LT |
38 | #include <linux/mm.h> |
39 | #include <linux/module.h> | |
40 | #include <linux/mount.h> | |
41 | #include <linux/namei.h> | |
42 | #include <linux/pagemap.h> | |
43 | #include <linux/proc_fs.h> | |
6b9c2603 | 44 | #include <linux/rcupdate.h> |
1da177e4 LT |
45 | #include <linux/sched.h> |
46 | #include <linux/seq_file.h> | |
22fb52dd | 47 | #include <linux/security.h> |
1da177e4 | 48 | #include <linux/slab.h> |
1da177e4 LT |
49 | #include <linux/spinlock.h> |
50 | #include <linux/stat.h> | |
51 | #include <linux/string.h> | |
52 | #include <linux/time.h> | |
53 | #include <linux/backing-dev.h> | |
54 | #include <linux/sort.h> | |
55 | ||
56 | #include <asm/uaccess.h> | |
57 | #include <asm/atomic.h> | |
3d3f26a7 | 58 | #include <linux/mutex.h> |
956db3ca CW |
59 | #include <linux/workqueue.h> |
60 | #include <linux/cgroup.h> | |
1da177e4 | 61 | |
202f72d5 PJ |
62 | /* |
63 | * Tracks how many cpusets are currently defined in system. | |
64 | * When there is only one cpuset (the root cpuset) we can | |
65 | * short circuit some hooks. | |
66 | */ | |
7edc5962 | 67 | int number_of_cpusets __read_mostly; |
202f72d5 | 68 | |
2df167a3 | 69 | /* Forward declare cgroup structures */ |
8793d854 PM |
70 | struct cgroup_subsys cpuset_subsys; |
71 | struct cpuset; | |
72 | ||
3e0d98b9 PJ |
73 | /* See "Frequency meter" comments, below. */ |
74 | ||
75 | struct fmeter { | |
76 | int cnt; /* unprocessed events count */ | |
77 | int val; /* most recent output value */ | |
78 | time_t time; /* clock (secs) when val computed */ | |
79 | spinlock_t lock; /* guards read or write of above */ | |
80 | }; | |
81 | ||
1da177e4 | 82 | struct cpuset { |
8793d854 PM |
83 | struct cgroup_subsys_state css; |
84 | ||
1da177e4 LT |
85 | unsigned long flags; /* "unsigned long" so bitops work */ |
86 | cpumask_t cpus_allowed; /* CPUs allowed to tasks in cpuset */ | |
87 | nodemask_t mems_allowed; /* Memory Nodes allowed to tasks */ | |
88 | ||
1da177e4 | 89 | struct cpuset *parent; /* my parent */ |
1da177e4 LT |
90 | |
91 | /* | |
92 | * Copy of global cpuset_mems_generation as of the most | |
93 | * recent time this cpuset changed its mems_allowed. | |
94 | */ | |
3e0d98b9 PJ |
95 | int mems_generation; |
96 | ||
97 | struct fmeter fmeter; /* memory_pressure filter */ | |
029190c5 PJ |
98 | |
99 | /* partition number for rebuild_sched_domains() */ | |
100 | int pn; | |
956db3ca | 101 | |
1d3504fc HS |
102 | /* for custom sched domain */ |
103 | int relax_domain_level; | |
104 | ||
956db3ca CW |
105 | /* used for walking a cpuset hierarchy */ |
106 | struct list_head stack_list; | |
1da177e4 LT |
107 | }; |
108 | ||
8793d854 PM |
109 | /* Retrieve the cpuset for a cgroup */ |
110 | static inline struct cpuset *cgroup_cs(struct cgroup *cont) | |
111 | { | |
112 | return container_of(cgroup_subsys_state(cont, cpuset_subsys_id), | |
113 | struct cpuset, css); | |
114 | } | |
115 | ||
116 | /* Retrieve the cpuset for a task */ | |
117 | static inline struct cpuset *task_cs(struct task_struct *task) | |
118 | { | |
119 | return container_of(task_subsys_state(task, cpuset_subsys_id), | |
120 | struct cpuset, css); | |
121 | } | |
956db3ca CW |
122 | struct cpuset_hotplug_scanner { |
123 | struct cgroup_scanner scan; | |
124 | struct cgroup *to; | |
125 | }; | |
8793d854 | 126 | |
1da177e4 LT |
127 | /* bits in struct cpuset flags field */ |
128 | typedef enum { | |
129 | CS_CPU_EXCLUSIVE, | |
130 | CS_MEM_EXCLUSIVE, | |
78608366 | 131 | CS_MEM_HARDWALL, |
45b07ef3 | 132 | CS_MEMORY_MIGRATE, |
029190c5 | 133 | CS_SCHED_LOAD_BALANCE, |
825a46af PJ |
134 | CS_SPREAD_PAGE, |
135 | CS_SPREAD_SLAB, | |
1da177e4 LT |
136 | } cpuset_flagbits_t; |
137 | ||
138 | /* convenient tests for these bits */ | |
139 | static inline int is_cpu_exclusive(const struct cpuset *cs) | |
140 | { | |
7b5b9ef0 | 141 | return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); |
1da177e4 LT |
142 | } |
143 | ||
144 | static inline int is_mem_exclusive(const struct cpuset *cs) | |
145 | { | |
7b5b9ef0 | 146 | return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); |
1da177e4 LT |
147 | } |
148 | ||
78608366 PM |
149 | static inline int is_mem_hardwall(const struct cpuset *cs) |
150 | { | |
151 | return test_bit(CS_MEM_HARDWALL, &cs->flags); | |
152 | } | |
153 | ||
029190c5 PJ |
154 | static inline int is_sched_load_balance(const struct cpuset *cs) |
155 | { | |
156 | return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); | |
157 | } | |
158 | ||
45b07ef3 PJ |
159 | static inline int is_memory_migrate(const struct cpuset *cs) |
160 | { | |
7b5b9ef0 | 161 | return test_bit(CS_MEMORY_MIGRATE, &cs->flags); |
45b07ef3 PJ |
162 | } |
163 | ||
825a46af PJ |
164 | static inline int is_spread_page(const struct cpuset *cs) |
165 | { | |
166 | return test_bit(CS_SPREAD_PAGE, &cs->flags); | |
167 | } | |
168 | ||
169 | static inline int is_spread_slab(const struct cpuset *cs) | |
170 | { | |
171 | return test_bit(CS_SPREAD_SLAB, &cs->flags); | |
172 | } | |
173 | ||
1da177e4 | 174 | /* |
151a4420 | 175 | * Increment this integer every time any cpuset changes its |
1da177e4 LT |
176 | * mems_allowed value. Users of cpusets can track this generation |
177 | * number, and avoid having to lock and reload mems_allowed unless | |
178 | * the cpuset they're using changes generation. | |
179 | * | |
2df167a3 | 180 | * A single, global generation is needed because cpuset_attach_task() could |
1da177e4 LT |
181 | * reattach a task to a different cpuset, which must not have its |
182 | * generation numbers aliased with those of that task's previous cpuset. |
183 | * | |
184 | * Generations are needed for mems_allowed because one task cannot | |
2df167a3 | 185 | * modify another's memory placement. So we must enable every task, |
1da177e4 LT |
186 | * on every visit to __alloc_pages(), to efficiently check whether |
187 | * its current->cpuset->mems_allowed has changed, requiring an update | |
188 | * of its current->mems_allowed. | |
151a4420 | 189 | * |
2df167a3 | 190 | * Since writes to cpuset_mems_generation are guarded by the cgroup lock |
151a4420 | 191 | * there is no need to mark it atomic. |
1da177e4 | 192 | */ |
151a4420 | 193 | static int cpuset_mems_generation; |
1da177e4 LT |
194 | |
195 | static struct cpuset top_cpuset = { | |
196 | .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)), | |
197 | .cpus_allowed = CPU_MASK_ALL, | |
198 | .mems_allowed = NODE_MASK_ALL, | |
1da177e4 LT |
199 | }; |
200 | ||
1da177e4 | 201 | /* |
2df167a3 PM |
202 | * There are two global mutexes guarding cpuset structures. The first |
203 | * is the main control groups cgroup_mutex, accessed via | |
204 | * cgroup_lock()/cgroup_unlock(). The second is the cpuset-specific | |
205 | * callback_mutex, below. They can nest. It is ok to first take | |
206 | * cgroup_mutex, then nest callback_mutex. We also require taking | |
207 | * task_lock() when dereferencing a task's cpuset pointer. See "The | |
208 | * task_lock() exception", at the end of this comment. | |
053199ed | 209 | * |
3d3f26a7 | 210 | * A task must hold both mutexes to modify cpusets. If a task |
2df167a3 | 211 | * holds cgroup_mutex, then it blocks others wanting that mutex, |
3d3f26a7 | 212 | * ensuring that it is the only task able to also acquire callback_mutex |
053199ed PJ |
213 | * and be able to modify cpusets. It can perform various checks on |
214 | * the cpuset structure first, knowing nothing will change. It can | |
2df167a3 | 215 | * also allocate memory while just holding cgroup_mutex. While it is |
053199ed | 216 | * performing these checks, various callback routines can briefly |
3d3f26a7 IM |
217 | * acquire callback_mutex to query cpusets. Once it is ready to make |
218 | * the changes, it takes callback_mutex, blocking everyone else. | |
053199ed PJ |
219 | * |
220 | * Calls to the kernel memory allocator can not be made while holding | |
3d3f26a7 | 221 | * callback_mutex, as that would risk double tripping on callback_mutex |
053199ed PJ |
222 | * from one of the callbacks into the cpuset code from within |
223 | * __alloc_pages(). | |
224 | * | |
3d3f26a7 | 225 | * If a task is only holding callback_mutex, then it has read-only |
053199ed PJ |
226 | * access to cpusets. |
227 | * | |
228 | * The task_struct fields mems_allowed and mems_generation may only | |
229 | * be accessed in the context of that task, so require no locks. | |
230 | * | |
3d3f26a7 | 231 | * The cpuset_common_file_read() handlers only hold callback_mutex across |
053199ed PJ |
232 | * small pieces of code, such as when reading out possibly multi-word |
233 | * cpumasks and nodemasks. | |
234 | * | |
2df167a3 PM |
235 | * Accessing a task's cpuset should be done in accordance with the |
236 | * guidelines for accessing subsystem state in kernel/cgroup.c | |
1da177e4 LT |
237 | */ |
238 | ||
3d3f26a7 | 239 | static DEFINE_MUTEX(callback_mutex); |
4247bdc6 | 240 | |
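The nesting rule described in the comment above is easier to see as code. Below is a minimal, hypothetical user-space model (pthread mutexes standing in for cgroup_lock()/callback_mutex, which are kernel primitives): a writer takes both locks in the documented order, a reader/callback takes only callback_mutex. The real writers in this file, such as update_cpumask() further down, follow the same pattern.

```c
/*
 * User-space model of the locking rules above; these pthread mutexes
 * merely stand in for the kernel's cgroup_mutex and callback_mutex
 * to illustrate the required ordering.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cgroup_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t callback_mutex = PTHREAD_MUTEX_INITIALIZER;
static int mems_allowed = 1;			/* stand-in for cpuset state */

/* Writer: always cgroup_mutex first, then callback_mutex, never reversed. */
static void modify_cpuset(int new_mems)
{
	pthread_mutex_lock(&cgroup_mutex);	/* cgroup_lock() */
	/* checks and memory allocation are legal while only this is held */
	pthread_mutex_lock(&callback_mutex);	/* blocks readers briefly */
	mems_allowed = new_mems;
	pthread_mutex_unlock(&callback_mutex);
	pthread_mutex_unlock(&cgroup_mutex);	/* cgroup_unlock() */
}

/* Reader (callback path): callback_mutex only, and no allocations inside. */
static int query_cpuset(void)
{
	int val;

	pthread_mutex_lock(&callback_mutex);
	val = mems_allowed;
	pthread_mutex_unlock(&callback_mutex);
	return val;
}

int main(void)
{
	modify_cpuset(2);
	printf("mems_allowed = %d\n", query_cpuset());
	return 0;
}
```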
cf417141 MK |
241 | /* |
242 | * This is ugly, but preserves the userspace API for existing cpuset | |
8793d854 | 243 | * users. If someone tries to mount the "cpuset" filesystem, we |
cf417141 MK |
244 | * silently switch it to mount "cgroup" instead |
245 | */ | |
454e2398 DH |
246 | static int cpuset_get_sb(struct file_system_type *fs_type, |
247 | int flags, const char *unused_dev_name, | |
248 | void *data, struct vfsmount *mnt) | |
1da177e4 | 249 | { |
8793d854 PM |
250 | struct file_system_type *cgroup_fs = get_fs_type("cgroup"); |
251 | int ret = -ENODEV; | |
252 | if (cgroup_fs) { | |
253 | char mountopts[] = | |
254 | "cpuset,noprefix," | |
255 | "release_agent=/sbin/cpuset_release_agent"; | |
256 | ret = cgroup_fs->get_sb(cgroup_fs, flags, | |
257 | unused_dev_name, mountopts, mnt); | |
258 | put_filesystem(cgroup_fs); | |
259 | } | |
260 | return ret; | |
1da177e4 LT |
261 | } |
262 | ||
263 | static struct file_system_type cpuset_fs_type = { | |
264 | .name = "cpuset", | |
265 | .get_sb = cpuset_get_sb, | |
1da177e4 LT |
266 | }; |
267 | ||
1da177e4 LT |
268 | /* |
269 | * Return in *pmask the portion of a cpusets's cpus_allowed that | |
270 | * are online. If none are online, walk up the cpuset hierarchy | |
271 | * until we find one that does have some online cpus. If we get | |
272 | * all the way to the top and still haven't found any online cpus, | |
273 | * return cpu_online_map. Or if passed a NULL cs from an exit'ing | |
274 | * task, return cpu_online_map. | |
275 | * | |
276 | * One way or another, we guarantee to return some non-empty subset | |
277 | * of cpu_online_map. | |
278 | * | |
3d3f26a7 | 279 | * Call with callback_mutex held. |
1da177e4 LT |
280 | */ |
281 | ||
282 | static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask) | |
283 | { | |
284 | while (cs && !cpus_intersects(cs->cpus_allowed, cpu_online_map)) | |
285 | cs = cs->parent; | |
286 | if (cs) | |
287 | cpus_and(*pmask, cs->cpus_allowed, cpu_online_map); | |
288 | else | |
289 | *pmask = cpu_online_map; | |
290 | BUG_ON(!cpus_intersects(*pmask, cpu_online_map)); | |
291 | } | |
292 | ||
293 | /* | |
294 | * Return in *pmask the portion of a cpusets's mems_allowed that | |
0e1e7c7a CL |
295 | * are online, with memory. If none are online with memory, walk |
296 | * up the cpuset hierarchy until we find one that does have some | |
297 | * online mems. If we get all the way to the top and still haven't | |
298 | * found any online mems, return node_states[N_HIGH_MEMORY]. | |
1da177e4 LT |
299 | * |
300 | * One way or another, we guarantee to return some non-empty subset | |
0e1e7c7a | 301 | * of node_states[N_HIGH_MEMORY]. |
1da177e4 | 302 | * |
3d3f26a7 | 303 | * Call with callback_mutex held. |
1da177e4 LT |
304 | */ |
305 | ||
306 | static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask) | |
307 | { | |
0e1e7c7a CL |
308 | while (cs && !nodes_intersects(cs->mems_allowed, |
309 | node_states[N_HIGH_MEMORY])) | |
1da177e4 LT |
310 | cs = cs->parent; |
311 | if (cs) | |
0e1e7c7a CL |
312 | nodes_and(*pmask, cs->mems_allowed, |
313 | node_states[N_HIGH_MEMORY]); | |
1da177e4 | 314 | else |
0e1e7c7a CL |
315 | *pmask = node_states[N_HIGH_MEMORY]; |
316 | BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY])); | |
1da177e4 LT |
317 | } |
318 | ||
cf2a473c PJ |
319 | /** |
320 | * cpuset_update_task_memory_state - update task memory placement | |
321 | * | |
322 | * If the current task's cpuset's mems_allowed changed behind our |
323 | * backs, update current->mems_allowed, mems_generation and task NUMA | |
324 | * mempolicy to the new value. | |
053199ed | 325 | * |
cf2a473c PJ |
326 | * Task mempolicy is updated by rebinding it relative to the |
327 | * current->cpuset if a task has its memory placement changed. | |
328 | * Do not call this routine if in_interrupt(). | |
329 | * | |
4a01c8d5 | 330 | * Call without callback_mutex or task_lock() held. May be |
2df167a3 PM |
331 | * called with or without cgroup_mutex held. Thanks in part to |
332 | * 'the_top_cpuset_hack', the task's cpuset pointer will never | |
41f7f60d DR |
333 | * be NULL. This routine also might acquire callback_mutex during |
334 | * call. | |
053199ed | 335 | * |
6b9c2603 PJ |
336 | * Reading current->cpuset->mems_generation doesn't need task_lock |
337 | * to guard the current->cpuset dereference, because it is guarded |
2df167a3 | 338 | * from concurrent freeing of current->cpuset using RCU. |
6b9c2603 PJ |
339 | * |
340 | * The rcu_dereference() is technically probably not needed, | |
341 | * as I don't actually mind if I see a new cpuset pointer but | |
342 | * an old value of mems_generation. However this really only | |
343 | * matters on alpha systems using cpusets heavily. If I dropped | |
344 | * that rcu_dereference(), it would save them a memory barrier. | |
345 | * For all other arch's, rcu_dereference is a no-op anyway, and for | |
346 | * alpha systems not using cpusets, another planned optimization, | |
347 | * avoiding the rcu critical section for tasks in the root cpuset | |
348 | * which is statically allocated, so can't vanish, will make this | |
349 | * irrelevant. Better to use RCU as intended, than to engage in | |
350 | * some cute trick to save a memory barrier that is impossible to | |
351 | * test, for alpha systems using cpusets heavily, which might not | |
352 | * even exist. | |
053199ed PJ |
353 | * |
354 | * This routine is needed to update the per-task mems_allowed data, | |
355 | * within the tasks context, when it is trying to allocate memory | |
356 | * (in various mm/mempolicy.c routines) and notices that some other | |
357 | * task has been modifying its cpuset. | |
1da177e4 LT |
358 | */ |
359 | ||
fe85a998 | 360 | void cpuset_update_task_memory_state(void) |
1da177e4 | 361 | { |
053199ed | 362 | int my_cpusets_mem_gen; |
cf2a473c | 363 | struct task_struct *tsk = current; |
6b9c2603 | 364 | struct cpuset *cs; |
053199ed | 365 | |
8793d854 | 366 | if (task_cs(tsk) == &top_cpuset) { |
03a285f5 PJ |
367 | /* Don't need rcu for top_cpuset. It's never freed. */ |
368 | my_cpusets_mem_gen = top_cpuset.mems_generation; | |
369 | } else { | |
370 | rcu_read_lock(); | |
da5ef6bb | 371 | my_cpusets_mem_gen = task_cs(tsk)->mems_generation; |
03a285f5 PJ |
372 | rcu_read_unlock(); |
373 | } | |
1da177e4 | 374 | |
cf2a473c | 375 | if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) { |
3d3f26a7 | 376 | mutex_lock(&callback_mutex); |
cf2a473c | 377 | task_lock(tsk); |
8793d854 | 378 | cs = task_cs(tsk); /* Maybe changed when task not locked */ |
cf2a473c PJ |
379 | guarantee_online_mems(cs, &tsk->mems_allowed); |
380 | tsk->cpuset_mems_generation = cs->mems_generation; | |
825a46af PJ |
381 | if (is_spread_page(cs)) |
382 | tsk->flags |= PF_SPREAD_PAGE; | |
383 | else | |
384 | tsk->flags &= ~PF_SPREAD_PAGE; | |
385 | if (is_spread_slab(cs)) | |
386 | tsk->flags |= PF_SPREAD_SLAB; | |
387 | else | |
388 | tsk->flags &= ~PF_SPREAD_SLAB; | |
cf2a473c | 389 | task_unlock(tsk); |
3d3f26a7 | 390 | mutex_unlock(&callback_mutex); |
74cb2155 | 391 | mpol_rebind_task(tsk, &tsk->mems_allowed); |
1da177e4 LT |
392 | } |
393 | } | |
394 | ||
395 | /* | |
396 | * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q? | |
397 | * | |
398 | * One cpuset is a subset of another if all its allowed CPUs and | |
399 | * Memory Nodes are a subset of the other, and its exclusive flags | |
2df167a3 | 400 | * are only set if the other's are set. Call holding cgroup_mutex. |
1da177e4 LT |
401 | */ |
402 | ||
403 | static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) | |
404 | { | |
405 | return cpus_subset(p->cpus_allowed, q->cpus_allowed) && | |
406 | nodes_subset(p->mems_allowed, q->mems_allowed) && | |
407 | is_cpu_exclusive(p) <= is_cpu_exclusive(q) && | |
408 | is_mem_exclusive(p) <= is_mem_exclusive(q); | |
409 | } | |
410 | ||
411 | /* | |
412 | * validate_change() - Used to validate that any proposed cpuset change | |
413 | * follows the structural rules for cpusets. | |
414 | * | |
415 | * If we replaced the flag and mask values of the current cpuset | |
416 | * (cur) with those values in the trial cpuset (trial), would | |
417 | * our various subset and exclusive rules still be valid? Presumes | |
2df167a3 | 418 | * cgroup_mutex held. |
1da177e4 LT |
419 | * |
420 | * 'cur' is the address of an actual, in-use cpuset. Operations | |
421 | * such as list traversal that depend on the actual address of the | |
422 | * cpuset in the list must use cur below, not trial. | |
423 | * | |
424 | * 'trial' is the address of bulk structure copy of cur, with | |
425 | * perhaps one or more of the fields cpus_allowed, mems_allowed, | |
426 | * or flags changed to new, trial values. | |
427 | * | |
428 | * Return 0 if valid, -errno if not. | |
429 | */ | |
430 | ||
431 | static int validate_change(const struct cpuset *cur, const struct cpuset *trial) | |
432 | { | |
8793d854 | 433 | struct cgroup *cont; |
1da177e4 LT |
434 | struct cpuset *c, *par; |
435 | ||
436 | /* Each of our child cpusets must be a subset of us */ | |
8793d854 PM |
437 | list_for_each_entry(cont, &cur->css.cgroup->children, sibling) { |
438 | if (!is_cpuset_subset(cgroup_cs(cont), trial)) | |
1da177e4 LT |
439 | return -EBUSY; |
440 | } | |
441 | ||
442 | /* Remaining checks don't apply to root cpuset */ | |
69604067 | 443 | if (cur == &top_cpuset) |
1da177e4 LT |
444 | return 0; |
445 | ||
69604067 PJ |
446 | par = cur->parent; |
447 | ||
1da177e4 LT |
448 | /* We must be a subset of our parent cpuset */ |
449 | if (!is_cpuset_subset(trial, par)) | |
450 | return -EACCES; | |
451 | ||
2df167a3 PM |
452 | /* |
453 | * If either I or some sibling (!= me) is exclusive, we can't | |
454 | * overlap | |
455 | */ | |
8793d854 PM |
456 | list_for_each_entry(cont, &par->css.cgroup->children, sibling) { |
457 | c = cgroup_cs(cont); | |
1da177e4 LT |
458 | if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) && |
459 | c != cur && | |
460 | cpus_intersects(trial->cpus_allowed, c->cpus_allowed)) | |
461 | return -EINVAL; | |
462 | if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) && | |
463 | c != cur && | |
464 | nodes_intersects(trial->mems_allowed, c->mems_allowed)) | |
465 | return -EINVAL; | |
466 | } | |
467 | ||
020958b6 PJ |
468 | /* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */ |
469 | if (cgroup_task_count(cur->css.cgroup)) { | |
470 | if (cpus_empty(trial->cpus_allowed) || | |
471 | nodes_empty(trial->mems_allowed)) { | |
472 | return -ENOSPC; | |
473 | } | |
474 | } | |
475 | ||
1da177e4 LT |
476 | return 0; |
477 | } | |
478 | ||
029190c5 | 479 | /* |
cf417141 | 480 | * Helper routine for generate_sched_domains(). |
029190c5 PJ |
481 | * Do cpusets a, b have overlapping cpus_allowed masks? |
482 | */ | |
029190c5 PJ |
483 | static int cpusets_overlap(struct cpuset *a, struct cpuset *b) |
484 | { | |
485 | return cpus_intersects(a->cpus_allowed, b->cpus_allowed); | |
486 | } | |
487 | ||
1d3504fc HS |
488 | static void |
489 | update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c) | |
490 | { | |
1d3504fc HS |
491 | if (dattr->relax_domain_level < c->relax_domain_level) |
492 | dattr->relax_domain_level = c->relax_domain_level; | |
493 | return; | |
494 | } | |
495 | ||
f5393693 LJ |
496 | static void |
497 | update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c) | |
498 | { | |
499 | LIST_HEAD(q); | |
500 | ||
501 | list_add(&c->stack_list, &q); | |
502 | while (!list_empty(&q)) { | |
503 | struct cpuset *cp; | |
504 | struct cgroup *cont; | |
505 | struct cpuset *child; | |
506 | ||
507 | cp = list_first_entry(&q, struct cpuset, stack_list); | |
508 | list_del(q.next); | |
509 | ||
510 | if (cpus_empty(cp->cpus_allowed)) | |
511 | continue; | |
512 | ||
513 | if (is_sched_load_balance(cp)) | |
514 | update_domain_attr(dattr, cp); | |
515 | ||
516 | list_for_each_entry(cont, &cp->css.cgroup->children, sibling) { | |
517 | child = cgroup_cs(cont); | |
518 | list_add_tail(&child->stack_list, &q); | |
519 | } | |
520 | } | |
521 | } | |
522 | ||
029190c5 | 523 | /* |
cf417141 MK |
524 | * generate_sched_domains() |
525 | * | |
526 | * This function builds a partial partition of the system's CPUs. |
527 | * A 'partial partition' is a set of non-overlapping subsets whose | |
528 | * union is a subset of that set. | |
529 | * The output of this function needs to be passed to kernel/sched.c | |
530 | * partition_sched_domains() routine, which will rebuild the scheduler's | |
531 | * load balancing domains (sched domains) as specified by that partial | |
532 | * partition. | |
029190c5 PJ |
533 | * |
534 | * See "What is sched_load_balance" in Documentation/cpusets.txt | |
535 | * for a background explanation of this. | |
536 | * | |
537 | * Does not return errors, on the theory that the callers of this | |
538 | * routine would rather not worry about failures to rebuild sched | |
539 | * domains when operating in the severe memory shortage situations | |
540 | * that could cause allocation failures below. | |
541 | * | |
cf417141 | 542 | * Must be called with cgroup_lock held. |
029190c5 PJ |
543 | * |
544 | * The three key local variables below are: | |
aeed6824 | 545 | * q - a linked-list queue of cpuset pointers, used to implement a |
029190c5 PJ |
546 | * top-down scan of all cpusets. This scan loads a pointer |
547 | * to each cpuset marked is_sched_load_balance into the | |
548 | * array 'csa'. For our purposes, rebuilding the schedulers | |
549 | * sched domains, we can ignore !is_sched_load_balance cpusets. | |
550 | * csa - (for CpuSet Array) Array of pointers to all the cpusets | |
551 | * that need to be load balanced, for convenient iterative | |
552 | * access by the subsequent code that finds the best partition, | |
553 | * i.e the set of domains (subsets) of CPUs such that the | |
554 | * cpus_allowed of every cpuset marked is_sched_load_balance | |
555 | * is a subset of one of these domains, while there are as | |
556 | * many such domains as possible, each as small as possible. | |
557 | * doms - Conversion of 'csa' to an array of cpumasks, for passing to | |
558 | * the kernel/sched.c routine partition_sched_domains() in a | |
559 | * convenient format, that can be easily compared to the prior | |
560 | * value to determine what partition elements (sched domains) | |
561 | * were changed (added or removed.) | |
562 | * | |
563 | * Finding the best partition (set of domains): | |
564 | * The triple nested loops below over i, j, k scan over the | |
565 | * load balanced cpusets (using the array of cpuset pointers in | |
566 | * csa[]) looking for pairs of cpusets that have overlapping | |
567 | * cpus_allowed, but which don't have the same 'pn' partition | |
568 | * number, and merge them into the same partition. It keeps |
569 | * looping on the 'restart' label until it can no longer find | |
570 | * any such pairs. | |
571 | * | |
572 | * The union of the cpus_allowed masks from the set of | |
573 | * all cpusets having the same 'pn' value then form the one | |
574 | * element of the partition (one sched domain) to be passed to | |
575 | * partition_sched_domains(). | |
576 | */ | |
cf417141 MK |
577 | static int generate_sched_domains(cpumask_t **domains, |
578 | struct sched_domain_attr **attributes) | |
029190c5 | 579 | { |
cf417141 | 580 | LIST_HEAD(q); /* queue of cpusets to be scanned */ |
029190c5 PJ |
581 | struct cpuset *cp; /* scans q */ |
582 | struct cpuset **csa; /* array of all cpuset ptrs */ | |
583 | int csn; /* how many cpuset ptrs in csa so far */ | |
584 | int i, j, k; /* indices for partition finding loops */ | |
585 | cpumask_t *doms; /* resulting partition; i.e. sched domains */ | |
1d3504fc | 586 | struct sched_domain_attr *dattr; /* attributes for custom domains */ |
029190c5 PJ |
587 | int ndoms; /* number of sched domains in result */ |
588 | int nslot; /* next empty doms[] cpumask_t slot */ | |
589 | ||
029190c5 | 590 | doms = NULL; |
1d3504fc | 591 | dattr = NULL; |
cf417141 | 592 | csa = NULL; |
029190c5 PJ |
593 | |
594 | /* Special case for the 99% of systems with one, full, sched domain */ | |
595 | if (is_sched_load_balance(&top_cpuset)) { | |
029190c5 PJ |
596 | doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL); |
597 | if (!doms) | |
cf417141 MK |
598 | goto done; |
599 | ||
1d3504fc HS |
600 | dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL); |
601 | if (dattr) { | |
602 | *dattr = SD_ATTR_INIT; | |
93a65575 | 603 | update_domain_attr_tree(dattr, &top_cpuset); |
1d3504fc | 604 | } |
029190c5 | 605 | *doms = top_cpuset.cpus_allowed; |
cf417141 MK |
606 | |
607 | ndoms = 1; | |
608 | goto done; | |
029190c5 PJ |
609 | } |
610 | ||
029190c5 PJ |
611 | csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL); |
612 | if (!csa) | |
613 | goto done; | |
614 | csn = 0; | |
615 | ||
aeed6824 LZ |
616 | list_add(&top_cpuset.stack_list, &q); |
617 | while (!list_empty(&q)) { | |
029190c5 PJ |
618 | struct cgroup *cont; |
619 | struct cpuset *child; /* scans child cpusets of cp */ | |
489a5393 | 620 | |
aeed6824 LZ |
621 | cp = list_first_entry(&q, struct cpuset, stack_list); |
622 | list_del(q.next); | |
623 | ||
489a5393 LJ |
624 | if (cpus_empty(cp->cpus_allowed)) |
625 | continue; | |
626 | ||
f5393693 LJ |
627 | /* |
628 | * All child cpusets contain a subset of the parent's cpus, so | |
629 | * just skip them, and then we call update_domain_attr_tree() | |
630 | * to calc relax_domain_level of the corresponding sched | |
631 | * domain. | |
632 | */ | |
633 | if (is_sched_load_balance(cp)) { | |
029190c5 | 634 | csa[csn++] = cp; |
f5393693 LJ |
635 | continue; |
636 | } | |
489a5393 | 637 | |
029190c5 PJ |
638 | list_for_each_entry(cont, &cp->css.cgroup->children, sibling) { |
639 | child = cgroup_cs(cont); | |
aeed6824 | 640 | list_add_tail(&child->stack_list, &q); |
029190c5 PJ |
641 | } |
642 | } | |
643 | ||
644 | for (i = 0; i < csn; i++) | |
645 | csa[i]->pn = i; | |
646 | ndoms = csn; | |
647 | ||
648 | restart: | |
649 | /* Find the best partition (set of sched domains) */ | |
650 | for (i = 0; i < csn; i++) { | |
651 | struct cpuset *a = csa[i]; | |
652 | int apn = a->pn; | |
653 | ||
654 | for (j = 0; j < csn; j++) { | |
655 | struct cpuset *b = csa[j]; | |
656 | int bpn = b->pn; | |
657 | ||
658 | if (apn != bpn && cpusets_overlap(a, b)) { | |
659 | for (k = 0; k < csn; k++) { | |
660 | struct cpuset *c = csa[k]; | |
661 | ||
662 | if (c->pn == bpn) | |
663 | c->pn = apn; | |
664 | } | |
665 | ndoms--; /* one less element */ | |
666 | goto restart; | |
667 | } | |
668 | } | |
669 | } | |
670 | ||
cf417141 MK |
671 | /* |
672 | * Now we know how many domains to create. | |
673 | * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. | |
674 | */ | |
029190c5 | 675 | doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL); |
700018e0 | 676 | if (!doms) |
cf417141 | 677 | goto done; |
cf417141 MK |
678 | |
679 | /* | |
680 | * The rest of the code, including the scheduler, can deal with | |
681 | * dattr==NULL case. No need to abort if alloc fails. | |
682 | */ | |
1d3504fc | 683 | dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL); |
029190c5 PJ |
684 | |
685 | for (nslot = 0, i = 0; i < csn; i++) { | |
686 | struct cpuset *a = csa[i]; | |
cf417141 | 687 | cpumask_t *dp; |
029190c5 PJ |
688 | int apn = a->pn; |
689 | ||
cf417141 MK |
690 | if (apn < 0) { |
691 | /* Skip completed partitions */ | |
692 | continue; | |
693 | } | |
694 | ||
695 | dp = doms + nslot; | |
696 | ||
697 | if (nslot == ndoms) { | |
698 | static int warnings = 10; | |
699 | if (warnings) { | |
700 | printk(KERN_WARNING | |
701 | "rebuild_sched_domains confused:" | |
702 | " nslot %d, ndoms %d, csn %d, i %d," | |
703 | " apn %d\n", | |
704 | nslot, ndoms, csn, i, apn); | |
705 | warnings--; | |
029190c5 | 706 | } |
cf417141 MK |
707 | continue; |
708 | } | |
029190c5 | 709 | |
cf417141 MK |
710 | cpus_clear(*dp); |
711 | if (dattr) | |
712 | *(dattr + nslot) = SD_ATTR_INIT; | |
713 | for (j = i; j < csn; j++) { | |
714 | struct cpuset *b = csa[j]; | |
715 | ||
716 | if (apn == b->pn) { | |
717 | cpus_or(*dp, *dp, b->cpus_allowed); | |
718 | if (dattr) | |
719 | update_domain_attr_tree(dattr + nslot, b); | |
720 | ||
721 | /* Done with this partition */ | |
722 | b->pn = -1; | |
029190c5 | 723 | } |
029190c5 | 724 | } |
cf417141 | 725 | nslot++; |
029190c5 PJ |
726 | } |
727 | BUG_ON(nslot != ndoms); | |
728 | ||
cf417141 MK |
729 | done: |
730 | kfree(csa); | |
731 | ||
700018e0 LZ |
732 | /* |
733 | * Fallback to the default domain if kmalloc() failed. | |
734 | * See comments in partition_sched_domains(). | |
735 | */ | |
736 | if (doms == NULL) | |
737 | ndoms = 1; | |
738 | ||
cf417141 MK |
739 | *domains = doms; |
740 | *attributes = dattr; | |
741 | return ndoms; | |
742 | } | |
743 | ||
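The partition-finding pass documented above (repeatedly merging the 'pn' labels of any two overlapping, load-balanced cpusets until no such pair remains) can be exercised outside the kernel. The following is a hypothetical user-space reduction of that loop, not the kernel routine itself: unsigned long bitmasks stand in for cpus_allowed and an int array for the pn values stored in csa[].

```c
/* Hypothetical user-space model of the pn-merging loop in
 * generate_sched_domains(): csa[] holds cpu masks (bit i == CPU i),
 * pn[] the partition numbers; overlapping masks end up in the same
 * partition, and each surviving partition becomes one sched domain. */
#include <stdio.h>

#define NSETS 4

int main(void)
{
	unsigned long csa[NSETS] = { 0x03, 0x06, 0x30, 0xc0 };
	int pn[NSETS];
	int i, j, k, ndoms = NSETS;

	for (i = 0; i < NSETS; i++)
		pn[i] = i;

restart:
	for (i = 0; i < NSETS; i++) {
		for (j = 0; j < NSETS; j++) {
			if (pn[i] != pn[j] && (csa[i] & csa[j])) {
				for (k = 0; k < NSETS; k++)
					if (pn[k] == pn[j])
						pn[k] = pn[i];
				ndoms--;	/* two partitions merged */
				goto restart;
			}
		}
	}

	/* 0x03 and 0x06 overlap -> one domain; 0x30 and 0xc0 stay separate */
	printf("%d sched domains\n", ndoms);	/* prints 3 */
	for (i = 0; i < NSETS; i++)
		printf("cpuset %d -> partition %d\n", i, pn[i]);
	return 0;
}
```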
744 | /* | |
745 | * Rebuild scheduler domains. | |
746 | * | |
747 | * Call with neither cgroup_mutex held nor within get_online_cpus(). | |
748 | * Takes both cgroup_mutex and get_online_cpus(). | |
749 | * | |
750 | * Cannot be directly called from cpuset code handling changes | |
751 | * to the cpuset pseudo-filesystem, because it cannot be called | |
752 | * from code that already holds cgroup_mutex. | |
753 | */ | |
754 | static void do_rebuild_sched_domains(struct work_struct *unused) | |
755 | { | |
756 | struct sched_domain_attr *attr; | |
757 | cpumask_t *doms; | |
758 | int ndoms; | |
759 | ||
86ef5c9a | 760 | get_online_cpus(); |
cf417141 MK |
761 | |
762 | /* Generate domain masks and attrs */ | |
763 | cgroup_lock(); | |
764 | ndoms = generate_sched_domains(&doms, &attr); | |
765 | cgroup_unlock(); | |
766 | ||
767 | /* Have scheduler rebuild the domains */ | |
768 | partition_sched_domains(ndoms, doms, attr); | |
769 | ||
86ef5c9a | 770 | put_online_cpus(); |
cf417141 | 771 | } |
029190c5 | 772 | |
cf417141 MK |
773 | static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains); |
774 | ||
775 | /* | |
776 | * Rebuild scheduler domains, asynchronously via workqueue. | |
777 | * | |
778 | * If the flag 'sched_load_balance' of any cpuset with non-empty | |
779 | * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset | |
780 | * which has that flag enabled, or if any cpuset with a non-empty | |
781 | * 'cpus' is removed, then call this routine to rebuild the | |
782 | * scheduler's dynamic sched domains. | |
783 | * | |
784 | * The rebuild_sched_domains() and partition_sched_domains() | |
785 | * routines must nest cgroup_lock() inside get_online_cpus(), | |
786 | * but such cpuset changes as these must nest that locking the | |
787 | * other way, holding cgroup_lock() for much of the code. | |
788 | * | |
789 | * So in order to avoid an ABBA deadlock, the cpuset code handling | |
790 | * these user changes delegates the actual sched domain rebuilding | |
791 | * to a separate workqueue thread, which ends up processing the | |
792 | * above do_rebuild_sched_domains() function. | |
793 | */ | |
794 | static void async_rebuild_sched_domains(void) | |
795 | { | |
796 | schedule_work(&rebuild_sched_domains_work); | |
797 | } | |
798 | ||
799 | /* | |
800 | * Accomplishes the same scheduler domain rebuild as the above | |
801 | * async_rebuild_sched_domains(), however it directly calls the | |
802 | * rebuild routine synchronously rather than calling it via an | |
803 | * asynchronous work thread. | |
804 | * | |
805 | * This can only be called from code that is not holding | |
806 | * cgroup_mutex (not nested in a cgroup_lock() call.) | |
807 | */ | |
808 | void rebuild_sched_domains(void) | |
809 | { | |
810 | do_rebuild_sched_domains(NULL); | |
029190c5 PJ |
811 | } |
812 | ||
58f4790b CW |
813 | /** |
814 | * cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's | |
815 | * @tsk: task to test | |
816 | * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner | |
817 | * | |
2df167a3 | 818 | * Call with cgroup_mutex held. May take callback_mutex during call. |
58f4790b CW |
819 | * Called for each task in a cgroup by cgroup_scan_tasks(). |
820 | * Return nonzero if this task's cpus_allowed mask should be changed (in other |
821 | * words, if its mask is not equal to its cpuset's mask). | |
053199ed | 822 | */ |
9e0c914c AB |
823 | static int cpuset_test_cpumask(struct task_struct *tsk, |
824 | struct cgroup_scanner *scan) | |
58f4790b CW |
825 | { |
826 | return !cpus_equal(tsk->cpus_allowed, | |
827 | (cgroup_cs(scan->cg))->cpus_allowed); | |
828 | } | |
053199ed | 829 | |
58f4790b CW |
830 | /** |
831 | * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's | |
832 | * @tsk: task to test | |
833 | * @scan: struct cgroup_scanner containing the cgroup of the task | |
834 | * | |
835 | * Called by cgroup_scan_tasks() for each task in a cgroup whose | |
836 | * cpus_allowed mask needs to be changed. | |
837 | * | |
838 | * We don't need to re-check for the cgroup/cpuset membership, since we're | |
839 | * holding cgroup_lock() at this point. | |
840 | */ | |
9e0c914c AB |
841 | static void cpuset_change_cpumask(struct task_struct *tsk, |
842 | struct cgroup_scanner *scan) | |
58f4790b | 843 | { |
f9a86fcb | 844 | set_cpus_allowed_ptr(tsk, &((cgroup_cs(scan->cg))->cpus_allowed)); |
58f4790b CW |
845 | } |
846 | ||
0b2f630a MX |
847 | /** |
848 | * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. | |
849 | * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed | |
4e74339a | 850 | * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks() |
0b2f630a MX |
851 | * |
852 | * Called with cgroup_mutex held | |
853 | * | |
854 | * The cgroup_scan_tasks() function will scan all the tasks in a cgroup, | |
855 | * calling callback functions for each. | |
856 | * | |
4e74339a LZ |
857 | * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0 |
858 | * if @heap != NULL. | |
0b2f630a | 859 | */ |
4e74339a | 860 | static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap) |
0b2f630a MX |
861 | { |
862 | struct cgroup_scanner scan; | |
0b2f630a MX |
863 | |
864 | scan.cg = cs->css.cgroup; | |
865 | scan.test_task = cpuset_test_cpumask; | |
866 | scan.process_task = cpuset_change_cpumask; | |
4e74339a LZ |
867 | scan.heap = heap; |
868 | cgroup_scan_tasks(&scan); | |
0b2f630a MX |
869 | } |
870 | ||
58f4790b CW |
871 | /** |
872 | * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it | |
873 | * @cs: the cpuset to consider | |
874 | * @buf: buffer of cpu numbers written to this cpuset | |
875 | */ | |
e3712395 | 876 | static int update_cpumask(struct cpuset *cs, const char *buf) |
1da177e4 | 877 | { |
4e74339a | 878 | struct ptr_heap heap; |
1da177e4 | 879 | struct cpuset trialcs; |
58f4790b CW |
880 | int retval; |
881 | int is_load_balanced; | |
1da177e4 | 882 | |
4c4d50f7 PJ |
883 | /* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */ |
884 | if (cs == &top_cpuset) | |
885 | return -EACCES; | |
886 | ||
1da177e4 | 887 | trialcs = *cs; |
6f7f02e7 DR |
888 | |
889 | /* | |
c8d9c90c | 890 | * An empty cpus_allowed is ok only if the cpuset has no tasks. |
020958b6 PJ |
891 | * Since cpulist_parse() fails on an empty mask, we special case |
892 | * that parsing. The validate_change() call ensures that cpusets | |
893 | * with tasks have cpus. | |
6f7f02e7 | 894 | */ |
020958b6 | 895 | if (!*buf) { |
6f7f02e7 DR |
896 | cpus_clear(trialcs.cpus_allowed); |
897 | } else { | |
898 | retval = cpulist_parse(buf, trialcs.cpus_allowed); | |
899 | if (retval < 0) | |
900 | return retval; | |
37340746 LJ |
901 | |
902 | if (!cpus_subset(trialcs.cpus_allowed, cpu_online_map)) | |
903 | return -EINVAL; | |
6f7f02e7 | 904 | } |
1da177e4 | 905 | retval = validate_change(cs, &trialcs); |
85d7b949 DG |
906 | if (retval < 0) |
907 | return retval; | |
029190c5 | 908 | |
8707d8b8 PM |
909 | /* Nothing to do if the cpus didn't change */ |
910 | if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed)) | |
911 | return 0; | |
58f4790b | 912 | |
4e74339a LZ |
913 | retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL); |
914 | if (retval) | |
915 | return retval; | |
916 | ||
029190c5 PJ |
917 | is_load_balanced = is_sched_load_balance(&trialcs); |
918 | ||
3d3f26a7 | 919 | mutex_lock(&callback_mutex); |
85d7b949 | 920 | cs->cpus_allowed = trialcs.cpus_allowed; |
3d3f26a7 | 921 | mutex_unlock(&callback_mutex); |
029190c5 | 922 | |
8707d8b8 PM |
923 | /* |
924 | * Scan tasks in the cpuset, and update the cpumasks of any | |
58f4790b | 925 | * that need an update. |
8707d8b8 | 926 | */ |
4e74339a LZ |
927 | update_tasks_cpumask(cs, &heap); |
928 | ||
929 | heap_free(&heap); | |
58f4790b | 930 | |
8707d8b8 | 931 | if (is_load_balanced) |
cf417141 | 932 | async_rebuild_sched_domains(); |
85d7b949 | 933 | return 0; |
1da177e4 LT |
934 | } |
935 | ||
e4e364e8 PJ |
936 | /* |
937 | * cpuset_migrate_mm | |
938 | * | |
939 | * Migrate memory region from one set of nodes to another. | |
940 | * | |
941 | * Temporarily set task's mems_allowed to target nodes of migration, |
942 | * so that the migration code can allocate pages on these nodes. | |
943 | * | |
2df167a3 | 944 | * Call holding cgroup_mutex, so current's cpuset won't change |
c8d9c90c | 945 | * during this call, as manage_mutex holds off any cpuset_attach() |
e4e364e8 PJ |
946 | * calls. Therefore we don't need to take task_lock around the |
947 | * call to guarantee_online_mems(), as we know no one is changing | |
2df167a3 | 948 | * our task's cpuset. |
e4e364e8 PJ |
949 | * |
950 | * Hold callback_mutex around the two modifications of our task's |
951 | * mems_allowed to synchronize with cpuset_mems_allowed(). | |
952 | * | |
953 | * While the mm_struct we are migrating is typically from some | |
954 | * other task, the task_struct mems_allowed that we are hacking | |
955 | * is for our current task, which must allocate new pages for that | |
956 | * migrating memory region. | |
957 | * | |
958 | * We call cpuset_update_task_memory_state() before hacking | |
959 | * our task's mems_allowed, so that we are assured of being in |
960 | * sync with our task's cpuset, and in particular, callbacks to |
961 | * cpuset_update_task_memory_state() from nested page allocations | |
962 | * won't see any mismatch of our cpuset and task mems_generation | |
963 | * values, so won't overwrite our hacked task's mems_allowed |
964 | * nodemask. | |
965 | */ | |
966 | ||
967 | static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, | |
968 | const nodemask_t *to) | |
969 | { | |
970 | struct task_struct *tsk = current; | |
971 | ||
972 | cpuset_update_task_memory_state(); | |
973 | ||
974 | mutex_lock(&callback_mutex); | |
975 | tsk->mems_allowed = *to; | |
976 | mutex_unlock(&callback_mutex); | |
977 | ||
978 | do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL); | |
979 | ||
980 | mutex_lock(&callback_mutex); | |
8793d854 | 981 | guarantee_online_mems(task_cs(tsk),&tsk->mems_allowed); |
e4e364e8 PJ |
982 | mutex_unlock(&callback_mutex); |
983 | } | |
984 | ||
8793d854 PM |
985 | static void *cpuset_being_rebound; |
986 | ||
0b2f630a MX |
987 | /** |
988 | * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. | |
989 | * @cs: the cpuset in which each task's mems_allowed mask needs to be changed | |
990 | * @oldmem: old mems_allowed of cpuset cs | |
991 | * | |
992 | * Called with cgroup_mutex held | |
993 | * Return 0 if successful, -errno if not. | |
994 | */ | |
995 | static int update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem) | |
1da177e4 | 996 | { |
8793d854 | 997 | struct task_struct *p; |
4225399a PJ |
998 | struct mm_struct **mmarray; |
999 | int i, n, ntasks; | |
04c19fa6 | 1000 | int migrate; |
4225399a | 1001 | int fudge; |
8793d854 | 1002 | struct cgroup_iter it; |
0b2f630a | 1003 | int retval; |
59dac16f | 1004 | |
846a16bf | 1005 | cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ |
4225399a PJ |
1006 | |
1007 | fudge = 10; /* spare mmarray[] slots */ | |
1008 | fudge += cpus_weight(cs->cpus_allowed); /* imagine one fork-bomb/cpu */ | |
1009 | retval = -ENOMEM; | |
1010 | ||
1011 | /* | |
1012 | * Allocate mmarray[] to hold mm reference for each task | |
1013 | * in cpuset cs. Can't kmalloc GFP_KERNEL while holding | |
1014 | * tasklist_lock. We could use GFP_ATOMIC, but with a | |
1015 | * few more lines of code, we can retry until we get a big | |
1016 | * enough mmarray[] w/o using GFP_ATOMIC. | |
1017 | */ | |
1018 | while (1) { | |
8793d854 | 1019 | ntasks = cgroup_task_count(cs->css.cgroup); /* guess */ |
4225399a PJ |
1020 | ntasks += fudge; |
1021 | mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL); | |
1022 | if (!mmarray) | |
1023 | goto done; | |
c2aef333 | 1024 | read_lock(&tasklist_lock); /* block fork */ |
8793d854 | 1025 | if (cgroup_task_count(cs->css.cgroup) <= ntasks) |
4225399a | 1026 | break; /* got enough */ |
c2aef333 | 1027 | read_unlock(&tasklist_lock); /* try again */ |
4225399a PJ |
1028 | kfree(mmarray); |
1029 | } | |
1030 | ||
1031 | n = 0; | |
1032 | ||
1033 | /* Load up mmarray[] with mm reference for each task in cpuset. */ | |
8793d854 PM |
1034 | cgroup_iter_start(cs->css.cgroup, &it); |
1035 | while ((p = cgroup_iter_next(cs->css.cgroup, &it))) { | |
4225399a PJ |
1036 | struct mm_struct *mm; |
1037 | ||
1038 | if (n >= ntasks) { | |
1039 | printk(KERN_WARNING | |
1040 | "Cpuset mempolicy rebind incomplete.\n"); | |
8793d854 | 1041 | break; |
4225399a | 1042 | } |
4225399a PJ |
1043 | mm = get_task_mm(p); |
1044 | if (!mm) | |
1045 | continue; | |
1046 | mmarray[n++] = mm; | |
8793d854 PM |
1047 | } |
1048 | cgroup_iter_end(cs->css.cgroup, &it); | |
c2aef333 | 1049 | read_unlock(&tasklist_lock); |
4225399a PJ |
1050 | |
1051 | /* | |
1052 | * Now that we've dropped the tasklist spinlock, we can | |
1053 | * rebind the vma mempolicies of each mm in mmarray[] to their | |
1054 | * new cpuset, and release that mm. The mpol_rebind_mm() | |
1055 | * call takes mmap_sem, which we couldn't take while holding | |
846a16bf | 1056 | * tasklist_lock. Forks can happen again now - the mpol_dup() |
4225399a PJ |
1057 | * cpuset_being_rebound check will catch such forks, and rebind |
1058 | * their vma mempolicies too. Because we still hold the global | |
2df167a3 | 1059 | * cgroup_mutex, we know that no other rebind effort will |
4225399a PJ |
1060 | * be contending for the global variable cpuset_being_rebound. |
1061 | * It's ok if we rebind the same mm twice; mpol_rebind_mm() | |
04c19fa6 | 1062 | * is idempotent. Also migrate pages in each mm to new nodes. |
4225399a | 1063 | */ |
04c19fa6 | 1064 | migrate = is_memory_migrate(cs); |
4225399a PJ |
1065 | for (i = 0; i < n; i++) { |
1066 | struct mm_struct *mm = mmarray[i]; | |
1067 | ||
1068 | mpol_rebind_mm(mm, &cs->mems_allowed); | |
e4e364e8 | 1069 | if (migrate) |
0b2f630a | 1070 | cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed); |
4225399a PJ |
1071 | mmput(mm); |
1072 | } | |
1073 | ||
2df167a3 | 1074 | /* We're done rebinding vmas to this cpuset's new mems_allowed. */ |
4225399a | 1075 | kfree(mmarray); |
8793d854 | 1076 | cpuset_being_rebound = NULL; |
4225399a | 1077 | retval = 0; |
59dac16f | 1078 | done: |
1da177e4 LT |
1079 | return retval; |
1080 | } | |
1081 | ||
0b2f630a MX |
1082 | /* |
1083 | * Handle user request to change the 'mems' memory placement | |
1084 | * of a cpuset. Needs to validate the request, update the | |
1085 | * cpuset's mems_allowed and mems_generation, and for each |
1086 | * task in the cpuset, rebind any vma mempolicies and if | |
1087 | * the cpuset is marked 'memory_migrate', migrate the task's |
1088 | * pages to the new memory. | |
1089 | * | |
1090 | * Call with cgroup_mutex held. May take callback_mutex during call. | |
1091 | * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, | |
1092 | * lock each such task's mm->mmap_sem, scan its vma's and rebind |
1093 | * their mempolicies to the cpuset's new mems_allowed. |
1094 | */ | |
1095 | static int update_nodemask(struct cpuset *cs, const char *buf) | |
1096 | { | |
1097 | struct cpuset trialcs; | |
1098 | nodemask_t oldmem; | |
1099 | int retval; | |
1100 | ||
1101 | /* | |
1102 | * top_cpuset.mems_allowed tracks node_states[N_HIGH_MEMORY]; |
1103 | * it's read-only | |
1104 | */ | |
1105 | if (cs == &top_cpuset) | |
1106 | return -EACCES; | |
1107 | ||
1108 | trialcs = *cs; | |
1109 | ||
1110 | /* | |
1111 | * An empty mems_allowed is ok iff there are no tasks in the cpuset. | |
1112 | * Since nodelist_parse() fails on an empty mask, we special case | |
1113 | * that parsing. The validate_change() call ensures that cpusets | |
1114 | * with tasks have memory. | |
1115 | */ | |
1116 | if (!*buf) { | |
1117 | nodes_clear(trialcs.mems_allowed); | |
1118 | } else { | |
1119 | retval = nodelist_parse(buf, trialcs.mems_allowed); | |
1120 | if (retval < 0) | |
1121 | goto done; | |
1122 | ||
1123 | if (!nodes_subset(trialcs.mems_allowed, | |
1124 | node_states[N_HIGH_MEMORY])) | |
1125 | return -EINVAL; | |
1126 | } | |
1127 | oldmem = cs->mems_allowed; | |
1128 | if (nodes_equal(oldmem, trialcs.mems_allowed)) { | |
1129 | retval = 0; /* Too easy - nothing to do */ | |
1130 | goto done; | |
1131 | } | |
1132 | retval = validate_change(cs, &trialcs); | |
1133 | if (retval < 0) | |
1134 | goto done; | |
1135 | ||
1136 | mutex_lock(&callback_mutex); | |
1137 | cs->mems_allowed = trialcs.mems_allowed; | |
1138 | cs->mems_generation = cpuset_mems_generation++; | |
1139 | mutex_unlock(&callback_mutex); | |
1140 | ||
1141 | retval = update_tasks_nodemask(cs, &oldmem); | |
1142 | done: | |
1143 | return retval; | |
1144 | } | |
1145 | ||
8793d854 PM |
1146 | int current_cpuset_is_being_rebound(void) |
1147 | { | |
1148 | return task_cs(current) == cpuset_being_rebound; | |
1149 | } | |
1150 | ||
5be7a479 | 1151 | static int update_relax_domain_level(struct cpuset *cs, s64 val) |
1d3504fc | 1152 | { |
30e0e178 LZ |
1153 | if (val < -1 || val >= SD_LV_MAX) |
1154 | return -EINVAL; | |
1d3504fc HS |
1155 | |
1156 | if (val != cs->relax_domain_level) { | |
1157 | cs->relax_domain_level = val; | |
c372e817 | 1158 | if (!cpus_empty(cs->cpus_allowed) && is_sched_load_balance(cs)) |
cf417141 | 1159 | async_rebuild_sched_domains(); |
1d3504fc HS |
1160 | } |
1161 | ||
1162 | return 0; | |
1163 | } | |
1164 | ||
1da177e4 LT |
1165 | /* |
1166 | * update_flag - read a 0 or a 1 in a file and update associated flag | |
78608366 PM |
1167 | * bit: the bit to update (see cpuset_flagbits_t) |
1168 | * cs: the cpuset to update | |
1169 | * turning_on: whether the flag is being set or cleared | |
053199ed | 1170 | * |
2df167a3 | 1171 | * Call with cgroup_mutex held. |
1da177e4 LT |
1172 | */ |
1173 | ||
700fe1ab PM |
1174 | static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, |
1175 | int turning_on) | |
1da177e4 | 1176 | { |
1da177e4 | 1177 | struct cpuset trialcs; |
607717a6 | 1178 | int err; |
40b6a762 | 1179 | int balance_flag_changed; |
1da177e4 | 1180 | |
1da177e4 LT |
1181 | trialcs = *cs; |
1182 | if (turning_on) | |
1183 | set_bit(bit, &trialcs.flags); | |
1184 | else | |
1185 | clear_bit(bit, &trialcs.flags); | |
1186 | ||
1187 | err = validate_change(cs, &trialcs); | |
85d7b949 DG |
1188 | if (err < 0) |
1189 | return err; | |
029190c5 | 1190 | |
029190c5 PJ |
1191 | balance_flag_changed = (is_sched_load_balance(cs) != |
1192 | is_sched_load_balance(&trialcs)); | |
1193 | ||
3d3f26a7 | 1194 | mutex_lock(&callback_mutex); |
69604067 | 1195 | cs->flags = trialcs.flags; |
3d3f26a7 | 1196 | mutex_unlock(&callback_mutex); |
85d7b949 | 1197 | |
40b6a762 | 1198 | if (!cpus_empty(trialcs.cpus_allowed) && balance_flag_changed) |
cf417141 | 1199 | async_rebuild_sched_domains(); |
029190c5 | 1200 | |
85d7b949 | 1201 | return 0; |
1da177e4 LT |
1202 | } |
1203 | ||
3e0d98b9 | 1204 | /* |
80f7228b | 1205 | * Frequency meter - How fast is some event occurring? |
3e0d98b9 PJ |
1206 | * |
1207 | * These routines manage a digitally filtered, constant time based, | |
1208 | * event frequency meter. There are four routines: | |
1209 | * fmeter_init() - initialize a frequency meter. | |
1210 | * fmeter_markevent() - called each time the event happens. | |
1211 | * fmeter_getrate() - returns the recent rate of such events. | |
1212 | * fmeter_update() - internal routine used to update fmeter. | |
1213 | * | |
1214 | * A common data structure is passed to each of these routines, | |
1215 | * which is used to keep track of the state required to manage the | |
1216 | * frequency meter and its digital filter. | |
1217 | * | |
1218 | * The filter works on the number of events marked per unit time. | |
1219 | * The filter is single-pole low-pass recursive (IIR). The time unit | |
1220 | * is 1 second. Arithmetic is done using 32-bit integers scaled to | |
1221 | * simulate 3 decimal digits of precision (multiplied by 1000). | |
1222 | * | |
1223 | * With an FM_COEF of 933, and a time base of 1 second, the filter | |
1224 | * has a half-life of 10 seconds, meaning that if the events quit | |
1225 | * happening, then the rate returned from the fmeter_getrate() | |
1226 | * will be cut in half each 10 seconds, until it converges to zero. | |
1227 | * | |
1228 | * It is not worth doing a real infinitely recursive filter. If more | |
1229 | * than FM_MAXTICKS ticks have elapsed since the last filter event, | |
1230 | * just compute FM_MAXTICKS ticks worth, by which point the level | |
1231 | * will be stable. | |
1232 | * | |
1233 | * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid | |
1234 | * arithmetic overflow in the fmeter_update() routine. | |
1235 | * | |
1236 | * Given the simple 32 bit integer arithmetic used, this meter works | |
1237 | * best for reporting rates between one per millisecond (msec) and | |
1238 | * one per 32 (approx) seconds. At constant rates faster than one | |
1239 | * per msec it maxes out at values just under 1,000,000. At constant | |
1240 | * rates between one per msec, and one per second it will stabilize | |
1241 | * to a value N*1000, where N is the rate of events per second. | |
1242 | * At constant rates between one per second and one per 32 seconds, | |
1243 | * it will be choppy, moving up on the seconds that have an event, | |
1244 | * and then decaying until the next event. At rates slower than | |
1245 | * about one in 32 seconds, it decays all the way back to zero between | |
1246 | * each event. | |
1247 | */ | |
1248 | ||
1249 | #define FM_COEF 933 /* coefficient for half-life of 10 secs */ | |
1250 | #define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */ | |
1251 | #define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */ | |
1252 | #define FM_SCALE 1000 /* faux fixed point scale */ | |
1253 | ||
1254 | /* Initialize a frequency meter */ | |
1255 | static void fmeter_init(struct fmeter *fmp) | |
1256 | { | |
1257 | fmp->cnt = 0; | |
1258 | fmp->val = 0; | |
1259 | fmp->time = 0; | |
1260 | spin_lock_init(&fmp->lock); | |
1261 | } | |
1262 | ||
1263 | /* Internal meter update - process cnt events and update value */ | |
1264 | static void fmeter_update(struct fmeter *fmp) | |
1265 | { | |
1266 | time_t now = get_seconds(); | |
1267 | time_t ticks = now - fmp->time; | |
1268 | ||
1269 | if (ticks == 0) | |
1270 | return; | |
1271 | ||
1272 | ticks = min(FM_MAXTICKS, ticks); | |
1273 | while (ticks-- > 0) | |
1274 | fmp->val = (FM_COEF * fmp->val) / FM_SCALE; | |
1275 | fmp->time = now; | |
1276 | ||
1277 | fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; | |
1278 | fmp->cnt = 0; | |
1279 | } | |
1280 | ||
1281 | /* Process any previous ticks, then bump cnt by one (times scale). */ | |
1282 | static void fmeter_markevent(struct fmeter *fmp) | |
1283 | { | |
1284 | spin_lock(&fmp->lock); | |
1285 | fmeter_update(fmp); | |
1286 | fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); | |
1287 | spin_unlock(&fmp->lock); | |
1288 | } | |
1289 | ||
1290 | /* Process any previous ticks, then return current value. */ | |
1291 | static int fmeter_getrate(struct fmeter *fmp) | |
1292 | { | |
1293 | int val; | |
1294 | ||
1295 | spin_lock(&fmp->lock); | |
1296 | fmeter_update(fmp); | |
1297 | val = fmp->val; | |
1298 | spin_unlock(&fmp->lock); | |
1299 | return val; | |
1300 | } | |
1301 | ||
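The half-life claim in the comment block above checks out numerically: (FM_COEF/FM_SCALE)^10 = 0.933^10 ≈ 0.50, so the filtered value roughly halves every 10 one-second ticks with no new events. The sketch below is a hypothetical user-space re-implementation of just the decay arithmetic (no spinlock, no get_seconds()), starting from a saturated value of 1,000,000.

```c
/* User-space model of the fmeter decay: apply the per-second IIR step
 * val = val * FM_COEF / FM_SCALE and observe the ~10-tick half-life. */
#include <stdio.h>

#define FM_COEF  933
#define FM_SCALE 1000

int main(void)
{
	int val = 1000000;	/* as if events had saturated the meter */
	int tick;

	for (tick = 1; tick <= 20; tick++) {
		val = (FM_COEF * val) / FM_SCALE;	/* one second of decay */
		if (tick % 10 == 0)
			printf("after %2d ticks: val = %d\n", tick, val);
	}
	/* prints roughly 500000 after 10 ticks and roughly 250000 after 20 */
	return 0;
}
```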
2df167a3 | 1302 | /* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */ |
8793d854 PM |
1303 | static int cpuset_can_attach(struct cgroup_subsys *ss, |
1304 | struct cgroup *cont, struct task_struct *tsk) | |
1da177e4 | 1305 | { |
8793d854 | 1306 | struct cpuset *cs = cgroup_cs(cont); |
1da177e4 | 1307 | |
1da177e4 LT |
1308 | if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) |
1309 | return -ENOSPC; | |
9985b0ba DR |
1310 | if (tsk->flags & PF_THREAD_BOUND) { |
1311 | cpumask_t mask; | |
1312 | ||
1313 | mutex_lock(&callback_mutex); | |
1314 | mask = cs->cpus_allowed; | |
1315 | mutex_unlock(&callback_mutex); | |
1316 | if (!cpus_equal(tsk->cpus_allowed, mask)) | |
1317 | return -EINVAL; | |
1318 | } | |
1da177e4 | 1319 | |
8793d854 PM |
1320 | return security_task_setscheduler(tsk, 0, NULL); |
1321 | } | |
1da177e4 | 1322 | |
8793d854 PM |
1323 | static void cpuset_attach(struct cgroup_subsys *ss, |
1324 | struct cgroup *cont, struct cgroup *oldcont, | |
1325 | struct task_struct *tsk) | |
1326 | { | |
1327 | cpumask_t cpus; | |
1328 | nodemask_t from, to; | |
1329 | struct mm_struct *mm; | |
1330 | struct cpuset *cs = cgroup_cs(cont); | |
1331 | struct cpuset *oldcs = cgroup_cs(oldcont); | |
9985b0ba | 1332 | int err; |
22fb52dd | 1333 | |
3d3f26a7 | 1334 | mutex_lock(&callback_mutex); |
1da177e4 | 1335 | guarantee_online_cpus(cs, &cpus); |
9985b0ba | 1336 | err = set_cpus_allowed_ptr(tsk, &cpus); |
8793d854 | 1337 | mutex_unlock(&callback_mutex); |
9985b0ba DR |
1338 | if (err) |
1339 | return; | |
1da177e4 | 1340 | |
45b07ef3 PJ |
1341 | from = oldcs->mems_allowed; |
1342 | to = cs->mems_allowed; | |
4225399a PJ |
1343 | mm = get_task_mm(tsk); |
1344 | if (mm) { | |
1345 | mpol_rebind_mm(mm, &to); | |
2741a559 | 1346 | if (is_memory_migrate(cs)) |
e4e364e8 | 1347 | cpuset_migrate_mm(mm, &from, &to); |
4225399a PJ |
1348 | mmput(mm); |
1349 | } | |
1350 | ||
1da177e4 LT |
1351 | } |
1352 | ||
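/*
 * Editor's illustration, not part of cpuset.c: the can_attach/attach
 * callbacks above run when a task is moved into a cpuset, which from user
 * space means writing its pid to the cpuset's "tasks" file.  A minimal
 * userspace sketch, assuming the cpuset filesystem is mounted at
 * /dev/cpuset and a cpuset "foo" already exists (needs <stdio.h>,
 * <string.h>, <fcntl.h> and <unistd.h>):
 *
 *	char pidbuf[16];
 *	int fd = open("/dev/cpuset/foo/tasks", O_WRONLY);
 *
 *	snprintf(pidbuf, sizeof(pidbuf), "%d", getpid());
 *	write(fd, pidbuf, strlen(pidbuf));	// triggers can_attach()/attach()
 *	close(fd);
 */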
1353 | /* The various types of files and directories in a cpuset file system */ | |
1354 | ||
1355 | typedef enum { | |
45b07ef3 | 1356 | FILE_MEMORY_MIGRATE, |
1da177e4 LT |
1357 | FILE_CPULIST, |
1358 | FILE_MEMLIST, | |
1359 | FILE_CPU_EXCLUSIVE, | |
1360 | FILE_MEM_EXCLUSIVE, | |
78608366 | 1361 | FILE_MEM_HARDWALL, |
029190c5 | 1362 | FILE_SCHED_LOAD_BALANCE, |
1d3504fc | 1363 | FILE_SCHED_RELAX_DOMAIN_LEVEL, |
3e0d98b9 PJ |
1364 | FILE_MEMORY_PRESSURE_ENABLED, |
1365 | FILE_MEMORY_PRESSURE, | |
825a46af PJ |
1366 | FILE_SPREAD_PAGE, |
1367 | FILE_SPREAD_SLAB, | |
1da177e4 LT |
1368 | } cpuset_filetype_t; |
1369 | ||
700fe1ab PM |
1370 | static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val) |
1371 | { | |
1372 | int retval = 0; | |
1373 | struct cpuset *cs = cgroup_cs(cgrp); | |
1374 | cpuset_filetype_t type = cft->private; | |
1375 | ||
e3712395 | 1376 | if (!cgroup_lock_live_group(cgrp)) |
700fe1ab | 1377 | return -ENODEV; |
700fe1ab PM |
1378 | |
1379 | switch (type) { | |
1da177e4 | 1380 | case FILE_CPU_EXCLUSIVE: |
700fe1ab | 1381 | retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); |
1da177e4 LT |
1382 | break; |
1383 | case FILE_MEM_EXCLUSIVE: | |
700fe1ab | 1384 | retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); |
1da177e4 | 1385 | break; |
78608366 PM |
1386 | case FILE_MEM_HARDWALL: |
1387 | retval = update_flag(CS_MEM_HARDWALL, cs, val); | |
1388 | break; | |
029190c5 | 1389 | case FILE_SCHED_LOAD_BALANCE: |
700fe1ab | 1390 | retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); |
1d3504fc | 1391 | break; |
45b07ef3 | 1392 | case FILE_MEMORY_MIGRATE: |
700fe1ab | 1393 | retval = update_flag(CS_MEMORY_MIGRATE, cs, val); |
45b07ef3 | 1394 | break; |
3e0d98b9 | 1395 | case FILE_MEMORY_PRESSURE_ENABLED: |
700fe1ab | 1396 | cpuset_memory_pressure_enabled = !!val; |
3e0d98b9 PJ |
1397 | break; |
1398 | case FILE_MEMORY_PRESSURE: | |
1399 | retval = -EACCES; | |
1400 | break; | |
825a46af | 1401 | case FILE_SPREAD_PAGE: |
700fe1ab | 1402 | retval = update_flag(CS_SPREAD_PAGE, cs, val); |
151a4420 | 1403 | cs->mems_generation = cpuset_mems_generation++; |
825a46af PJ |
1404 | break; |
1405 | case FILE_SPREAD_SLAB: | |
700fe1ab | 1406 | retval = update_flag(CS_SPREAD_SLAB, cs, val); |
151a4420 | 1407 | cs->mems_generation = cpuset_mems_generation++; |
825a46af | 1408 | break; |
1da177e4 LT |
1409 | default: |
1410 | retval = -EINVAL; | |
700fe1ab | 1411 | break; |
1da177e4 | 1412 | } |
8793d854 | 1413 | cgroup_unlock(); |
1da177e4 LT |
1414 | return retval; |
1415 | } | |
1416 | ||
5be7a479 PM |
1417 | static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val) |
1418 | { | |
1419 | int retval = 0; | |
1420 | struct cpuset *cs = cgroup_cs(cgrp); | |
1421 | cpuset_filetype_t type = cft->private; | |
1422 | ||
e3712395 | 1423 | if (!cgroup_lock_live_group(cgrp)) |
5be7a479 | 1424 | return -ENODEV; |
e3712395 | 1425 | |
5be7a479 PM |
1426 | switch (type) { |
1427 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: | |
1428 | retval = update_relax_domain_level(cs, val); | |
1429 | break; | |
1430 | default: | |
1431 | retval = -EINVAL; | |
1432 | break; | |
1433 | } | |
1434 | cgroup_unlock(); | |
1435 | return retval; | |
1436 | } | |
1437 | ||
e3712395 PM |
1438 | /* |
1439 | * Common handling for a write to a "cpus" or "mems" file. | |
1440 | */ | |
1441 | static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft, | |
1442 | const char *buf) | |
1443 | { | |
1444 | int retval = 0; | |
1445 | ||
1446 | if (!cgroup_lock_live_group(cgrp)) | |
1447 | return -ENODEV; | |
1448 | ||
1449 | switch (cft->private) { | |
1450 | case FILE_CPULIST: | |
1451 | retval = update_cpumask(cgroup_cs(cgrp), buf); | |
1452 | break; | |
1453 | case FILE_MEMLIST: | |
1454 | retval = update_nodemask(cgroup_cs(cgrp), buf); | |
1455 | break; | |
1456 | default: | |
1457 | retval = -EINVAL; | |
1458 | break; | |
1459 | } | |
1460 | cgroup_unlock(); | |
1461 | return retval; | |
1462 | } | |
1463 | ||
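/*
 * Editor's illustration, not part of cpuset.c: the handler above receives
 * the ascii lists that user space writes to a cpuset's "cpus" and "mems"
 * files.  A minimal userspace sketch, assuming the cpuset filesystem is
 * mounted at /dev/cpuset and a cpuset "foo" already exists (needs
 * <fcntl.h> and <unistd.h>):
 *
 *	int fd = open("/dev/cpuset/foo/cpus", O_WRONLY);
 *	write(fd, "0-3", 3);			// restrict to CPUs 0-3
 *	close(fd);
 *
 *	fd = open("/dev/cpuset/foo/mems", O_WRONLY);
 *	write(fd, "0", 1);			// restrict to memory node 0
 *	close(fd);
 */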
1da177e4 LT |
1464 | /* |
1465 | * These ascii lists should be read in a single call, by using a user | |
1466 | * buffer large enough to hold the entire map. If read in smaller | |
1467 | * chunks, there is no guarantee of atomicity. Since the display format | |
1468 | * used, list of ranges of sequential numbers, is variable length, | |
1469 | * and since these maps can change value dynamically, one could read | |
1470 | * gibberish by doing partial reads while a list was changing. | |
1471 | * A single large read to a buffer that crosses a page boundary is | |
1472 | * ok, because the result being copied to user land is not recomputed | |
1473 | * across a page fault. | |
1474 | */ | |
1475 | ||
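/*
 * Editor's illustration, not part of cpuset.c: per the caveat above, user
 * space should fetch these lists with one sufficiently large read() rather
 * than looping over small chunks.  A minimal sketch, assuming the cpuset
 * filesystem is mounted at /dev/cpuset (needs <fcntl.h> and <unistd.h>):
 *
 *	char buf[4096];
 *	int fd = open("/dev/cpuset/foo/cpus", O_RDONLY);
 *	ssize_t n = read(fd, buf, sizeof(buf) - 1);	// one large read
 *
 *	if (n >= 0)
 *		buf[n] = '\0';				// e.g. "0-3\n"
 *	close(fd);
 */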
1476 | static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs) | |
1477 | { | |
1478 | cpumask_t mask; | |
1479 | ||
3d3f26a7 | 1480 | mutex_lock(&callback_mutex); |
1da177e4 | 1481 | mask = cs->cpus_allowed; |
3d3f26a7 | 1482 | mutex_unlock(&callback_mutex); |
1da177e4 LT |
1483 | |
1484 | return cpulist_scnprintf(page, PAGE_SIZE, mask); | |
1485 | } | |
1486 | ||
1487 | static int cpuset_sprintf_memlist(char *page, struct cpuset *cs) | |
1488 | { | |
1489 | nodemask_t mask; | |
1490 | ||
3d3f26a7 | 1491 | mutex_lock(&callback_mutex); |
1da177e4 | 1492 | mask = cs->mems_allowed; |
3d3f26a7 | 1493 | mutex_unlock(&callback_mutex); |
1da177e4 LT |
1494 | |
1495 | return nodelist_scnprintf(page, PAGE_SIZE, mask); | |
1496 | } | |
1497 | ||
8793d854 PM |
1498 | static ssize_t cpuset_common_file_read(struct cgroup *cont, |
1499 | struct cftype *cft, | |
1500 | struct file *file, | |
1501 | char __user *buf, | |
1502 | size_t nbytes, loff_t *ppos) | |
1da177e4 | 1503 | { |
8793d854 | 1504 | struct cpuset *cs = cgroup_cs(cont); |
1da177e4 LT |
1505 | cpuset_filetype_t type = cft->private; |
1506 | char *page; | |
1507 | ssize_t retval = 0; | |
1508 | char *s; | |
1da177e4 | 1509 | |
e12ba74d | 1510 | if (!(page = (char *)__get_free_page(GFP_TEMPORARY))) |
1da177e4 LT |
1511 | return -ENOMEM; |
1512 | ||
1513 | s = page; | |
1514 | ||
1515 | switch (type) { | |
1516 | case FILE_CPULIST: | |
1517 | s += cpuset_sprintf_cpulist(s, cs); | |
1518 | break; | |
1519 | case FILE_MEMLIST: | |
1520 | s += cpuset_sprintf_memlist(s, cs); | |
1521 | break; | |
1da177e4 LT |
1522 | default: |
1523 | retval = -EINVAL; | |
1524 | goto out; | |
1525 | } | |
1526 | *s++ = '\n'; | |
1da177e4 | 1527 | |
eacaa1f5 | 1528 | retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page); |
1da177e4 LT |
1529 | out: |
1530 | free_page((unsigned long)page); | |
1531 | return retval; | |
1532 | } | |
1533 | ||
700fe1ab PM |
1534 | static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft) |
1535 | { | |
1536 | struct cpuset *cs = cgroup_cs(cont); | |
1537 | cpuset_filetype_t type = cft->private; | |
1538 | switch (type) { | |
1539 | case FILE_CPU_EXCLUSIVE: | |
1540 | return is_cpu_exclusive(cs); | |
1541 | case FILE_MEM_EXCLUSIVE: | |
1542 | return is_mem_exclusive(cs); | |
78608366 PM |
1543 | case FILE_MEM_HARDWALL: |
1544 | return is_mem_hardwall(cs); | |
700fe1ab PM |
1545 | case FILE_SCHED_LOAD_BALANCE: |
1546 | return is_sched_load_balance(cs); | |
1547 | case FILE_MEMORY_MIGRATE: | |
1548 | return is_memory_migrate(cs); | |
1549 | case FILE_MEMORY_PRESSURE_ENABLED: | |
1550 | return cpuset_memory_pressure_enabled; | |
1551 | case FILE_MEMORY_PRESSURE: | |
1552 | return fmeter_getrate(&cs->fmeter); | |
1553 | case FILE_SPREAD_PAGE: | |
1554 | return is_spread_page(cs); | |
1555 | case FILE_SPREAD_SLAB: | |
1556 | return is_spread_slab(cs); | |
1557 | default: | |
1558 | BUG(); | |
1559 | } | |
cf417141 MK |
1560 | |
1561 | /* Unreachable but makes gcc happy */ | |
1562 | return 0; | |
700fe1ab | 1563 | } |
1da177e4 | 1564 | |
5be7a479 PM |
1565 | static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft) |
1566 | { | |
1567 | struct cpuset *cs = cgroup_cs(cont); | |
1568 | cpuset_filetype_t type = cft->private; | |
1569 | switch (type) { | |
1570 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: | |
1571 | return cs->relax_domain_level; | |
1572 | default: | |
1573 | BUG(); | |
1574 | } | |
cf417141 MK |
1575 | |
1576 | /* Unreachable but makes gcc happy */ | |
1577 | return 0; | |
5be7a479 PM |
1578 | } |
1579 | ||
1da177e4 LT |
1580 | |
1581 | /* | |
1582 | * for the common functions, 'private' gives the type of file | |
1583 | */ | |
1584 | ||
addf2c73 PM |
1585 | static struct cftype files[] = { |
1586 | { | |
1587 | .name = "cpus", | |
1588 | .read = cpuset_common_file_read, | |
e3712395 PM |
1589 | .write_string = cpuset_write_resmask, |
1590 | .max_write_len = (100U + 6 * NR_CPUS), | |
addf2c73 PM |
1591 | .private = FILE_CPULIST, |
1592 | }, | |
1593 | ||
1594 | { | |
1595 | .name = "mems", | |
1596 | .read = cpuset_common_file_read, | |
e3712395 PM |
1597 | .write_string = cpuset_write_resmask, |
1598 | .max_write_len = (100U + 6 * MAX_NUMNODES), | |
addf2c73 PM |
1599 | .private = FILE_MEMLIST, |
1600 | }, | |
1601 | ||
1602 | { | |
1603 | .name = "cpu_exclusive", | |
1604 | .read_u64 = cpuset_read_u64, | |
1605 | .write_u64 = cpuset_write_u64, | |
1606 | .private = FILE_CPU_EXCLUSIVE, | |
1607 | }, | |
1608 | ||
1609 | { | |
1610 | .name = "mem_exclusive", | |
1611 | .read_u64 = cpuset_read_u64, | |
1612 | .write_u64 = cpuset_write_u64, | |
1613 | .private = FILE_MEM_EXCLUSIVE, | |
1614 | }, | |
1615 | ||
78608366 PM |
1616 | { |
1617 | .name = "mem_hardwall", | |
1618 | .read_u64 = cpuset_read_u64, | |
1619 | .write_u64 = cpuset_write_u64, | |
1620 | .private = FILE_MEM_HARDWALL, | |
1621 | }, | |
1622 | ||
addf2c73 PM |
1623 | { |
1624 | .name = "sched_load_balance", | |
1625 | .read_u64 = cpuset_read_u64, | |
1626 | .write_u64 = cpuset_write_u64, | |
1627 | .private = FILE_SCHED_LOAD_BALANCE, | |
1628 | }, | |
1629 | ||
1630 | { | |
1631 | .name = "sched_relax_domain_level", | |
5be7a479 PM |
1632 | .read_s64 = cpuset_read_s64, |
1633 | .write_s64 = cpuset_write_s64, | |
addf2c73 PM |
1634 | .private = FILE_SCHED_RELAX_DOMAIN_LEVEL, |
1635 | }, | |
1636 | ||
1637 | { | |
1638 | .name = "memory_migrate", | |
1639 | .read_u64 = cpuset_read_u64, | |
1640 | .write_u64 = cpuset_write_u64, | |
1641 | .private = FILE_MEMORY_MIGRATE, | |
1642 | }, | |
1643 | ||
1644 | { | |
1645 | .name = "memory_pressure", | |
1646 | .read_u64 = cpuset_read_u64, | |
1647 | .write_u64 = cpuset_write_u64, | |
1648 | .private = FILE_MEMORY_PRESSURE, | |
1649 | }, | |
1650 | ||
1651 | { | |
1652 | .name = "memory_spread_page", | |
1653 | .read_u64 = cpuset_read_u64, | |
1654 | .write_u64 = cpuset_write_u64, | |
1655 | .private = FILE_SPREAD_PAGE, | |
1656 | }, | |
1657 | ||
1658 | { | |
1659 | .name = "memory_spread_slab", | |
1660 | .read_u64 = cpuset_read_u64, | |
1661 | .write_u64 = cpuset_write_u64, | |
1662 | .private = FILE_SPREAD_SLAB, | |
1663 | }, | |
45b07ef3 PJ |
1664 | }; |
1665 | ||
3e0d98b9 PJ |
1666 | static struct cftype cft_memory_pressure_enabled = { |
1667 | .name = "memory_pressure_enabled", | |
700fe1ab PM |
1668 | .read_u64 = cpuset_read_u64, |
1669 | .write_u64 = cpuset_write_u64, | |
3e0d98b9 PJ |
1670 | .private = FILE_MEMORY_PRESSURE_ENABLED, |
1671 | }; | |
1672 | ||
8793d854 | 1673 | static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont) |
1da177e4 LT |
1674 | { |
1675 | int err; | |
1676 | ||
addf2c73 PM |
1677 | err = cgroup_add_files(cont, ss, files, ARRAY_SIZE(files)); |
1678 | if (err) | |
1da177e4 | 1679 | return err; |
8793d854 | 1680 | /* memory_pressure_enabled is in root cpuset only */ |
addf2c73 | 1681 | if (!cont->parent) |
8793d854 | 1682 | err = cgroup_add_file(cont, ss, |
addf2c73 PM |
1683 | &cft_memory_pressure_enabled); |
1684 | return err; | |
1da177e4 LT |
1685 | } |
1686 | ||
8793d854 PM |
1687 | /* |
1688 | * post_clone() is called at the end of cgroup_clone(). | |
1689 | * 'cgroup' was just created automatically as a result of | |
1690 | * a cgroup_clone(), and the current task is about to | |
1691 | * be moved into 'cgroup'. | |
1692 | * | |
1693 | * Currently we refuse to set up the cgroup - thereby | |
1694 | * refusing to let the task be entered, and as a result refusing | |
1695 | * the sys_unshare() or clone() which initiated it - if any | |
1696 | * sibling cpusets have exclusive cpus or mem. | |
1697 | * | |
1698 | * If this becomes a problem for some users who wish to | |
1699 | * allow that scenario, then cpuset_post_clone() could be | |
1700 | * changed to grant parent->cpus_allowed-sibling_cpus_exclusive | |
2df167a3 PM |
1701 | * (and likewise for mems) to the new cgroup. Called with cgroup_mutex |
1702 | * held. | |
8793d854 PM |
1703 | */ |
1704 | static void cpuset_post_clone(struct cgroup_subsys *ss, | |
1705 | struct cgroup *cgroup) | |
1706 | { | |
1707 | struct cgroup *parent, *child; | |
1708 | struct cpuset *cs, *parent_cs; | |
1709 | ||
1710 | parent = cgroup->parent; | |
1711 | list_for_each_entry(child, &parent->children, sibling) { | |
1712 | cs = cgroup_cs(child); | |
1713 | if (is_mem_exclusive(cs) || is_cpu_exclusive(cs)) | |
1714 | return; | |
1715 | } | |
1716 | cs = cgroup_cs(cgroup); | |
1717 | parent_cs = cgroup_cs(parent); | |
1718 | ||
1719 | cs->mems_allowed = parent_cs->mems_allowed; | |
1720 | cs->cpus_allowed = parent_cs->cpus_allowed; | |
1721 | return; | |
1722 | } | |
1723 | ||
1da177e4 LT |
1724 | /* |
1725 | * cpuset_create - create a cpuset | |
2df167a3 PM |
1726 | * ss: cpuset cgroup subsystem |
1727 | * cont: control group that the new cpuset will be part of | |
1da177e4 LT |
1728 | */ |
1729 | ||
8793d854 PM |
1730 | static struct cgroup_subsys_state *cpuset_create( |
1731 | struct cgroup_subsys *ss, | |
1732 | struct cgroup *cont) | |
1da177e4 LT |
1733 | { |
1734 | struct cpuset *cs; | |
8793d854 | 1735 | struct cpuset *parent; |
1da177e4 | 1736 | |
8793d854 PM |
1737 | if (!cont->parent) { |
1738 | /* This is early initialization for the top cgroup */ | |
1739 | top_cpuset.mems_generation = cpuset_mems_generation++; | |
1740 | return &top_cpuset.css; | |
1741 | } | |
1742 | parent = cgroup_cs(cont->parent); | |
1da177e4 LT |
1743 | cs = kmalloc(sizeof(*cs), GFP_KERNEL); |
1744 | if (!cs) | |
8793d854 | 1745 | return ERR_PTR(-ENOMEM); |
1da177e4 | 1746 | |
cf2a473c | 1747 | cpuset_update_task_memory_state(); |
1da177e4 | 1748 | cs->flags = 0; |
825a46af PJ |
1749 | if (is_spread_page(parent)) |
1750 | set_bit(CS_SPREAD_PAGE, &cs->flags); | |
1751 | if (is_spread_slab(parent)) | |
1752 | set_bit(CS_SPREAD_SLAB, &cs->flags); | |
029190c5 | 1753 | set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); |
f9a86fcb MT |
1754 | cpus_clear(cs->cpus_allowed); |
1755 | nodes_clear(cs->mems_allowed); | |
151a4420 | 1756 | cs->mems_generation = cpuset_mems_generation++; |
3e0d98b9 | 1757 | fmeter_init(&cs->fmeter); |
1d3504fc | 1758 | cs->relax_domain_level = -1; |
1da177e4 LT |
1759 | |
1760 | cs->parent = parent; | |
202f72d5 | 1761 | number_of_cpusets++; |
8793d854 | 1762 | return &cs->css ; |
1da177e4 LT |
1763 | } |
1764 | ||
029190c5 | 1765 | /* |
029190c5 PJ |
1766 | * If the cpuset being removed has its flag 'sched_load_balance' |
1767 | * enabled, then simulate turning sched_load_balance off, which | |
cf417141 | 1768 | * will call async_rebuild_sched_domains(). |
029190c5 PJ |
1769 | */ |
1770 | ||
8793d854 | 1771 | static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont) |
1da177e4 | 1772 | { |
8793d854 | 1773 | struct cpuset *cs = cgroup_cs(cont); |
1da177e4 | 1774 | |
cf2a473c | 1775 | cpuset_update_task_memory_state(); |
029190c5 PJ |
1776 | |
1777 | if (is_sched_load_balance(cs)) | |
700fe1ab | 1778 | update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); |
029190c5 | 1779 | |
202f72d5 | 1780 | number_of_cpusets--; |
8793d854 | 1781 | kfree(cs); |
1da177e4 LT |
1782 | } |
1783 | ||
8793d854 PM |
1784 | struct cgroup_subsys cpuset_subsys = { |
1785 | .name = "cpuset", | |
1786 | .create = cpuset_create, | |
cf417141 | 1787 | .destroy = cpuset_destroy, |
8793d854 PM |
1788 | .can_attach = cpuset_can_attach, |
1789 | .attach = cpuset_attach, | |
1790 | .populate = cpuset_populate, | |
1791 | .post_clone = cpuset_post_clone, | |
1792 | .subsys_id = cpuset_subsys_id, | |
1793 | .early_init = 1, | |
1794 | }; | |
1795 | ||
c417f024 PJ |
1796 | /* |
1797 | * cpuset_init_early - just enough so that the calls to | |
1798 | * cpuset_update_task_memory_state() in early init code | |
1799 | * are harmless. | |
1800 | */ | |
1801 | ||
1802 | int __init cpuset_init_early(void) | |
1803 | { | |
8793d854 | 1804 | top_cpuset.mems_generation = cpuset_mems_generation++; |
c417f024 PJ |
1805 | return 0; |
1806 | } | |
1807 | ||
8793d854 | 1808 | |
1da177e4 LT |
1809 | /** |
1810 | * cpuset_init - initialize cpusets at system boot | |
1811 | * | |
1812 | * Description: Initialize top_cpuset and the cpuset internal file system, | |
1813 | **/ | |
1814 | ||
1815 | int __init cpuset_init(void) | |
1816 | { | |
8793d854 | 1817 | int err = 0; |
1da177e4 | 1818 | |
f9a86fcb MT |
1819 | cpus_setall(top_cpuset.cpus_allowed); |
1820 | nodes_setall(top_cpuset.mems_allowed); | |
1da177e4 | 1821 | |
3e0d98b9 | 1822 | fmeter_init(&top_cpuset.fmeter); |
151a4420 | 1823 | top_cpuset.mems_generation = cpuset_mems_generation++; |
029190c5 | 1824 | set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags); |
1d3504fc | 1825 | top_cpuset.relax_domain_level = -1; |
1da177e4 | 1826 | |
1da177e4 LT |
1827 | err = register_filesystem(&cpuset_fs_type); |
1828 | if (err < 0) | |
8793d854 PM |
1829 | return err; |
1830 | ||
202f72d5 | 1831 | number_of_cpusets = 1; |
8793d854 | 1832 | return 0; |
1da177e4 LT |
1833 | } |
1834 | ||
956db3ca CW |
1835 | /** |
1836 | * cpuset_do_move_task - move a given task to another cpuset | |
1837 | * @tsk: pointer to task_struct of the task to move | |
1838 | * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner | |
1839 | * | |
1840 | * Called by cgroup_scan_tasks() for each task in a cgroup. | |
1842 | * Attaches each scanned task to the destination cpuset. | |
1842 | */ | |
9e0c914c AB |
1843 | static void cpuset_do_move_task(struct task_struct *tsk, |
1844 | struct cgroup_scanner *scan) | |
956db3ca CW |
1845 | { |
1846 | struct cpuset_hotplug_scanner *chsp; | |
1847 | ||
1848 | chsp = container_of(scan, struct cpuset_hotplug_scanner, scan); | |
1849 | cgroup_attach_task(chsp->to, tsk); | |
1850 | } | |
1851 | ||
1852 | /** | |
1853 | * move_member_tasks_to_cpuset - move tasks from one cpuset to another | |
1854 | * @from: cpuset in which the tasks currently reside | |
1855 | * @to: cpuset to which the tasks will be moved | |
1856 | * | |
c8d9c90c PJ |
1857 | * Called with cgroup_mutex held |
1858 | * callback_mutex must not be held, as cpuset_attach() will take it. | |
956db3ca CW |
1859 | * |
1860 | * The cgroup_scan_tasks() function will scan all the tasks in a cgroup, | |
1861 | * calling callback functions for each. | |
1862 | */ | |
1863 | static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to) | |
1864 | { | |
1865 | struct cpuset_hotplug_scanner scan; | |
1866 | ||
1867 | scan.scan.cg = from->css.cgroup; | |
1868 | scan.scan.test_task = NULL; /* select all tasks in cgroup */ | |
1869 | scan.scan.process_task = cpuset_do_move_task; | |
1870 | scan.scan.heap = NULL; | |
1871 | scan.to = to->css.cgroup; | |
1872 | ||
da5ef6bb | 1873 | if (cgroup_scan_tasks(&scan.scan)) |
956db3ca CW |
1874 | printk(KERN_ERR "move_member_tasks_to_cpuset: " |
1875 | "cgroup_scan_tasks failed\n"); | |
1876 | } | |
1877 | ||
b1aac8bb | 1878 | /* |
cf417141 | 1879 | * If CPU and/or memory hotplug handlers, below, unplug any CPUs |
b1aac8bb PJ |
1880 | * or memory nodes, we need to walk over the cpuset hierarchy, |
1881 | * removing that CPU or node from all cpusets. If this removes the | |
956db3ca CW |
1882 | * last CPU or node from a cpuset, then move the tasks in the empty |
1883 | * cpuset to its next-highest non-empty parent. | |
b1aac8bb | 1884 | * |
c8d9c90c PJ |
1885 | * Called with cgroup_mutex held |
1886 | * callback_mutex must not be held, as cpuset_attach() will take it. | |
b1aac8bb | 1887 | */ |
956db3ca CW |
1888 | static void remove_tasks_in_empty_cpuset(struct cpuset *cs) |
1889 | { | |
1890 | struct cpuset *parent; | |
1891 | ||
c8d9c90c PJ |
1892 | /* |
1893 | * The cgroup's css_sets list is in use if there are tasks | |
1894 | * in the cpuset; the list is empty if there are none; | |
1895 | * the cs->css.refcnt always seems to be 0. | |
1896 | */ | |
956db3ca CW |
1897 | if (list_empty(&cs->css.cgroup->css_sets)) |
1898 | return; | |
b1aac8bb | 1899 | |
956db3ca CW |
1900 | /* |
1901 | * Find its next-highest non-empty parent (the top cpuset | |
1902 | * has online cpus, so it can't be empty). | |
1903 | */ | |
1904 | parent = cs->parent; | |
b4501295 PJ |
1905 | while (cpus_empty(parent->cpus_allowed) || |
1906 | nodes_empty(parent->mems_allowed)) | |
956db3ca | 1907 | parent = parent->parent; |
956db3ca CW |
1908 | |
1909 | move_member_tasks_to_cpuset(cs, parent); | |
1910 | } | |
1911 | ||
1912 | /* | |
1913 | * Walk the specified cpuset subtree and look for empty cpusets. | |
1914 | * The tasks of such cpuset must be moved to a parent cpuset. | |
1915 | * | |
2df167a3 | 1916 | * Called with cgroup_mutex held. We take callback_mutex to modify |
956db3ca CW |
1917 | * cpus_allowed and mems_allowed. |
1918 | * | |
1919 | * This walk processes the tree from top to bottom, completing one layer | |
1920 | * before dropping down to the next. It always processes a node before | |
1921 | * any of its children. | |
1922 | * | |
1923 | * For now, since we lack memory hot unplug, we'll never see a cpuset | |
1924 | * that has tasks along with an empty 'mems'. But if we did see such | |
1925 | * a cpuset, we'd handle it just like we do if its 'cpus' was empty. | |
1926 | */ | |
d294eb83 | 1927 | static void scan_for_empty_cpusets(struct cpuset *root) |
b1aac8bb | 1928 | { |
8d1e6266 | 1929 | LIST_HEAD(queue); |
956db3ca CW |
1930 | struct cpuset *cp; /* scans cpusets being updated */ |
1931 | struct cpuset *child; /* scans child cpusets of cp */ | |
8793d854 | 1932 | struct cgroup *cont; |
f9b4fb8d | 1933 | nodemask_t oldmems; |
b1aac8bb | 1934 | |
956db3ca CW |
1935 | list_add_tail((struct list_head *)&root->stack_list, &queue); |
1936 | ||
956db3ca | 1937 | while (!list_empty(&queue)) { |
8d1e6266 | 1938 | cp = list_first_entry(&queue, struct cpuset, stack_list); |
956db3ca CW |
1939 | list_del(queue.next); |
1940 | list_for_each_entry(cont, &cp->css.cgroup->children, sibling) { | |
1941 | child = cgroup_cs(cont); | |
1942 | list_add_tail(&child->stack_list, &queue); | |
1943 | } | |
b4501295 PJ |
1944 | |
1945 | /* Continue past cpusets with all cpus, mems online */ | |
1946 | if (cpus_subset(cp->cpus_allowed, cpu_online_map) && | |
1947 | nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY])) | |
1948 | continue; | |
1949 | ||
f9b4fb8d MX |
1950 | oldmems = cp->mems_allowed; |
1951 | ||
956db3ca | 1952 | /* Remove offline cpus and mems from this cpuset. */ |
b4501295 | 1953 | mutex_lock(&callback_mutex); |
956db3ca CW |
1954 | cpus_and(cp->cpus_allowed, cp->cpus_allowed, cpu_online_map); |
1955 | nodes_and(cp->mems_allowed, cp->mems_allowed, | |
1956 | node_states[N_HIGH_MEMORY]); | |
b4501295 PJ |
1957 | mutex_unlock(&callback_mutex); |
1958 | ||
1959 | /* Move tasks from the empty cpuset to a parent */ | |
c8d9c90c | 1960 | if (cpus_empty(cp->cpus_allowed) || |
b4501295 | 1961 | nodes_empty(cp->mems_allowed)) |
956db3ca | 1962 | remove_tasks_in_empty_cpuset(cp); |
f9b4fb8d | 1963 | else { |
4e74339a | 1964 | update_tasks_cpumask(cp, NULL); |
f9b4fb8d MX |
1965 | update_tasks_nodemask(cp, &oldmems); |
1966 | } | |
b1aac8bb PJ |
1967 | } |
1968 | } | |
1969 | ||
4c4d50f7 PJ |
1970 | /* |
1971 | * The top_cpuset tracks what CPUs and Memory Nodes are online, | |
1972 | * period. This is necessary in order to make cpusets transparent | |
1973 | * (of no effect) on systems that are actively using CPU hotplug | |
1974 | * but making no active use of cpusets. | |
1975 | * | |
38837fc7 PJ |
1976 | * This routine ensures that top_cpuset.cpus_allowed tracks |
1977 | * cpu_online_map on each CPU hotplug (cpuhp) event. | |
cf417141 MK |
1978 | * |
1979 | * Called within get_online_cpus(). Needs to call cgroup_lock() | |
1980 | * before calling generate_sched_domains(). | |
4c4d50f7 | 1981 | */ |
cf417141 | 1982 | static int cpuset_track_online_cpus(struct notifier_block *unused_nb, |
029190c5 | 1983 | unsigned long phase, void *unused_cpu) |
4c4d50f7 | 1984 | { |
cf417141 MK |
1985 | struct sched_domain_attr *attr; |
1986 | cpumask_t *doms; | |
1987 | int ndoms; | |
1988 | ||
3e84050c | 1989 | switch (phase) { |
3e84050c DA |
1990 | case CPU_ONLINE: |
1991 | case CPU_ONLINE_FROZEN: | |
1992 | case CPU_DEAD: | |
1993 | case CPU_DEAD_FROZEN: | |
3e84050c | 1994 | break; |
cf417141 | 1995 | |
3e84050c | 1996 | default: |
ac076758 | 1997 | return NOTIFY_DONE; |
3e84050c | 1998 | } |
ac076758 | 1999 | |
cf417141 MK |
2000 | cgroup_lock(); |
2001 | top_cpuset.cpus_allowed = cpu_online_map; | |
2002 | scan_for_empty_cpusets(&top_cpuset); | |
2003 | ndoms = generate_sched_domains(&doms, &attr); | |
2004 | cgroup_unlock(); | |
2005 | ||
2006 | /* Have scheduler rebuild the domains */ | |
2007 | partition_sched_domains(ndoms, doms, attr); | |
2008 | ||
3e84050c | 2009 | return NOTIFY_OK; |
4c4d50f7 | 2010 | } |
4c4d50f7 | 2011 | |
b1aac8bb | 2012 | #ifdef CONFIG_MEMORY_HOTPLUG |
38837fc7 | 2013 | /* |
0e1e7c7a | 2014 | * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY]. |
cf417141 MK |
2015 | * Call this routine anytime after node_states[N_HIGH_MEMORY] changes. |
2016 | * See also the previous routine cpuset_track_online_cpus(). | |
38837fc7 | 2017 | */ |
1af98928 | 2018 | void cpuset_track_online_nodes(void) |
38837fc7 | 2019 | { |
cf417141 MK |
2020 | cgroup_lock(); |
2021 | top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; | |
2022 | scan_for_empty_cpusets(&top_cpuset); | |
2023 | cgroup_unlock(); | |
38837fc7 PJ |
2024 | } |
2025 | #endif | |
2026 | ||
1da177e4 LT |
2027 | /** |
2028 | * cpuset_init_smp - initialize cpus_allowed | |
2029 | * | |
2030 | * Description: Finish top cpuset after cpu, node maps are initialized | |
2031 | **/ | |
2032 | ||
2033 | void __init cpuset_init_smp(void) | |
2034 | { | |
2035 | top_cpuset.cpus_allowed = cpu_online_map; | |
0e1e7c7a | 2036 | top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; |
4c4d50f7 | 2037 | |
cf417141 | 2038 | hotcpu_notifier(cpuset_track_online_cpus, 0); |
1da177e4 LT |
2039 | } |
2040 | ||
2041 | /** | |
1da177e4 LT |
2042 | * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset. |
2043 | * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. | |
f9a86fcb | 2044 | * @pmask: pointer to cpumask_t variable to receive cpus_allowed set. |
1da177e4 LT |
2045 | * |
2046 | * Description: Returns the cpumask_t cpus_allowed of the cpuset | |
2047 | * attached to the specified @tsk. Guaranteed to return some non-empty | |
2048 | * subset of cpu_online_map, even if this means going outside the | |
2049 | * tasks cpuset. | |
2050 | **/ | |
2051 | ||
f9a86fcb | 2052 | void cpuset_cpus_allowed(struct task_struct *tsk, cpumask_t *pmask) |
1da177e4 | 2053 | { |
3d3f26a7 | 2054 | mutex_lock(&callback_mutex); |
f9a86fcb | 2055 | cpuset_cpus_allowed_locked(tsk, pmask); |
470fd646 | 2056 | mutex_unlock(&callback_mutex); |
470fd646 CW |
2057 | } |
2058 | ||
2059 | /** | |
2060 | * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset. | |
2df167a3 | 2061 | * Must be called with callback_mutex held. |
470fd646 | 2062 | **/ |
f9a86fcb | 2063 | void cpuset_cpus_allowed_locked(struct task_struct *tsk, cpumask_t *pmask) |
470fd646 | 2064 | { |
909d75a3 | 2065 | task_lock(tsk); |
f9a86fcb | 2066 | guarantee_online_cpus(task_cs(tsk), pmask); |
909d75a3 | 2067 | task_unlock(tsk); |
1da177e4 LT |
2068 | } |
2069 | ||
2070 | void cpuset_init_current_mems_allowed(void) | |
2071 | { | |
f9a86fcb | 2072 | nodes_setall(current->mems_allowed); |
1da177e4 LT |
2073 | } |
2074 | ||
909d75a3 PJ |
2075 | /** |
2076 | * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset. | |
2077 | * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed. | |
2078 | * | |
2079 | * Description: Returns the nodemask_t mems_allowed of the cpuset | |
2080 | * attached to the specified @tsk. Guaranteed to return some non-empty | |
0e1e7c7a | 2081 | * subset of node_states[N_HIGH_MEMORY], even if this means going outside the |
909d75a3 PJ |
2082 | * tasks cpuset. |
2083 | **/ | |
2084 | ||
2085 | nodemask_t cpuset_mems_allowed(struct task_struct *tsk) | |
2086 | { | |
2087 | nodemask_t mask; | |
2088 | ||
3d3f26a7 | 2089 | mutex_lock(&callback_mutex); |
909d75a3 | 2090 | task_lock(tsk); |
8793d854 | 2091 | guarantee_online_mems(task_cs(tsk), &mask); |
909d75a3 | 2092 | task_unlock(tsk); |
3d3f26a7 | 2093 | mutex_unlock(&callback_mutex); |
909d75a3 PJ |
2094 | |
2095 | return mask; | |
2096 | } | |
2097 | ||
d9fd8a6d | 2098 | /** |
19770b32 MG |
2099 | * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed |
2100 | * @nodemask: the nodemask to be checked | |
d9fd8a6d | 2101 | * |
19770b32 | 2102 | * Are any of the nodes in the nodemask allowed in current->mems_allowed? |
1da177e4 | 2103 | */ |
19770b32 | 2104 | int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) |
1da177e4 | 2105 | { |
19770b32 | 2106 | return nodes_intersects(*nodemask, current->mems_allowed); |
1da177e4 LT |
2107 | } |
2108 | ||
9bf2229f | 2109 | /* |
78608366 PM |
2110 | * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or |
2111 | * mem_hardwall ancestor to the specified cpuset. Call holding | |
2112 | * callback_mutex. If no ancestor is mem_exclusive or mem_hardwall | |
2113 | * (an unusual configuration), then returns the root cpuset. | |
9bf2229f | 2114 | */ |
78608366 | 2115 | static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs) |
9bf2229f | 2116 | { |
78608366 | 2117 | while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && cs->parent) |
9bf2229f PJ |
2118 | cs = cs->parent; |
2119 | return cs; | |
2120 | } | |
2121 | ||
d9fd8a6d | 2122 | /** |
02a0e53d | 2123 | * cpuset_zone_allowed_softwall - Can we allocate on zone z's memory node? |
9bf2229f | 2124 | * @z: is this zone on an allowed node? |
02a0e53d | 2125 | * @gfp_mask: memory allocation flags |
d9fd8a6d | 2126 | * |
02a0e53d PJ |
2127 | * If we're in interrupt, yes, we can always allocate. If |
2128 | * __GFP_THISNODE is set, yes, we can always allocate. If zone | |
9bf2229f PJ |
2129 | * z's node is in our tasks mems_allowed, yes. If it's not a |
2130 | * __GFP_HARDWALL request and this zone's node is in the nearest | |
78608366 | 2131 | * hardwalled cpuset ancestor to this tasks cpuset, yes. |
c596d9f3 DR |
2132 | * If the task has been OOM killed and has access to memory reserves |
2133 | * as specified by the TIF_MEMDIE flag, yes. | |
9bf2229f PJ |
2134 | * Otherwise, no. |
2135 | * | |
02a0e53d PJ |
2136 | * If __GFP_HARDWALL is set, cpuset_zone_allowed_softwall() |
2137 | * reduces to cpuset_zone_allowed_hardwall(). Otherwise, | |
2138 | * cpuset_zone_allowed_softwall() might sleep, and might allow a zone | |
2139 | * from an enclosing cpuset. | |
2140 | * | |
2141 | * cpuset_zone_allowed_hardwall() only handles the simpler case of | |
2142 | * hardwall cpusets, and never sleeps. | |
2143 | * | |
2144 | * The __GFP_THISNODE placement logic is really handled elsewhere, | |
2145 | * by forcibly using a zonelist starting at a specified node, and by | |
2146 | * (in get_page_from_freelist()) refusing to consider the zones for | |
2147 | * any node on the zonelist except the first. By the time any such | |
2148 | * calls get to this routine, we should just shut up and say 'yes'. | |
2149 | * | |
9bf2229f | 2150 | * GFP_USER allocations are marked with the __GFP_HARDWALL bit, |
c596d9f3 DR |
2151 | * and do not allow allocations outside the current tasks cpuset |
2152 | * unless the task has been OOM killed and is marked TIF_MEMDIE. | |
9bf2229f | 2153 | * GFP_KERNEL allocations are not so marked, so can escape to the |
78608366 | 2154 | * nearest enclosing hardwalled ancestor cpuset. |
9bf2229f | 2155 | * |
02a0e53d PJ |
2156 | * Scanning up parent cpusets requires callback_mutex. The |
2157 | * __alloc_pages() routine only calls here with __GFP_HARDWALL bit | |
2158 | * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the | |
2159 | * current tasks mems_allowed came up empty on the first pass over | |
2160 | * the zonelist. So only GFP_KERNEL allocations, if all nodes in the | |
2161 | * cpuset are short of memory, might require taking the callback_mutex | |
2162 | * mutex. | |
9bf2229f | 2163 | * |
36be57ff | 2164 | * The first call here from mm/page_alloc:get_page_from_freelist() |
02a0e53d PJ |
2165 | * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, |
2166 | * so no allocation on a node outside the cpuset is allowed (unless | |
2167 | * in interrupt, of course). | |
36be57ff PJ |
2168 | * |
2169 | * The second pass through get_page_from_freelist() doesn't even call | |
2170 | * here for GFP_ATOMIC calls. For those calls, the __alloc_pages() | |
2171 | * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set | |
2172 | * in alloc_flags. That logic and the checks below have the combined | |
2173 | * effect that: | |
9bf2229f PJ |
2174 | * in_interrupt - any node ok (current task context irrelevant) |
2175 | * GFP_ATOMIC - any node ok | |
c596d9f3 | 2176 | * TIF_MEMDIE - any node ok |
78608366 | 2177 | * GFP_KERNEL - any node in enclosing hardwalled cpuset ok |
9bf2229f | 2178 | * GFP_USER - only nodes in current tasks mems allowed ok. |
36be57ff PJ |
2179 | * |
2180 | * Rule: | |
02a0e53d | 2181 | * Don't call cpuset_zone_allowed_softwall if you can't sleep, unless you |
36be57ff PJ |
2182 | * pass in the __GFP_HARDWALL flag set in gfp_mask, which disables | |
2183 | * the code that might scan up ancestor cpusets and sleep. | |
02a0e53d | 2184 | */ |
9bf2229f | 2185 | |
02a0e53d | 2186 | int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) |
1da177e4 | 2187 | { |
9bf2229f PJ |
2188 | int node; /* node that zone z is on */ |
2189 | const struct cpuset *cs; /* current cpuset ancestors */ | |
29afd49b | 2190 | int allowed; /* is allocation in zone z allowed? */ |
9bf2229f | 2191 | |
9b819d20 | 2192 | if (in_interrupt() || (gfp_mask & __GFP_THISNODE)) |
9bf2229f | 2193 | return 1; |
89fa3024 | 2194 | node = zone_to_nid(z); |
92d1dbd2 | 2195 | might_sleep_if(!(gfp_mask & __GFP_HARDWALL)); |
9bf2229f PJ |
2196 | if (node_isset(node, current->mems_allowed)) |
2197 | return 1; | |
c596d9f3 DR |
2198 | /* |
2199 | * Allow tasks that have access to memory reserves because they have | |
2200 | * been OOM killed to get memory anywhere. | |
2201 | */ | |
2202 | if (unlikely(test_thread_flag(TIF_MEMDIE))) | |
2203 | return 1; | |
9bf2229f PJ |
2204 | if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ |
2205 | return 0; | |
2206 | ||
5563e770 BP |
2207 | if (current->flags & PF_EXITING) /* Let dying task have memory */ |
2208 | return 1; | |
2209 | ||
9bf2229f | 2210 | /* Not hardwall and node outside mems_allowed: scan up cpusets */ |
3d3f26a7 | 2211 | mutex_lock(&callback_mutex); |
053199ed | 2212 | |
053199ed | 2213 | task_lock(current); |
78608366 | 2214 | cs = nearest_hardwall_ancestor(task_cs(current)); |
053199ed PJ |
2215 | task_unlock(current); |
2216 | ||
9bf2229f | 2217 | allowed = node_isset(node, cs->mems_allowed); |
3d3f26a7 | 2218 | mutex_unlock(&callback_mutex); |
9bf2229f | 2219 | return allowed; |
1da177e4 LT |
2220 | } |
2221 | ||
02a0e53d PJ |
2222 | /* |
2223 | * cpuset_zone_allowed_hardwall - Can we allocate on zone z's memory node? | |
2224 | * @z: is this zone on an allowed node? | |
2225 | * @gfp_mask: memory allocation flags | |
2226 | * | |
2227 | * If we're in interrupt, yes, we can always allocate. | |
2228 | * If __GFP_THISNODE is set, yes, we can always allocate. If zone | |
c596d9f3 DR |
2229 | * z's node is in our tasks mems_allowed, yes. If the task has been |
2230 | * OOM killed and has access to memory reserves as specified by the | |
2231 | * TIF_MEMDIE flag, yes. Otherwise, no. | |
02a0e53d PJ |
2232 | * |
2233 | * The __GFP_THISNODE placement logic is really handled elsewhere, | |
2234 | * by forcibly using a zonelist starting at a specified node, and by | |
2235 | * (in get_page_from_freelist()) refusing to consider the zones for | |
2236 | * any node on the zonelist except the first. By the time any such | |
2237 | * calls get to this routine, we should just shut up and say 'yes'. | |
2238 | * | |
2239 | * Unlike the cpuset_zone_allowed_softwall() variant, above, | |
2240 | * this variant requires that the zone be in the current tasks | |
2241 | * mems_allowed or that we're in interrupt. It does not scan up the | |
2242 | * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset. | |
2243 | * It never sleeps. | |
2244 | */ | |
2245 | ||
2246 | int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) | |
2247 | { | |
2248 | int node; /* node that zone z is on */ | |
2249 | ||
2250 | if (in_interrupt() || (gfp_mask & __GFP_THISNODE)) | |
2251 | return 1; | |
2252 | node = zone_to_nid(z); | |
2253 | if (node_isset(node, current->mems_allowed)) | |
2254 | return 1; | |
dedf8b79 DW |
2255 | /* |
2256 | * Allow tasks that have access to memory reserves because they have | |
2257 | * been OOM killed to get memory anywhere. | |
2258 | */ | |
2259 | if (unlikely(test_thread_flag(TIF_MEMDIE))) | |
2260 | return 1; | |
02a0e53d PJ |
2261 | return 0; |
2262 | } | |
2263 | ||
505970b9 PJ |
2264 | /** |
2265 | * cpuset_lock - lock out any changes to cpuset structures | |
2266 | * | |
3d3f26a7 | 2267 | * The out of memory (oom) code needs to keep cpusets |
505970b9 | 2268 | * from being changed while it scans the tasklist looking for a |
3d3f26a7 | 2269 | * task in an overlapping cpuset. Expose callback_mutex via this |
505970b9 PJ |
2270 | * cpuset_lock() routine, so the oom code can lock it, before |
2271 | * locking the task list. The tasklist_lock is a spinlock, so | |
3d3f26a7 | 2272 | * must be taken inside callback_mutex. |
505970b9 PJ |
2273 | */ |
2274 | ||
2275 | void cpuset_lock(void) | |
2276 | { | |
3d3f26a7 | 2277 | mutex_lock(&callback_mutex); |
505970b9 PJ |
2278 | } |
2279 | ||
2280 | /** | |
2281 | * cpuset_unlock - release lock on cpuset changes | |
2282 | * | |
2283 | * Undo the lock taken in a previous cpuset_lock() call. | |
2284 | */ | |
2285 | ||
2286 | void cpuset_unlock(void) | |
2287 | { | |
3d3f26a7 | 2288 | mutex_unlock(&callback_mutex); |
505970b9 PJ |
2289 | } |
2290 | ||
825a46af PJ |
2291 | /** |
2292 | * cpuset_mem_spread_node() - On which node to begin search for a page | |
2293 | * | |
2294 | * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for | |
2295 | * tasks in a cpuset with is_spread_page or is_spread_slab set), | |
2296 | * and if the memory allocation used cpuset_mem_spread_node() | |
2297 | * to determine on which node to start looking, as it will for | |
2298 | * certain page cache or slab cache pages such as used for file | |
2299 | * system buffers and inode caches, then instead of starting on the | |
2300 | * local node to look for a free page, spread the starting | |
2301 | * node around the tasks mems_allowed nodes. | |
2302 | * | |
2303 | * We don't have to worry about the returned node being offline | |
2304 | * because "it can't happen", and even if it did, it would be ok. | |
2305 | * | |
2306 | * The routines calling guarantee_online_mems() are careful to | |
2307 | * only set nodes in task->mems_allowed that are online. So it | |
2308 | * should not be possible for the following code to return an | |
2309 | * offline node. But if it did, that would be ok, as this routine | |
2310 | * is not returning the node where the allocation must be, only | |
2311 | * the node where the search should start. The zonelist passed to | |
2312 | * __alloc_pages() will include all nodes. If the slab allocator | |
2313 | * is passed an offline node, it will fall back to the local node. | |
2314 | * See kmem_cache_alloc_node(). | |
2315 | */ | |
2316 | ||
2317 | int cpuset_mem_spread_node(void) | |
2318 | { | |
2319 | int node; | |
2320 | ||
2321 | node = next_node(current->cpuset_mem_spread_rotor, current->mems_allowed); | |
2322 | if (node == MAX_NUMNODES) | |
2323 | node = first_node(current->mems_allowed); | |
2324 | current->cpuset_mem_spread_rotor = node; | |
2325 | return node; | |
2326 | } | |
2327 | EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); | |
2328 | ||
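/*
 * Editor's illustration, not part of cpuset.c: a typical caller, along the
 * lines of the page cache allocation path, first checks whether the current
 * task has page spreading enabled and, if so, uses the rotor above to pick
 * the node to start allocating from.
 */
#if 0	/* sketch only, never compiled */
static struct page *example_spread_alloc(gfp_t gfp_mask)
{
	if (cpuset_do_page_mem_spread()) {
		int nid = cpuset_mem_spread_node();	/* rotate through mems_allowed */
		return alloc_pages_node(nid, gfp_mask, 0);
	}
	return alloc_pages(gfp_mask, 0);
}
#endif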
ef08e3b4 | 2329 | /** |
bbe373f2 DR |
2330 | * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's? |
2331 | * @tsk1: pointer to task_struct of some task. | |
2332 | * @tsk2: pointer to task_struct of some other task. | |
2333 | * | |
2334 | * Description: Return true if @tsk1's mems_allowed intersects the | |
2335 | * mems_allowed of @tsk2. Used by the OOM killer to determine if | |
2336 | * one of the task's memory usage might impact the memory available | |
2337 | * to the other. | |
ef08e3b4 PJ |
2338 | **/ |
2339 | ||
bbe373f2 DR |
2340 | int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, |
2341 | const struct task_struct *tsk2) | |
ef08e3b4 | 2342 | { |
bbe373f2 | 2343 | return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); |
ef08e3b4 PJ |
2344 | } |
2345 | ||
3e0d98b9 PJ |
2346 | /* |
2347 | * Collection of memory_pressure is suppressed unless | |
2348 | * this flag is enabled by writing "1" to the special | |
2349 | * cpuset file 'memory_pressure_enabled' in the root cpuset. | |
2350 | */ | |
2351 | ||
c5b2aff8 | 2352 | int cpuset_memory_pressure_enabled __read_mostly; |
3e0d98b9 PJ |
2353 | |
2354 | /** | |
2355 | * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims. | |
2356 | * | |
2357 | * Keep a running average of the rate of synchronous (direct) | |
2358 | * page reclaim efforts initiated by tasks in each cpuset. | |
2359 | * | |
2360 | * This represents the rate at which some task in the cpuset | |
2361 | * ran low on memory on all nodes it was allowed to use, and | |
2362 | * had to enter the kernels page reclaim code in an effort to | |
2363 | * create more free memory by tossing clean pages or swapping | |
2364 | * or writing dirty pages. | |
2365 | * | |
2366 | * Display to user space in the per-cpuset read-only file | |
2367 | * "memory_pressure". Value displayed is an integer | |
2368 | * representing the recent rate of entry into the synchronous | |
2369 | * (direct) page reclaim by any task attached to the cpuset. | |
2370 | **/ | |
2371 | ||
2372 | void __cpuset_memory_pressure_bump(void) | |
2373 | { | |
3e0d98b9 | 2374 | task_lock(current); |
8793d854 | 2375 | fmeter_markevent(&task_cs(current)->fmeter); |
3e0d98b9 PJ |
2376 | task_unlock(current); |
2377 | } | |
2378 | ||
8793d854 | 2379 | #ifdef CONFIG_PROC_PID_CPUSET |
1da177e4 LT |
2380 | /* |
2381 | * proc_cpuset_show() | |
2382 | * - Print tasks cpuset path into seq_file. | |
2383 | * - Used for /proc/<pid>/cpuset. | |
053199ed PJ |
2384 | * - No need to task_lock(tsk) on this tsk->cpuset reference, as it |
2385 | * doesn't really matter if tsk->cpuset changes after we read it, | |
c8d9c90c | 2386 | * and we take cgroup_mutex, keeping cpuset_attach() from changing it |
2df167a3 | 2387 | * anyway. |
1da177e4 | 2388 | */ |
029190c5 | 2389 | static int proc_cpuset_show(struct seq_file *m, void *unused_v) |
1da177e4 | 2390 | { |
13b41b09 | 2391 | struct pid *pid; |
1da177e4 LT |
2392 | struct task_struct *tsk; |
2393 | char *buf; | |
8793d854 | 2394 | struct cgroup_subsys_state *css; |
99f89551 | 2395 | int retval; |
1da177e4 | 2396 | |
99f89551 | 2397 | retval = -ENOMEM; |
1da177e4 LT |
2398 | buf = kmalloc(PAGE_SIZE, GFP_KERNEL); |
2399 | if (!buf) | |
99f89551 EB |
2400 | goto out; |
2401 | ||
2402 | retval = -ESRCH; | |
13b41b09 EB |
2403 | pid = m->private; |
2404 | tsk = get_pid_task(pid, PIDTYPE_PID); | |
99f89551 EB |
2405 | if (!tsk) |
2406 | goto out_free; | |
1da177e4 | 2407 | |
99f89551 | 2408 | retval = -EINVAL; |
8793d854 PM |
2409 | cgroup_lock(); |
2410 | css = task_subsys_state(tsk, cpuset_subsys_id); | |
2411 | retval = cgroup_path(css->cgroup, buf, PAGE_SIZE); | |
1da177e4 | 2412 | if (retval < 0) |
99f89551 | 2413 | goto out_unlock; |
1da177e4 LT |
2414 | seq_puts(m, buf); |
2415 | seq_putc(m, '\n'); | |
99f89551 | 2416 | out_unlock: |
8793d854 | 2417 | cgroup_unlock(); |
99f89551 EB |
2418 | put_task_struct(tsk); |
2419 | out_free: | |
1da177e4 | 2420 | kfree(buf); |
99f89551 | 2421 | out: |
1da177e4 LT |
2422 | return retval; |
2423 | } | |
2424 | ||
2425 | static int cpuset_open(struct inode *inode, struct file *file) | |
2426 | { | |
13b41b09 EB |
2427 | struct pid *pid = PROC_I(inode)->pid; |
2428 | return single_open(file, proc_cpuset_show, pid); | |
1da177e4 LT |
2429 | } |
2430 | ||
9a32144e | 2431 | const struct file_operations proc_cpuset_operations = { |
1da177e4 LT |
2432 | .open = cpuset_open, |
2433 | .read = seq_read, | |
2434 | .llseek = seq_lseek, | |
2435 | .release = single_release, | |
2436 | }; | |
8793d854 | 2437 | #endif /* CONFIG_PROC_PID_CPUSET */ |
1da177e4 LT |
2438 | |
2439 | /* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */ | |
df5f8314 EB |
2440 | void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) |
2441 | { | |
2442 | seq_printf(m, "Cpus_allowed:\t"); | |
30e8e136 | 2443 | seq_cpumask(m, &task->cpus_allowed); |
df5f8314 | 2444 | seq_printf(m, "\n"); |
39106dcf | 2445 | seq_printf(m, "Cpus_allowed_list:\t"); |
30e8e136 | 2446 | seq_cpumask_list(m, &task->cpus_allowed); |
39106dcf | 2447 | seq_printf(m, "\n"); |
df5f8314 | 2448 | seq_printf(m, "Mems_allowed:\t"); |
30e8e136 | 2449 | seq_nodemask(m, &task->mems_allowed); |
df5f8314 | 2450 | seq_printf(m, "\n"); |
39106dcf | 2451 | seq_printf(m, "Mems_allowed_list:\t"); |
30e8e136 | 2452 | seq_nodemask_list(m, &task->mems_allowed); |
39106dcf | 2453 | seq_printf(m, "\n"); |
1da177e4 | 2454 | } |