/*
 * Processor and Memory placement constraints for sets of tasks.
 *
 * Copyright (C) 2003 BULL SA.
 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
 * Copyright (C) 2006 Google, Inc
 *
 * Portions derived from Patrick Mochel's sysfs code.
 * sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 * 2003-10-10 Written by Simon Derr.
 * 2003-10-22 Updates by Stephen Hemminger.
 * 2004 May-July Rework by Paul Jackson.
 * 2006 Rework by Paul Menage to use generic cgroups
 * 2008 Rework of the scheduler domains and CPU hotplug handling
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/wait.h>
/*
 * Tracks how many cpusets are currently defined in system.
 * When there is only one cpuset (the root cpuset) we can
 * short circuit some hooks.
 */
int number_of_cpusets __read_mostly;

/* Forward declare cgroup structures */
struct cgroup_subsys cpuset_subsys;
/* See "Frequency meter" comments, below. */
struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */
	cpumask_var_t cpus_allowed;	/* CPUs allowed to tasks in cpuset */
	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */

	/*
	 * This is old Memory Nodes tasks took on.
	 *
	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
	 * - A new cpuset's old_mems_allowed is initialized when some
	 *   task is moved into it.
	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
	 *   cpuset.mems_allowed and have tasks' nodemask updated, and
	 *   then old_mems_allowed is updated to mems_allowed.
	 */
	nodemask_t old_mems_allowed;

	struct fmeter fmeter;		/* memory_pressure filter */

	/*
	 * Tasks are being attached to this cpuset.  Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* partition number for rebuild_sched_domains() */

	/* for custom sched domain */
	int relax_domain_level;
};
/* Retrieve the cpuset for a cgroup */
static inline struct cpuset *cgroup_cs(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, cpuset_subsys_id),
			    struct cpuset, css);
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return container_of(task_subsys_state(task, cpuset_subsys_id),
			    struct cpuset, css);
}

static inline struct cpuset *parent_cs(const struct cpuset *cs)
	struct cgroup *pcgrp = cs->css.cgroup->parent;

	return cgroup_cs(pcgrp);

static inline bool task_has_mempolicy(struct task_struct *task)
	return task->mempolicy;

static inline bool task_has_mempolicy(struct task_struct *task)
/* bits in struct cpuset flags field */
	CS_SCHED_LOAD_BALANCE,

/* convenient tests for these bits */
static inline bool is_cpuset_online(const struct cpuset *cs)
{
	return test_bit(CS_ONLINE, &cs->flags);
}

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
		  (1 << CS_MEM_EXCLUSIVE)),
};
/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_cgrp: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs.  Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_cgrp, parent_cs)		\
	cgroup_for_each_child((pos_cgrp), (parent_cs)->css.cgroup)	\
		if (is_cpuset_online(((child_cs) = cgroup_cs((pos_cgrp)))))

/**
 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 * @des_cs: loop cursor pointing to the current descendant
 * @pos_cgrp: used for iteration
 * @root_cs: target cpuset to walk descendants of
 *
 * Walk @des_cs through the online descendants of @root_cs.  Must be used
 * with RCU read locked.  The caller may modify @pos_cgrp by calling
 * cgroup_rightmost_descendant() to skip subtree.
 */
#define cpuset_for_each_descendant_pre(des_cs, pos_cgrp, root_cs)	\
	cgroup_for_each_descendant_pre((pos_cgrp), (root_cs)->css.cgroup) \
		if (is_cpuset_online(((des_cs) = cgroup_cs((pos_cgrp)))))
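/*
 * Illustrative sketch only (hypothetical helper, not part of the original
 * file): how the iterators above are meant to be used - under
 * rcu_read_lock(), with the cursor resolving to each online child cpuset.
 */
static inline int cpuset_count_online_children_example(struct cpuset *parent)
{
	struct cpuset *child_cs;	/* loop cursor */
	struct cgroup *pos_cgrp;	/* iteration position */
	int n = 0;

	rcu_read_lock();
	cpuset_for_each_child(child_cs, pos_cgrp, parent)
		n++;			/* child_cs is an online child here */
	rcu_read_unlock();
	return n;
}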
/*
 * There are two global mutexes guarding cpuset structures - cpuset_mutex
 * and callback_mutex.  The latter may nest inside the former.  We also
 * require taking task_lock() when dereferencing a task's cpuset pointer.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold both mutexes to modify cpusets.  If a task holds
 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
 * is the only task able to also acquire callback_mutex and be able to
 * modify cpusets.  It can perform various checks on the cpuset structure
 * first, knowing nothing will change.  It can also allocate memory while
 * just holding cpuset_mutex.  While it is performing these checks, various
 * callback routines can briefly acquire callback_mutex to query cpusets.
 * Once it is ready to make the changes, it takes callback_mutex, blocking
 * everyone else.
 *
 * Calls to the kernel memory allocator can not be made while holding
 * callback_mutex, as that would risk double tripping on callback_mutex
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_mutex, then it has read-only
 * access to cpusets.
 *
 * Now, the task_struct fields mems_allowed and mempolicy may be changed
 * by another task; we use alloc_lock in the task_struct fields to protect
 * them.
 *
 * The cpuset_common_file_read() handlers only hold callback_mutex across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * Accessing a task's cpuset should be done in accordance with the
 * guidelines for accessing subsystem state in kernel/cgroup.c
 */

static DEFINE_MUTEX(cpuset_mutex);
static DEFINE_MUTEX(callback_mutex);
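/*
 * Illustrative sketch only (hypothetical function, not part of the original
 * file): the nesting order described above as it would look at a write-side
 * call site - cpuset_mutex taken first, callback_mutex nested briefly inside
 * it while the masks are actually rewritten.
 */
static inline void cpuset_locking_order_example(struct cpuset *cs,
						const struct cpumask *newmask)
{
	mutex_lock(&cpuset_mutex);		/* serialize cpuset writers */
	/* ... validate the change, allocate memory if needed ... */
	mutex_lock(&callback_mutex);		/* block readers briefly */
	cpumask_copy(cs->cpus_allowed, newmask);
	mutex_unlock(&callback_mutex);
	mutex_unlock(&cpuset_mutex);
}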
/*
 * CPU / memory hotplug is handled asynchronously.
 */
static void cpuset_hotplug_workfn(struct work_struct *work);
static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);

static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
/*
 * This is ugly, but preserves the userspace API for existing cpuset
 * users. If someone tries to mount the "cpuset" filesystem, we
 * silently switch it to mount "cgroup" instead
 */
static struct dentry *cpuset_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name, void *data)
	struct file_system_type *cgroup_fs = get_fs_type("cgroup");
	struct dentry *ret = ERR_PTR(-ENODEV);
			"release_agent=/sbin/cpuset_release_agent";
		ret = cgroup_fs->mount(cgroup_fs, flags,
					   unused_dev_name, mountopts);
		put_filesystem(cgroup_fs);

static struct file_system_type cpuset_fs_type = {
	.mount = cpuset_mount,
};
/*
 * Return in pmask the portion of a cpusets's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.  The top
 * cpuset always has some cpus online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_mask.
 *
 * Call with callback_mutex held.
 */
static void guarantee_online_cpus(const struct cpuset *cs,
				  struct cpumask *pmask)
{
	while (!cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
		cs = parent_cs(cs);
	cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
}
/*
 * Return in *pmask the portion of a cpusets's mems_allowed that
 * are online, with memory.  If none are online with memory, walk
 * up the cpuset hierarchy until we find one that does have some
 * online mems.  The top cpuset always has some mems online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_MEMORY].
 *
 * Call with callback_mutex held.
 */
static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
{
	while (!nodes_intersects(cs->mems_allowed, node_states[N_MEMORY]))
		cs = parent_cs(cs);
	nodes_and(*pmask, cs->mems_allowed, node_states[N_MEMORY]);
}
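/*
 * Worked example (illustrative, not from the original source): if
 * cs->cpus_allowed is 4-7 and CPUs 4-7 have all gone offline,
 * guarantee_online_cpus() climbs to the nearest ancestor that still
 * intersects cpu_online_mask and returns that intersection, so the caller
 * never sees an empty mask.  guarantee_online_mems() behaves the same way
 * for memory nodes against node_states[N_MEMORY].
 */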
/*
 * update task's spread flag if cpuset's page/slab spread flag is set
 *
 * Called with callback_mutex/cpuset_mutex held
 */
static void cpuset_update_task_spread_flag(struct cpuset *cs,
					struct task_struct *tsk)
{
	if (is_spread_page(cs))
		tsk->flags |= PF_SPREAD_PAGE;
	else
		tsk->flags &= ~PF_SPREAD_PAGE;
	if (is_spread_slab(cs))
		tsk->flags |= PF_SPREAD_SLAB;
	else
		tsk->flags &= ~PF_SPREAD_SLAB;
}
/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding cpuset_mutex.
 */
static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}
/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 */
static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
	struct cpuset *trial;

	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);

	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {

	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);

/**
 * free_trial_cpuset - free the trial cpuset
 * @trial: the trial cpuset to be freed
 */
static void free_trial_cpuset(struct cpuset *trial)
	free_cpumask_var(trial->cpus_allowed);
/*
 * validate_change() - Used to validate that any proposed cpuset change
 *		       follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * cpuset_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */
static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
	struct cpuset *c, *par;

	/* Each of our child cpusets must be a subset of us */
	cpuset_for_each_child(c, cgrp, cur)
		if (!is_cpuset_subset(c, trial))

	/* Remaining checks don't apply to root cpuset */
	if (cur == &top_cpuset)

	par = parent_cs(cur);

	/* We must be a subset of our parent cpuset */
	if (!is_cpuset_subset(trial, par))

	/*
	 * If either I or some sibling (!= me) is exclusive, we can't
	 * overlap
	 */
	cpuset_for_each_child(c, cgrp, par) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))

	/*
	 * Cpusets with tasks - existing or newly being attached - can't
	 * have empty cpus_allowed or mems_allowed.
	 */
	if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) &&
	    (cpumask_empty(trial->cpus_allowed) &&
	     nodes_empty(trial->mems_allowed)))
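/*
 * Illustrative example (not from the original source): shrinking a parent's
 * "cpus" below what one of its children still uses fails the child-subset
 * check above, and giving a cpu_exclusive sibling an overlapping cpumask
 * fails the sibling-overlap check, so validate_change() reports an error
 * before any mask is actually modified.
 */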
/*
 * Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping cpus_allowed masks?
 */
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
	return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
}

static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
	if (dattr->relax_domain_level < c->relax_domain_level)
		dattr->relax_domain_level = c->relax_domain_level;

static void update_domain_attr_tree(struct sched_domain_attr *dattr,
				    struct cpuset *root_cs)
	struct cgroup *pos_cgrp;

	cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
		/* skip the whole subtree if @cp doesn't have any CPU */
		if (cpumask_empty(cp->cpus_allowed)) {
			pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);

		if (is_sched_load_balance(cp))
			update_domain_attr(dattr, cp);
/*
 * generate_sched_domains()
 *
 * This function builds a partial partition of the system's CPUs.
 * A 'partial partition' is a set of non-overlapping subsets whose
 * union is a subset of that set.
 * The output of this function needs to be passed to kernel/sched/core.c
 * partition_sched_domains() routine, which will rebuild the scheduler's
 * load balancing domains (sched domains) as specified by that partial
 *
 * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
 * for a background explanation of this.
 *
 * Does not return errors, on the theory that the callers of this
 * routine would rather not worry about failures to rebuild sched
 * domains when operating in the severe memory shortage situations
 * that could cause allocation failures below.
 *
 * Must be called with cpuset_mutex held.
 *
 * The three key local variables below are:
 *    q  - a linked-list queue of cpuset pointers, used to implement a
 *	   top-down scan of all cpusets.  This scan loads a pointer
 *	   to each cpuset marked is_sched_load_balance into the
 *	   array 'csa'.  For our purposes, rebuilding the schedulers
 *	   sched domains, we can ignore !is_sched_load_balance cpusets.
 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 *	   that need to be load balanced, for convenient iterative
 *	   access by the subsequent code that finds the best partition,
 *	   i.e. the set of domains (subsets) of CPUs such that the
 *	   cpus_allowed of every cpuset marked is_sched_load_balance
 *	   is a subset of one of these domains, while there are as
 *	   many such domains as possible, each as small as possible.
 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
 *	   the kernel/sched/core.c routine partition_sched_domains() in a
 *	   convenient format, that can be easily compared to the prior
 *	   value to determine what partition elements (sched domains)
 *	   were changed (added or removed.)
 *
 * Finding the best partition (set of domains):
 *	The triple nested loops below over i, j, k scan over the
 *	load balanced cpusets (using the array of cpuset pointers in
 *	csa[]) looking for pairs of cpusets that have overlapping
 *	cpus_allowed, but which don't have the same 'pn' partition
 *	number and puts them in the same partition number.  It keeps
 *	looping on the 'restart' label until it can no longer find
 *
 *	The union of the cpus_allowed masks from the set of
 *	all cpusets having the same 'pn' value then form the one
 *	element of the partition (one sched domain) to be passed to
 *	partition_sched_domains().
 */
static int generate_sched_domains(cpumask_var_t **domains,
			struct sched_domain_attr **attributes)
	struct cpuset *cp;	/* scans q */
	struct cpuset **csa;	/* array of all cpuset ptrs */
	int csn;		/* how many cpuset ptrs in csa so far */
	int i, j, k;		/* indices for partition finding loops */
	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
	struct sched_domain_attr *dattr;  /* attributes for custom domains */
	int ndoms = 0;		/* number of sched domains in result */
	int nslot;		/* next empty doms[] struct cpumask slot */
	struct cgroup *pos_cgrp;

	/* Special case for the 99% of systems with one, full, sched domain */
	if (is_sched_load_balance(&top_cpuset)) {
		doms = alloc_sched_domains(ndoms);

		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
			*dattr = SD_ATTR_INIT;
			update_domain_attr_tree(dattr, &top_cpuset);
		cpumask_copy(doms[0], top_cpuset.cpus_allowed);

	csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);

	cpuset_for_each_descendant_pre(cp, pos_cgrp, &top_cpuset) {
		/*
		 * Continue traversing beyond @cp iff @cp has some CPUs and
		 * isn't load balancing.  The former is obvious.  The
		 * latter: All child cpusets contain a subset of the
		 * parent's cpus, so just skip them, and then we call
		 * update_domain_attr_tree() to calc relax_domain_level of
		 * the corresponding sched domain.
		 */
		if (!cpumask_empty(cp->cpus_allowed) &&
		    !is_sched_load_balance(cp))

		if (is_sched_load_balance(cp))

		/* skip @cp's subtree */
		pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
	for (i = 0; i < csn; i++)

	/* Find the best partition (set of sched domains) */
	for (i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];

		for (j = 0; j < csn; j++) {
			struct cpuset *b = csa[j];

			if (apn != bpn && cpusets_overlap(a, b)) {
				for (k = 0; k < csn; k++) {
					struct cpuset *c = csa[k];

				ndoms--;	/* one less element */

	/*
	 * Now we know how many domains to create.
	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
	 */
	doms = alloc_sched_domains(ndoms);

	/*
	 * The rest of the code, including the scheduler, can deal with
	 * dattr==NULL case. No need to abort if alloc fails.
	 */
	dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);

	for (nslot = 0, i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];

		/* Skip completed partitions */

		if (nslot == ndoms) {
			static int warnings = 10;
				"rebuild_sched_domains confused:"
				" nslot %d, ndoms %d, csn %d, i %d,"
				nslot, ndoms, csn, i, apn);

		*(dattr + nslot) = SD_ATTR_INIT;
		for (j = i; j < csn; j++) {
			struct cpuset *b = csa[j];

				cpumask_or(dp, dp, b->cpus_allowed);
				update_domain_attr_tree(dattr + nslot, b);

		/* Done with this partition */

	BUG_ON(nslot != ndoms);
	/*
	 * Fallback to the default domain if kmalloc() failed.
	 * See comments in partition_sched_domains().
	 */

/*
 * Rebuild scheduler domains.
 *
 * If the flag 'sched_load_balance' of any cpuset with non-empty
 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
 * which has that flag enabled, or if any cpuset with a non-empty
 * 'cpus' is removed, then call this routine to rebuild the
 * scheduler's dynamic sched domains.
 *
 * Call with cpuset_mutex held.  Takes get_online_cpus().
 */
static void rebuild_sched_domains_locked(void)
	struct sched_domain_attr *attr;

	lockdep_assert_held(&cpuset_mutex);

	/*
	 * We have raced with CPU hotplug. Don't do anything to avoid
	 * passing doms with offlined cpu to partition_sched_domains().
	 * Anyways, hotplug work item will rebuild sched domains.
	 */
	if (!cpumask_equal(top_cpuset.cpus_allowed, cpu_active_mask))

	/* Generate domain masks and attrs */
	ndoms = generate_sched_domains(&doms, &attr);

	/* Have scheduler rebuild the domains */
	partition_sched_domains(ndoms, doms, attr);

#else /* !CONFIG_SMP */
static void rebuild_sched_domains_locked(void)
#endif /* CONFIG_SMP */

void rebuild_sched_domains(void)
{
	mutex_lock(&cpuset_mutex);
	rebuild_sched_domains_locked();
	mutex_unlock(&cpuset_mutex);
}
/**
 * effective_cpumask_cpuset - return nearest ancestor with non-empty cpus
 * @cs: the cpuset in interest
 *
 * A cpuset's effective cpumask is the cpumask of the nearest ancestor
 * with non-empty cpus. We use effective cpumask whenever:
 * - we update tasks' cpus_allowed. (they take on the ancestor's cpumask
 *   if the cpuset they reside in has no cpus)
 * - we want to retrieve task_cs(tsk)'s cpus_allowed.
 *
 * Called with cpuset_mutex held. cpuset_cpus_allowed_fallback() is an
 * exception. See comments there.
 */
static struct cpuset *effective_cpumask_cpuset(struct cpuset *cs)
	while (cpumask_empty(cs->cpus_allowed))

/**
 * effective_nodemask_cpuset - return nearest ancestor with non-empty mems
 * @cs: the cpuset in interest
 *
 * A cpuset's effective nodemask is the nodemask of the nearest ancestor
 * with non-empty mems. We use effective nodemask whenever:
 * - we update tasks' mems_allowed. (they take on the ancestor's nodemask
 *   if the cpuset they reside in has no mems)
 * - we want to retrieve task_cs(tsk)'s mems_allowed.
 *
 * Called with cpuset_mutex held.
 */
static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs)
	while (nodes_empty(cs->mems_allowed))
/**
 * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
 * @scan: struct cgroup_scanner containing the cgroup of the task
 *
 * Called by cgroup_scan_tasks() for each task in a cgroup whose
 * cpus_allowed mask needs to be changed.
 *
 * We don't need to re-check for the cgroup/cpuset membership, since we're
 * holding cpuset_mutex at this point.
 */
static void cpuset_change_cpumask(struct task_struct *tsk,
				  struct cgroup_scanner *scan)
{
	struct cpuset *cpus_cs;

	cpus_cs = effective_cpumask_cpuset(cgroup_cs(scan->cg));
	set_cpus_allowed_ptr(tsk, cpus_cs->cpus_allowed);
}
/**
 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 *
 * Called with cpuset_mutex held
 *
 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
 * calling callback functions for each.
 *
 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
 * if @heap != NULL.
 */
static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
	struct cgroup_scanner scan;

	scan.cg = cs->css.cgroup;
	scan.test_task = NULL;
	scan.process_task = cpuset_change_cpumask;
	cgroup_scan_tasks(&scan);
/*
 * update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy.
 * @root_cs: the root cpuset of the hierarchy
 * @update_root: update root cpuset or not?
 * @heap: the heap used by cgroup_scan_tasks()
 *
 * This will update cpumasks of tasks in @root_cs and all other empty cpusets
 * which take on cpumask of @root_cs.
 *
 * Called with cpuset_mutex held
 */
static void update_tasks_cpumask_hier(struct cpuset *root_cs,
				      bool update_root, struct ptr_heap *heap)
	struct cgroup *pos_cgrp;

		update_tasks_cpumask(root_cs, heap);

	cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
		/* skip the whole subtree if @cp has some CPUs */
		if (!cpumask_empty(cp->cpus_allowed)) {
			pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);

		if (!css_tryget(&cp->css))

		update_tasks_cpumask(cp, heap);
/**
 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
 * @cs: the cpuset to consider
 * @buf: buffer of cpu numbers written to this cpuset
 */
static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
			  const char *buf)
	struct ptr_heap heap;
	int is_load_balanced;

	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
	if (cs == &top_cpuset)

	/*
	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
	 * Since cpulist_parse() fails on an empty mask, we special case
	 * that parsing.  The validate_change() call ensures that cpusets
	 * with tasks have cpus.
	 */
		cpumask_clear(trialcs->cpus_allowed);
		retval = cpulist_parse(buf, trialcs->cpus_allowed);

	if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))

	/* Nothing to do if the cpus didn't change */
	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))

	retval = validate_change(cs, trialcs);

	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);

	is_load_balanced = is_sched_load_balance(trialcs);

	mutex_lock(&callback_mutex);
	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
	mutex_unlock(&callback_mutex);

	update_tasks_cpumask_hier(cs, true, &heap);

	if (is_load_balanced)
		rebuild_sched_domains_locked();
/*
 * Migrate memory region from one set of nodes to another.
 *
 * Temporarily set tasks mems_allowed to target nodes of migration,
 * so that the migration code can allocate pages on these nodes.
 *
 * Call holding cpuset_mutex, so current's cpuset won't change
 * during this call, as manage_mutex holds off any cpuset_attach()
 * calls.  Therefore we don't need to take task_lock around the
 * call to guarantee_online_mems(), as we know no one is changing
 *
 * While the mm_struct we are migrating is typically from some
 * other task, the task_struct mems_allowed that we are hacking
 * is for our current task, which must allocate new pages for that
 * migrating memory region.
 */
static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
							const nodemask_t *to)
	struct task_struct *tsk = current;
	struct cpuset *mems_cs;

	tsk->mems_allowed = *to;

	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);

	mems_cs = effective_nodemask_cpuset(task_cs(tsk));
	guarantee_online_mems(mems_cs, &tsk->mems_allowed);
/*
 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
 * @tsk: the task to change
 * @newmems: new nodes that the task will be set
 *
 * In order to avoid seeing no nodes if the old and new nodes are disjoint,
 * we structure updates as setting all new allowed nodes, then clearing newly
 * disallowed ones.
 */
static void cpuset_change_task_nodemask(struct task_struct *tsk,
					nodemask_t *newmems)
	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)))
	if (current->flags & PF_EXITING) /* Let dying task have memory */

	/*
	 * Determine if a loop is necessary if another thread is doing
	 * get_mems_allowed(). If at least one node remains unchanged and
	 * tsk does not have a mempolicy, then an empty nodemask will not be
	 * possible when mems_allowed is larger than a word.
	 */
	need_loop = task_has_mempolicy(tsk) ||
			!nodes_intersects(*newmems, tsk->mems_allowed);

		write_seqcount_begin(&tsk->mems_allowed_seq);

	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);

	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
	tsk->mems_allowed = *newmems;

		write_seqcount_end(&tsk->mems_allowed_seq);
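/*
 * Worked example (illustrative, not from the original source): rebinding
 * from mems_allowed = {0} to newmems = {3} with the two-step scheme above
 * never exposes an empty nodemask - after nodes_or() the task briefly sees
 * {0,3} (MPOL_REBIND_STEP1), and only then is mems_allowed set to {3}
 * (MPOL_REBIND_STEP2).
 */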
/*
 * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
 * of it to cpuset's new mems_allowed, and migrate pages to new nodes if
 * memory_migrate flag is set. Called with cpuset_mutex held.
 */
static void cpuset_change_nodemask(struct task_struct *p,
				   struct cgroup_scanner *scan)
	struct cpuset *cs = cgroup_cs(scan->cg);
	struct mm_struct *mm;
	nodemask_t *newmems = scan->data;

	cpuset_change_task_nodemask(p, newmems);

	mm = get_task_mm(p);

	migrate = is_memory_migrate(cs);

	mpol_rebind_mm(mm, &cs->mems_allowed);

		cpuset_migrate_mm(mm, &cs->old_mems_allowed, newmems);

static void *cpuset_being_rebound;
/**
 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 *
 * Called with cpuset_mutex held
 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
 * if @heap != NULL.
 */
static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
	static nodemask_t newmems;	/* protected by cpuset_mutex */
	struct cgroup_scanner scan;
	struct cpuset *mems_cs = effective_nodemask_cpuset(cs);

	cpuset_being_rebound = cs;	/* causes mpol_dup() rebind */

	guarantee_online_mems(mems_cs, &newmems);

	scan.cg = cs->css.cgroup;
	scan.test_task = NULL;
	scan.process_task = cpuset_change_nodemask;
	scan.data = &newmems;

	/*
	 * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
	 * take while holding tasklist_lock.  Forks can happen - the
	 * mpol_dup() cpuset_being_rebound check will catch such forks,
	 * and rebind their vma mempolicies too.  Because we still hold
	 * the global cpuset_mutex, we know that no other rebind effort
	 * will be contending for the global variable cpuset_being_rebound.
	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
	 * is idempotent.  Also migrate pages in each mm to new nodes.
	 */
	cgroup_scan_tasks(&scan);

	/*
	 * All the tasks' nodemasks have been updated, update
	 * cs->old_mems_allowed.
	 */
	cs->old_mems_allowed = newmems;

	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
	cpuset_being_rebound = NULL;
/*
 * update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy.
 * @root_cs: the root cpuset of the hierarchy
 * @update_root: update the root cpuset or not?
 * @heap: the heap used by cgroup_scan_tasks()
 *
 * This will update nodemasks of tasks in @root_cs and all other empty cpusets
 * which take on nodemask of @root_cs.
 *
 * Called with cpuset_mutex held
 */
static void update_tasks_nodemask_hier(struct cpuset *root_cs,
				       bool update_root, struct ptr_heap *heap)
	struct cgroup *pos_cgrp;

		update_tasks_nodemask(root_cs, heap);

	cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
		/* skip the whole subtree if @cp has some mems */
		if (!nodes_empty(cp->mems_allowed)) {
			pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);

		if (!css_tryget(&cp->css))

		update_tasks_nodemask(cp, heap);
/*
 * Handle user request to change the 'mems' memory placement
 * of a cpuset.  Needs to validate the request, update the
 * cpusets mems_allowed, and for each task in the cpuset,
 * update mems_allowed and rebind task's mempolicy and any vma
 * mempolicies and if the cpuset is marked 'memory_migrate',
 * migrate the tasks pages to the new memory.
 *
 * Call with cpuset_mutex held. May take callback_mutex during call.
 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
 * lock each such tasks mm->mmap_sem, scan its vma's and rebind
 * their mempolicies to the cpusets new mems_allowed.
 */
static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
			   const char *buf)
	struct ptr_heap heap;

	/*
	 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
	 */
	if (cs == &top_cpuset) {

	/*
	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
	 * Since nodelist_parse() fails on an empty mask, we special case
	 * that parsing.  The validate_change() call ensures that cpusets
	 * with tasks have memory.
	 */
		nodes_clear(trialcs->mems_allowed);
		retval = nodelist_parse(buf, trialcs->mems_allowed);

	if (!nodes_subset(trialcs->mems_allowed,
				node_states[N_MEMORY])) {

	if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
		retval = 0;		/* Too easy - nothing to do */

	retval = validate_change(cs, trialcs);

	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);

	mutex_lock(&callback_mutex);
	cs->mems_allowed = trialcs->mems_allowed;
	mutex_unlock(&callback_mutex);

	update_tasks_nodemask_hier(cs, true, &heap);
int current_cpuset_is_being_rebound(void)
{
	return task_cs(current) == cpuset_being_rebound;
}
static int update_relax_domain_level(struct cpuset *cs, s64 val)
	if (val < -1 || val >= sched_domain_level_max)

	if (val != cs->relax_domain_level) {
		cs->relax_domain_level = val;
		if (!cpumask_empty(cs->cpus_allowed) &&
		    is_sched_load_balance(cs))
			rebuild_sched_domains_locked();
/**
 * cpuset_change_flag - make a task's spread flags the same as its cpuset's
 * @tsk: task to be updated
 * @scan: struct cgroup_scanner containing the cgroup of the task
 *
 * Called by cgroup_scan_tasks() for each task in a cgroup.
 *
 * We don't need to re-check for the cgroup/cpuset membership, since we're
 * holding cpuset_mutex at this point.
 */
static void cpuset_change_flag(struct task_struct *tsk,
				struct cgroup_scanner *scan)
{
	cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
}
/**
 * update_tasks_flags - update the spread flags of tasks in the cpuset.
 * @cs: the cpuset in which each task's spread flags needs to be changed
 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 *
 * Called with cpuset_mutex held
 *
 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
 * calling callback functions for each.
 *
 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
 */
static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
	struct cgroup_scanner scan;

	scan.cg = cs->css.cgroup;
	scan.test_task = NULL;
	scan.process_task = cpuset_change_flag;
	cgroup_scan_tasks(&scan);
/*
 * update_flag - read a 0 or a 1 in a file and update associated flag
 * bit:		the bit to update (see cpuset_flagbits_t)
 * cs:		the cpuset to update
 * turning_on:	whether the flag is being set or cleared
 *
 * Call with cpuset_mutex held.
 */
static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
		       int turning_on)
	struct cpuset *trialcs;
	int balance_flag_changed;
	int spread_flag_changed;
	struct ptr_heap heap;

	trialcs = alloc_trial_cpuset(cs);

		set_bit(bit, &trialcs->flags);
		clear_bit(bit, &trialcs->flags);

	err = validate_change(cs, trialcs);

	err = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);

	balance_flag_changed = (is_sched_load_balance(cs) !=
				is_sched_load_balance(trialcs));

	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
			|| (is_spread_page(cs) != is_spread_page(trialcs)));

	mutex_lock(&callback_mutex);
	cs->flags = trialcs->flags;
	mutex_unlock(&callback_mutex);

	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
		rebuild_sched_domains_locked();

	if (spread_flag_changed)
		update_tasks_flags(cs, &heap);

	free_trial_cpuset(trialcs);
/*
 * Frequency meter - How fast is some event occurring?
 *
 * These routines manage a digitally filtered, constant time based,
 * event frequency meter.  There are four routines:
 *   fmeter_init() - initialize a frequency meter.
 *   fmeter_markevent() - called each time the event happens.
 *   fmeter_getrate() - returns the recent rate of such events.
 *   fmeter_update() - internal routine used to update fmeter.
 *
 * A common data structure is passed to each of these routines,
 * which is used to keep track of the state required to manage the
 * frequency meter and its digital filter.
 *
 * The filter works on the number of events marked per unit time.
 * The filter is single-pole low-pass recursive (IIR).  The time unit
 * is 1 second.  Arithmetic is done using 32-bit integers scaled to
 * simulate 3 decimal digits of precision (multiplied by 1000).
 *
 * With an FM_COEF of 933, and a time base of 1 second, the filter
 * has a half-life of 10 seconds, meaning that if the events quit
 * happening, then the rate returned from the fmeter_getrate()
 * will be cut in half each 10 seconds, until it converges to zero.
 *
 * It is not worth doing a real infinitely recursive filter.  If more
 * than FM_MAXTICKS ticks have elapsed since the last filter event,
 * just compute FM_MAXTICKS ticks worth, by which point the level
 *
 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
 * arithmetic overflow in the fmeter_update() routine.
 *
 * Given the simple 32 bit integer arithmetic used, this meter works
 * best for reporting rates between one per millisecond (msec) and
 * one per 32 (approx) seconds.  At constant rates faster than one
 * per msec it maxes out at values just under 1,000,000.  At constant
 * rates between one per msec, and one per second it will stabilize
 * to a value N*1000, where N is the rate of events per second.
 * At constant rates between one per second and one per 32 seconds,
 * it will be choppy, moving up on the seconds that have an event,
 * and then decaying until the next event.  At rates slower than
 * about one in 32 seconds, it decays all the way back to zero between
 */

#define FM_COEF 933		/* coefficient for half-life of 10 secs */
#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
#define FM_MAXCNT 1000000	/* limit cnt to avoid overflow */
#define FM_SCALE 1000		/* faux fixed point scale */
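/*
 * Worked numbers (illustrative, not from the original source): with
 * FM_COEF = 933 the retained fraction per one-second tick is 933/1000 =
 * 0.933, and 0.933^10 is roughly 0.50, which is where the "half-life of
 * 10 seconds" above comes from.  Each marked event adds FM_SCALE to cnt,
 * and at the next update contributes (FM_SCALE - FM_COEF) = 67 to val.
 */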
/* Initialize a frequency meter */
static void fmeter_init(struct fmeter *fmp)
	spin_lock_init(&fmp->lock);

/* Internal meter update - process cnt events and update value */
static void fmeter_update(struct fmeter *fmp)
	time_t now = get_seconds();
	time_t ticks = now - fmp->time;

	ticks = min(FM_MAXTICKS, ticks);
		fmp->val = (FM_COEF * fmp->val) / FM_SCALE;

	fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;

/* Process any previous ticks, then bump cnt by one (times scale). */
static void fmeter_markevent(struct fmeter *fmp)
	spin_lock(&fmp->lock);
	fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
	spin_unlock(&fmp->lock);

/* Process any previous ticks, then return current value. */
static int fmeter_getrate(struct fmeter *fmp)
	spin_lock(&fmp->lock);
	spin_unlock(&fmp->lock);
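/*
 * Illustrative usage sketch (hypothetical helper, not part of the original
 * file): a producer marks events with fmeter_markevent() and a reader
 * samples the filtered rate with fmeter_getrate(), which is how a cpuset's
 * fmeter is expected to back the memory_pressure file read out below.
 */
static inline int fmeter_sample_example(struct fmeter *fmp)
{
	fmeter_markevent(fmp);		/* record that the event happened */
	return fmeter_getrate(fmp);	/* recent, digitally-filtered rate */
}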
/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
	struct cpuset *cs = cgroup_cs(cgrp);
	struct task_struct *task;

	mutex_lock(&cpuset_mutex);

	/*
	 * We allow to move tasks into an empty cpuset if sane_behavior
	 * flag is set.
	 */
	if (!cgroup_sane_behavior(cgrp) &&
	    (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))

	cgroup_taskset_for_each(task, cgrp, tset) {
		/*
		 * Kthreads which disallow setaffinity shouldn't be moved
		 * to a new cpuset; we don't want to change their cpu
		 * affinity and isolating such threads by their set of
		 * allowed nodes is unnecessary.  Thus, cpusets are not
		 * applicable for such threads.  This prevents checking for
		 * success of set_cpus_allowed_ptr() on all attached tasks
		 * before cpus_allowed may be changed.
		 */
		if (task->flags & PF_NO_SETAFFINITY)
		ret = security_task_setscheduler(task);

	/*
	 * Mark attach is in progress.  This makes validate_change() fail
	 * changes which zero cpus/mems_allowed.
	 */
	cs->attach_in_progress++;

	mutex_unlock(&cpuset_mutex);
static void cpuset_cancel_attach(struct cgroup *cgrp,
				 struct cgroup_taskset *tset)
{
	mutex_lock(&cpuset_mutex);
	cgroup_cs(cgrp)->attach_in_progress--;
	mutex_unlock(&cpuset_mutex);
}

/*
 * Protected by cpuset_mutex.  cpus_attach is used only by cpuset_attach()
 * but we can't allocate it dynamically there.  Define it global and
 * allocate from cpuset_init().
 */
static cpumask_var_t cpus_attach;
static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
	/* static buf protected by cpuset_mutex */
	static nodemask_t cpuset_attach_nodemask_to;
	struct mm_struct *mm;
	struct task_struct *task;
	struct task_struct *leader = cgroup_taskset_first(tset);
	struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
	struct cpuset *cs = cgroup_cs(cgrp);
	struct cpuset *oldcs = cgroup_cs(oldcgrp);
	struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
	struct cpuset *mems_cs = effective_nodemask_cpuset(cs);

	mutex_lock(&cpuset_mutex);

	/* prepare for attach */
	if (cs == &top_cpuset)
		cpumask_copy(cpus_attach, cpu_possible_mask);
	else
		guarantee_online_cpus(cpus_cs, cpus_attach);

	guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to);

	cgroup_taskset_for_each(task, cgrp, tset) {
		/*
		 * can_attach beforehand should guarantee that this doesn't
		 * fail.  TODO: have a better way to handle failure here
		 */
		WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));

		cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
		cpuset_update_task_spread_flag(cs, task);

	/*
	 * Change mm, possibly for multiple threads in a threadgroup. This is
	 * expensive and may sleep.
	 */
	cpuset_attach_nodemask_to = cs->mems_allowed;
	mm = get_task_mm(leader);
		struct cpuset *mems_oldcs = effective_nodemask_cpuset(oldcs);

		mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);

		/*
		 * old_mems_allowed is the same with mems_allowed here, except
		 * if this task is being moved automatically due to hotplug.
		 * In that case @mems_allowed has been updated and is empty,
		 * so @old_mems_allowed is the right nodesets that we migrate
		 * mm from.
		 */
		if (is_memory_migrate(cs)) {
			cpuset_migrate_mm(mm, &mems_oldcs->old_mems_allowed,
					  &cpuset_attach_nodemask_to);

	cs->old_mems_allowed = cpuset_attach_nodemask_to;

	cs->attach_in_progress--;
	if (!cs->attach_in_progress)
		wake_up(&cpuset_attach_wq);

	mutex_unlock(&cpuset_mutex);
/* The various types of files and directories in a cpuset file system */

typedef enum {
	FILE_MEMORY_MIGRATE,
	FILE_SCHED_LOAD_BALANCE,
	FILE_SCHED_RELAX_DOMAIN_LEVEL,
	FILE_MEMORY_PRESSURE_ENABLED,
	FILE_MEMORY_PRESSURE,
} cpuset_filetype_t;
static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
	struct cpuset *cs = cgroup_cs(cgrp);
	cpuset_filetype_t type = cft->private;
	int retval = -ENODEV;

	mutex_lock(&cpuset_mutex);
	if (!is_cpuset_online(cs))

	case FILE_CPU_EXCLUSIVE:
		retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
	case FILE_MEM_EXCLUSIVE:
		retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
	case FILE_MEM_HARDWALL:
		retval = update_flag(CS_MEM_HARDWALL, cs, val);
	case FILE_SCHED_LOAD_BALANCE:
		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
	case FILE_MEMORY_MIGRATE:
		retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
	case FILE_MEMORY_PRESSURE_ENABLED:
		cpuset_memory_pressure_enabled = !!val;
	case FILE_MEMORY_PRESSURE:
	case FILE_SPREAD_PAGE:
		retval = update_flag(CS_SPREAD_PAGE, cs, val);
	case FILE_SPREAD_SLAB:
		retval = update_flag(CS_SPREAD_SLAB, cs, val);

	mutex_unlock(&cpuset_mutex);
static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
	struct cpuset *cs = cgroup_cs(cgrp);
	cpuset_filetype_t type = cft->private;
	int retval = -ENODEV;

	mutex_lock(&cpuset_mutex);
	if (!is_cpuset_online(cs))

	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
		retval = update_relax_domain_level(cs, val);

	mutex_unlock(&cpuset_mutex);
/*
 * Common handling for a write to a "cpus" or "mems" file.
 */
static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
				const char *buf)
	struct cpuset *cs = cgroup_cs(cgrp);
	struct cpuset *trialcs;
	int retval = -ENODEV;

	/*
	 * CPU or memory hotunplug may leave @cs w/o any execution
	 * resources, in which case the hotplug code asynchronously updates
	 * configuration and transfers all tasks to the nearest ancestor
	 * which can execute.
	 *
	 * As writes to "cpus" or "mems" may restore @cs's execution
	 * resources, wait for the previously scheduled operations before
	 * proceeding, so that we don't end up keep removing tasks added
	 * after execution capability is restored.
	 */
	flush_work(&cpuset_hotplug_work);

	mutex_lock(&cpuset_mutex);
	if (!is_cpuset_online(cs))

	trialcs = alloc_trial_cpuset(cs);

	switch (cft->private) {
		retval = update_cpumask(cs, trialcs, buf);
		retval = update_nodemask(cs, trialcs, buf);

	free_trial_cpuset(trialcs);

	mutex_unlock(&cpuset_mutex);
/*
 * These ascii lists should be read in a single call, by using a user
 * buffer large enough to hold the entire map.  If read in smaller
 * chunks, there is no guarantee of atomicity.  Since the display format
 * used, list of ranges of sequential numbers, is variable length,
 * and since these maps can change value dynamically, one could read
 * gibberish by doing partial reads while a list was changing.
 * A single large read to a buffer that crosses a page boundary is
 * ok, because the result being copied to user land is not recomputed
 * across a page fault.
 */
static size_t cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
	mutex_lock(&callback_mutex);
	count = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed);
	mutex_unlock(&callback_mutex);

static size_t cpuset_sprintf_memlist(char *page, struct cpuset *cs)
	mutex_lock(&callback_mutex);
	count = nodelist_scnprintf(page, PAGE_SIZE, cs->mems_allowed);
	mutex_unlock(&callback_mutex);
static ssize_t cpuset_common_file_read(struct cgroup *cgrp,
				       size_t nbytes, loff_t *ppos)
	struct cpuset *cs = cgroup_cs(cgrp);
	cpuset_filetype_t type = cft->private;

	if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))

		s += cpuset_sprintf_cpulist(s, cs);
		s += cpuset_sprintf_memlist(s, cs);

	retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
	free_page((unsigned long)page);
static u64 cpuset_read_u64(struct cgroup *cgrp, struct cftype *cft)
	struct cpuset *cs = cgroup_cs(cgrp);
	cpuset_filetype_t type = cft->private;

	case FILE_CPU_EXCLUSIVE:
		return is_cpu_exclusive(cs);
	case FILE_MEM_EXCLUSIVE:
		return is_mem_exclusive(cs);
	case FILE_MEM_HARDWALL:
		return is_mem_hardwall(cs);
	case FILE_SCHED_LOAD_BALANCE:
		return is_sched_load_balance(cs);
	case FILE_MEMORY_MIGRATE:
		return is_memory_migrate(cs);
	case FILE_MEMORY_PRESSURE_ENABLED:
		return cpuset_memory_pressure_enabled;
	case FILE_MEMORY_PRESSURE:
		return fmeter_getrate(&cs->fmeter);
	case FILE_SPREAD_PAGE:
		return is_spread_page(cs);
	case FILE_SPREAD_SLAB:
		return is_spread_slab(cs);

	/* Unreachable but makes gcc happy */
static s64 cpuset_read_s64(struct cgroup *cgrp, struct cftype *cft)
	struct cpuset *cs = cgroup_cs(cgrp);
	cpuset_filetype_t type = cft->private;

	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
		return cs->relax_domain_level;

	/* Unreachable but makes gcc happy */
/*
 * for the common functions, 'private' gives the type of file
 */

static struct cftype files[] = {
	{
		.read = cpuset_common_file_read,
		.write_string = cpuset_write_resmask,
		.max_write_len = (100U + 6 * NR_CPUS),
		.private = FILE_CPULIST,
	},

	{
		.read = cpuset_common_file_read,
		.write_string = cpuset_write_resmask,
		.max_write_len = (100U + 6 * MAX_NUMNODES),
		.private = FILE_MEMLIST,
	},

	{
		.name = "cpu_exclusive",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_CPU_EXCLUSIVE,
	},

	{
		.name = "mem_exclusive",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_MEM_EXCLUSIVE,
	},

	{
		.name = "mem_hardwall",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_MEM_HARDWALL,
	},

	{
		.name = "sched_load_balance",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_SCHED_LOAD_BALANCE,
	},

	{
		.name = "sched_relax_domain_level",
		.read_s64 = cpuset_read_s64,
		.write_s64 = cpuset_write_s64,
		.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
	},

	{
		.name = "memory_migrate",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_MEMORY_MIGRATE,
	},

	{
		.name = "memory_pressure",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_MEMORY_PRESSURE,
	},

	{
		.name = "memory_spread_page",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_SPREAD_PAGE,
	},

	{
		.name = "memory_spread_slab",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_SPREAD_SLAB,
	},

	{
		.name = "memory_pressure_enabled",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_MEMORY_PRESSURE_ENABLED,
	},
};
/*
 * cpuset_css_alloc - allocate a cpuset css
 * cgrp: control group that the new cpuset will be part of
 */
static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cgrp)
		return &top_cpuset.css;

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);
	if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
		return ERR_PTR(-ENOMEM);

	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
	cpumask_clear(cs->cpus_allowed);
	nodes_clear(cs->mems_allowed);
	fmeter_init(&cs->fmeter);
	cs->relax_domain_level = -1;
static int cpuset_css_online(struct cgroup *cgrp)
	struct cpuset *cs = cgroup_cs(cgrp);
	struct cpuset *parent = parent_cs(cs);
	struct cpuset *tmp_cs;
	struct cgroup *pos_cg;

	mutex_lock(&cpuset_mutex);

	set_bit(CS_ONLINE, &cs->flags);
	if (is_spread_page(parent))
		set_bit(CS_SPREAD_PAGE, &cs->flags);
	if (is_spread_slab(parent))
		set_bit(CS_SPREAD_SLAB, &cs->flags);

	number_of_cpusets++;

	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags))

	/*
	 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
	 * set.  This flag handling is implemented in cgroup core for
	 * historical reasons - the flag may be specified during mount.
	 *
	 * Currently, if any sibling cpusets have exclusive cpus or mem, we
	 * refuse to clone the configuration - thereby refusing the task to
	 * be entered, and as a result refusing the sys_unshare() or
	 * clone() which initiated it.  If this becomes a problem for some
	 * users who wish to allow that scenario, then this could be
	 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
	 * (and likewise for mems) to the new cgroup.
	 */
	cpuset_for_each_child(tmp_cs, pos_cg, parent) {
		if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {

	mutex_lock(&callback_mutex);
	cs->mems_allowed = parent->mems_allowed;
	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
	mutex_unlock(&callback_mutex);

	mutex_unlock(&cpuset_mutex);
static void cpuset_css_offline(struct cgroup *cgrp)
	struct cpuset *cs = cgroup_cs(cgrp);

	mutex_lock(&cpuset_mutex);

	if (is_sched_load_balance(cs))
		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);

	number_of_cpusets--;
	clear_bit(CS_ONLINE, &cs->flags);

	mutex_unlock(&cpuset_mutex);

/*
 * If the cpuset being removed has its flag 'sched_load_balance'
 * enabled, then simulate turning sched_load_balance off, which
 * will call rebuild_sched_domains_locked().
 */

static void cpuset_css_free(struct cgroup *cgrp)
	struct cpuset *cs = cgroup_cs(cgrp);

	free_cpumask_var(cs->cpus_allowed);
struct cgroup_subsys cpuset_subsys = {
	.css_alloc = cpuset_css_alloc,
	.css_online = cpuset_css_online,
	.css_offline = cpuset_css_offline,
	.css_free = cpuset_css_free,
	.can_attach = cpuset_can_attach,
	.cancel_attach = cpuset_cancel_attach,
	.attach = cpuset_attach,
	.subsys_id = cpuset_subsys_id,
	.base_cftypes = files,
};
/**
 * cpuset_init - initialize cpusets at system boot
 *
 * Description: Initialize top_cpuset and the cpuset internal file system,
 */
int __init cpuset_init(void)
	if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))

	cpumask_setall(top_cpuset.cpus_allowed);
	nodes_setall(top_cpuset.mems_allowed);

	fmeter_init(&top_cpuset.fmeter);
	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
	top_cpuset.relax_domain_level = -1;

	err = register_filesystem(&cpuset_fs_type);

	if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))

	number_of_cpusets = 1;
/*
 * If CPU and/or memory hotplug handlers, below, unplug any CPUs
 * or memory nodes, we need to walk over the cpuset hierarchy,
 * removing that CPU or node from all cpusets.  If this removes the
 * last CPU or node from a cpuset, then move the tasks in the empty
 * cpuset to its next-highest non-empty parent.
 */
static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
	struct cpuset *parent;

	/*
	 * Find its next-highest non-empty parent, (top cpuset
	 * has online cpus, so can't be empty).
	 */
	parent = parent_cs(cs);
	while (cpumask_empty(parent->cpus_allowed) ||
	       nodes_empty(parent->mems_allowed))
		parent = parent_cs(parent);

	if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
		printk(KERN_ERR "cpuset: failed to transfer tasks out of empty cpuset %s\n",
		       cgroup_name(cs->css.cgroup));
/**
 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
 * @cs: cpuset in interest
 *
 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
 * offline, update @cs accordingly.  If @cs ends up with no CPU or memory,
 * all its tasks are moved to the nearest ancestor with both resources.
 */
static void cpuset_hotplug_update_tasks(struct cpuset *cs)
{
	static cpumask_t off_cpus;
	static nodemask_t off_mems;
	bool is_empty;
	bool sane = cgroup_sane_behavior(cs->css.cgroup);

retry:
	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);

	mutex_lock(&cpuset_mutex);

	/*
	 * We have raced with task attaching.  We wait until attaching
	 * is finished, so we won't attach a task to an empty cpuset.
	 */
	if (cs->attach_in_progress) {
		mutex_unlock(&cpuset_mutex);
		goto retry;
	}

	cpumask_andnot(&off_cpus, cs->cpus_allowed, top_cpuset.cpus_allowed);
	nodes_andnot(off_mems, cs->mems_allowed, top_cpuset.mems_allowed);

	mutex_lock(&callback_mutex);
	cpumask_andnot(cs->cpus_allowed, cs->cpus_allowed, &off_cpus);
	mutex_unlock(&callback_mutex);

	/*
	 * If the sane_behavior flag is set, we need to update tasks' cpumask
	 * for an empty cpuset to take on the ancestor's cpumask.  Otherwise,
	 * don't call update_tasks_cpumask() if the cpuset becomes empty, as
	 * the tasks in it will be migrated to an ancestor.
	 */
	if ((sane && cpumask_empty(cs->cpus_allowed)) ||
	    (!cpumask_empty(&off_cpus) && !cpumask_empty(cs->cpus_allowed)))
		update_tasks_cpumask(cs, NULL);

	mutex_lock(&callback_mutex);
	nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems);
	mutex_unlock(&callback_mutex);

	/*
	 * If the sane_behavior flag is set, we need to update tasks' nodemask
	 * for an empty cpuset to take on the ancestor's nodemask.  Otherwise,
	 * don't call update_tasks_nodemask() if the cpuset becomes empty, as
	 * the tasks in it will be migrated to an ancestor.
	 */
	if ((sane && nodes_empty(cs->mems_allowed)) ||
	    (!nodes_empty(off_mems) && !nodes_empty(cs->mems_allowed)))
		update_tasks_nodemask(cs, NULL);

	is_empty = cpumask_empty(cs->cpus_allowed) ||
		   nodes_empty(cs->mems_allowed);

	mutex_unlock(&cpuset_mutex);

	/*
	 * If the sane_behavior flag is set, we'll keep tasks in empty cpusets.
	 *
	 * Otherwise move tasks to the nearest ancestor with execution
	 * resources.  This is a full cgroup operation which will
	 * also call back into cpuset.  Should be done outside any lock.
	 */
	if (!sane && is_empty)
		remove_tasks_in_empty_cpuset(cs);
}
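/*
 * In short: with sane_behavior an emptied cpuset keeps its tasks, which
 * fall back to an ancestor's masks through the update_tasks_*() calls
 * above; in the legacy mode the tasks are instead migrated away by
 * remove_tasks_in_empty_cpuset().
 */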
/**
 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
 *
 * This function is called after either CPU or memory configuration has
 * changed and updates cpuset accordingly.  The top_cpuset is always
 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
 * order to make cpusets transparent (of no effect) on systems that are
 * actively using CPU hotplug but making no active use of cpusets.
 *
 * Non-root cpusets are only affected by offlining.  If any CPUs or memory
 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
 * all descendants.
 *
 * Note that CPU offlining during suspend is ignored.  We don't modify
 * cpusets across suspend/resume cycles at all.
 */
static void cpuset_hotplug_workfn(struct work_struct *work)
{
	static cpumask_t new_cpus;
	static nodemask_t new_mems;
	bool cpus_updated, mems_updated;

	mutex_lock(&cpuset_mutex);

	/* fetch the available cpus/mems and find out which changed how */
	cpumask_copy(&new_cpus, cpu_active_mask);
	new_mems = node_states[N_MEMORY];

	cpus_updated = !cpumask_equal(top_cpuset.cpus_allowed, &new_cpus);
	mems_updated = !nodes_equal(top_cpuset.mems_allowed, new_mems);

	/* synchronize cpus_allowed to cpu_active_mask */
	if (cpus_updated) {
		mutex_lock(&callback_mutex);
		cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
		mutex_unlock(&callback_mutex);
		/* we don't mess with cpumasks of tasks in top_cpuset */
	}

	/* synchronize mems_allowed to N_MEMORY */
	if (mems_updated) {
		mutex_lock(&callback_mutex);
		top_cpuset.mems_allowed = new_mems;
		mutex_unlock(&callback_mutex);
		update_tasks_nodemask(&top_cpuset, NULL);
	}

	mutex_unlock(&cpuset_mutex);

	/* if cpus or mems changed, we need to propagate to descendants */
	if (cpus_updated || mems_updated) {
		struct cpuset *cs;
		struct cgroup *pos_cgrp;

		rcu_read_lock();
		cpuset_for_each_descendant_pre(cs, pos_cgrp, &top_cpuset) {
			if (!css_tryget(&cs->css))
				continue;
			rcu_read_unlock();

			cpuset_hotplug_update_tasks(cs);

			rcu_read_lock();
			css_put(&cs->css);
		}
		rcu_read_unlock();
	}

	/* rebuild sched domains if cpus_allowed has changed */
	if (cpus_updated)
		rebuild_sched_domains();
}
void cpuset_update_active_cpus(bool cpu_online)
{
	/*
	 * We're inside cpu hotplug critical region which usually nests
	 * inside cgroup synchronization.  Bounce actual hotplug processing
	 * to a work item to avoid reverse locking order.
	 *
	 * We still need to do partition_sched_domains() synchronously;
	 * otherwise, the scheduler will get confused and put tasks to the
	 * dead CPU.  Fall back to the default single domain.
	 * cpuset_hotplug_workfn() will rebuild it as necessary.
	 */
	partition_sched_domains(1, NULL, NULL);
	schedule_work(&cpuset_hotplug_work);
}
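/*
 * The cpuset and sched-domain rework itself therefore happens later, in
 * process context, when the queued cpuset_hotplug_work item runs
 * cpuset_hotplug_workfn() above.
 */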
/*
 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
 * Call this routine anytime after node_states[N_MEMORY] changes.
 * See cpuset_update_active_cpus() for CPU hotplug handling.
 */
static int cpuset_track_online_nodes(struct notifier_block *self,
				     unsigned long action, void *arg)
{
	schedule_work(&cpuset_hotplug_work);
	return NOTIFY_OK;
}

static struct notifier_block cpuset_track_online_nodes_nb = {
	.notifier_call = cpuset_track_online_nodes,
	.priority = 10,		/* ??! */
};
/**
 * cpuset_init_smp - initialize cpus_allowed
 *
 * Description: Finish top cpuset after cpu, node maps are initialized.
 **/
void __init cpuset_init_smp(void)
{
	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
	top_cpuset.mems_allowed = node_states[N_MEMORY];
	top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;

	register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
}
/**
 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
 *
 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of cpu_online_mask, even if this means going outside the
 * task's cpuset.
 **/

void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
	struct cpuset *cpus_cs;

	mutex_lock(&callback_mutex);
	task_lock(tsk);
	cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
	guarantee_online_cpus(cpus_cs, pmask);
	task_unlock(tsk);
	mutex_unlock(&callback_mutex);
}
void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
{
	const struct cpuset *cpus_cs;

	rcu_read_lock();
	cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
	do_set_cpus_allowed(tsk, cpus_cs->cpus_allowed);
	rcu_read_unlock();

	/*
	 * We own tsk->cpus_allowed, nobody can change it under us.
	 *
	 * But we used cs && cs->cpus_allowed lockless and thus can
	 * race with cgroup_attach_task() or update_cpumask() and get
	 * the wrong tsk->cpus_allowed.  However, both cases imply the
	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
	 * which takes task_rq_lock().
	 *
	 * If we are called after it dropped the lock we must see all
	 * changes in tsk_cs()->cpus_allowed.  Otherwise we can temporarily
	 * set any mask even if it is not right from task_cs() pov,
	 * the pending set_cpus_allowed_ptr() will fix things.
	 *
	 * select_fallback_rq() will fix things up and set cpu_possible_mask
	 * if required.
	 */
}
void cpuset_init_current_mems_allowed(void)
{
	nodes_setall(current->mems_allowed);
}
/**
 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
 *
 * Description: Returns the nodemask_t mems_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of node_states[N_MEMORY], even if this means going outside the
 * task's cpuset.
 **/

nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
{
	nodemask_t mask;
	struct cpuset *mems_cs;

	mutex_lock(&callback_mutex);
	task_lock(tsk);
	mems_cs = effective_nodemask_cpuset(task_cs(tsk));
	guarantee_online_mems(mems_cs, &mask);
	task_unlock(tsk);
	mutex_unlock(&callback_mutex);

	return mask;
}
/**
 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
 * @nodemask: the nodemask to be checked
 *
 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
 */
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return nodes_intersects(*nodemask, current->mems_allowed);
}
/*
 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
 * mem_hardwall ancestor to the specified cpuset.  Call holding
 * callback_mutex.  If no ancestor is mem_exclusive or mem_hardwall
 * (an unusual configuration), then returns the root cpuset.
 */
static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
{
	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
		cs = parent_cs(cs);
	return cs;
}
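/*
 * For example, with /A marked mem_hardwall and /A/B neither mem_exclusive
 * nor mem_hardwall, a task in /A/B gets /A back from this walk; if no
 * cpuset on the path is hardwalled, the walk stops at the root cpuset.
 */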
/**
 * cpuset_node_allowed_softwall - Can we allocate on a memory node?
 * @node: is this an allowed node?
 * @gfp_mask: memory allocation flags
 *
 * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
 * set, yes, we can always allocate.  If node is in our task's mems_allowed,
 * yes.  If it's not a __GFP_HARDWALL request and this node is in the nearest
 * hardwalled cpuset ancestor to this task's cpuset, yes.  If the task has been
 * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE
 * flag, yes.
 * Otherwise, no.
 *
 * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
 * cpuset_node_allowed_hardwall().  Otherwise, cpuset_node_allowed_softwall()
 * might sleep, and might allow a node from an enclosing cpuset.
 *
 * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
 * cpusets, and never sleeps.
 *
 * The __GFP_THISNODE placement logic is really handled elsewhere,
 * by forcibly using a zonelist starting at a specified node, and by
 * (in get_page_from_freelist()) refusing to consider the zones for
 * any node on the zonelist except the first.  By the time any such
 * calls get to this routine, we should just shut up and say 'yes'.
 *
 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
 * and do not allow allocations outside the current task's cpuset
 * unless the task has been OOM killed and is marked TIF_MEMDIE.
 * GFP_KERNEL allocations are not so marked, so can escape to the
 * nearest enclosing hardwalled ancestor cpuset.
 *
 * Scanning up parent cpusets requires callback_mutex.  The
 * __alloc_pages() routine only calls here with the __GFP_HARDWALL bit
 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
 * current task's mems_allowed came up empty on the first pass over
 * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
 * cpuset are short of memory, might require taking callback_mutex.
 *
 * The first call here from mm/page_alloc:get_page_from_freelist()
 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
 * so no allocation on a node outside the cpuset is allowed (unless
 * in interrupt, of course).
 *
 * The second pass through get_page_from_freelist() doesn't even call
 * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
 * in alloc_flags.  That logic and the checks below have the combined
 * effect that:
 *	in_interrupt - any node ok (current task context irrelevant)
 *	GFP_ATOMIC   - any node ok
 *	TIF_MEMDIE   - any node ok
 *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
 *	GFP_USER     - only nodes in current task's mems_allowed ok.
 *
 * Rule:
 *	Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
 *	pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
 *	the code that might scan up ancestor cpusets and sleep.
 */
int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	const struct cpuset *cs;	/* current cpuset ancestors */
	int allowed;			/* is allocation in zone z allowed? */

	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
		return 1;
	might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
	if (node_isset(node, current->mems_allowed))
		return 1;
	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)))
		return 1;
	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
		return 0;

	if (current->flags & PF_EXITING) /* Let dying task have memory */
		return 1;

	/* Not hardwall and node outside mems_allowed: scan up cpusets */
	mutex_lock(&callback_mutex);

	task_lock(current);
	cs = nearest_hardwall_ancestor(task_cs(current));
	task_unlock(current);

	allowed = node_isset(node, cs->mems_allowed);
	mutex_unlock(&callback_mutex);
	return allowed;
}
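/*
 * For instance, a GFP_KERNEL allocation whose target node is outside the
 * task's mems_allowed still succeeds here when that node belongs to the
 * nearest hardwalled ancestor located above, matching the "GFP_KERNEL -
 * any node in enclosing hardwalled cpuset ok" rule in the comment.
 */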
/**
 * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
 * @node: is this an allowed node?
 * @gfp_mask: memory allocation flags
 *
 * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
 * set, yes, we can always allocate.  If node is in our task's mems_allowed,
 * yes.  If the task has been OOM killed and has access to memory reserves as
 * specified by the TIF_MEMDIE flag, yes.
 * Otherwise, no.
 *
 * The __GFP_THISNODE placement logic is really handled elsewhere,
 * by forcibly using a zonelist starting at a specified node, and by
 * (in get_page_from_freelist()) refusing to consider the zones for
 * any node on the zonelist except the first.  By the time any such
 * calls get to this routine, we should just shut up and say 'yes'.
 *
 * Unlike the cpuset_node_allowed_softwall() variant, above,
 * this variant requires that the node be in the current task's
 * mems_allowed or that we're in interrupt.  It does not scan up the
 * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
 * It never sleeps.
 */
int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
		return 1;
	if (node_isset(node, current->mems_allowed))
		return 1;
	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)))
		return 1;
	return 0;
}
/**
 * cpuset_mem_spread_node() - On which node to begin search for a file page
 * cpuset_slab_spread_node() - On which node to begin search for a slab page
 *
 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
 * tasks in a cpuset with is_spread_page or is_spread_slab set),
 * and if the memory allocation used cpuset_mem_spread_node()
 * to determine on which node to start looking, as it will for
 * certain page cache or slab cache pages such as used for file
 * system buffers and inode caches, then instead of starting on the
 * local node to look for a free page, rather spread the starting
 * node around the task's mems_allowed nodes.
 *
 * We don't have to worry about the returned node being offline
 * because "it can't happen", and even if it did, it would be ok.
 *
 * The routines calling guarantee_online_mems() are careful to
 * only set nodes in task->mems_allowed that are online.  So it
 * should not be possible for the following code to return an
 * offline node.  But if it did, that would be ok, as this routine
 * is not returning the node where the allocation must be, only
 * the node where the search should start.  The zonelist passed to
 * __alloc_pages() will include all nodes.  If the slab allocator
 * is passed an offline node, it will fall back to the local node.
 * See kmem_cache_alloc_node().
 */

static int cpuset_spread_node(int *rotor)
{
	int node;

	node = next_node(*rotor, current->mems_allowed);
	if (node == MAX_NUMNODES)
		node = first_node(current->mems_allowed);
	*rotor = node;
	return node;
}
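/*
 * Worked example: with mems_allowed = {0,2,5} and *rotor == 2, this
 * call returns node 5; the next call finds no allowed node above 5,
 * wraps via first_node() and returns node 0, leaving *rotor == 0 for
 * the call after that.  The rotor thus round-robins over the allowed
 * nodes.
 */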
int cpuset_mem_spread_node(void)
{
	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
		current->cpuset_mem_spread_rotor =
			node_random(&current->mems_allowed);

	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
}

int cpuset_slab_spread_node(void)
{
	if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
		current->cpuset_slab_spread_rotor =
			node_random(&current->mems_allowed);

	return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
}

EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
/**
 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
 * @tsk1: pointer to task_struct of some task.
 * @tsk2: pointer to task_struct of some other task.
 *
 * Description: Return true if @tsk1's mems_allowed intersects the
 * mems_allowed of @tsk2.  Used by the OOM killer to determine if
 * one of the task's memory usage might impact the memory available
 * to the other.
 **/

int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
				   const struct task_struct *tsk2)
{
	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
}
#define CPUSET_NODELIST_LEN	(256)

/**
 * cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed
 * @tsk: pointer to task_struct of some task.
 *
 * Description: Prints @tsk's name, cpuset name, and cached copy of its
 * mems_allowed to the kernel log.  Must hold task_lock(task) to allow
 * dereferencing task_cs(task).
 */
void cpuset_print_task_mems_allowed(struct task_struct *tsk)
{
	/* Statically allocated to prevent using excess stack. */
	static char cpuset_nodelist[CPUSET_NODELIST_LEN];
	static DEFINE_SPINLOCK(cpuset_buffer_lock);

	struct cgroup *cgrp = task_cs(tsk)->css.cgroup;

	spin_lock(&cpuset_buffer_lock);

	nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
			   tsk->mems_allowed);
	printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
	       tsk->comm, cgroup_name(cgrp), cpuset_nodelist);

	spin_unlock(&cpuset_buffer_lock);
}
/*
 * Collection of memory_pressure is suppressed unless
 * this flag is enabled by writing "1" to the special
 * cpuset file 'memory_pressure_enabled' in the root cpuset.
 */

int cpuset_memory_pressure_enabled __read_mostly;
/**
 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
 *
 * Keep a running average of the rate of synchronous (direct)
 * page reclaim efforts initiated by tasks in each cpuset.
 *
 * This represents the rate at which some task in the cpuset
 * ran low on memory on all nodes it was allowed to use, and
 * had to enter the kernel's page reclaim code in an effort to
 * create more free memory by tossing clean pages or swapping
 * or writing dirty pages.
 *
 * Display to user space in the per-cpuset read-only file
 * "memory_pressure".  Value displayed is an integer
 * representing the recent rate of entry into the synchronous
 * (direct) page reclaim by any task attached to the cpuset.
 */
void __cpuset_memory_pressure_bump(void)
{
	task_lock(current);
	fmeter_markevent(&task_cs(current)->fmeter);
	task_unlock(current);
}
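/*
 * Illustrative usage from user space (the cgroup v1 mount point and the
 * "cpuset." file prefix depend on how the controller is mounted):
 *
 *	echo 1 > /sys/fs/cgroup/cpuset/cpuset.memory_pressure_enabled
 *	cat /sys/fs/cgroup/cpuset/<name>/cpuset.memory_pressure
 *
 * The value read back is the fmeter output described above: a recent
 * rate of direct-reclaim events by tasks attached to that cpuset.
 */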
#ifdef CONFIG_PROC_PID_CPUSET
/*
 * proc_cpuset_show()
 *  - Print task's cpuset path into seq_file.
 *  - Used for /proc/<pid>/cpuset.
 *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
 *    doesn't really matter if tsk->cpuset changes after we read it,
 *    and we take cpuset_mutex, keeping cpuset_attach() from changing it
 *    anyway.
 */
int proc_cpuset_show(struct seq_file *m, void *unused_v)
{
	struct pid *pid;
	struct task_struct *tsk;
	char *buf;
	struct cgroup_subsys_state *css;
	int retval;

	retval = -ENOMEM;
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		goto out;

	retval = -ESRCH;
	pid = m->private;
	tsk = get_pid_task(pid, PIDTYPE_PID);
	if (!tsk)
		goto out_free;

	mutex_lock(&cpuset_mutex);
	css = task_subsys_state(tsk, cpuset_subsys_id);
	retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
	if (retval < 0)
		goto out_unlock;
	seq_puts(m, buf);
	seq_putc(m, '\n');
out_unlock:
	mutex_unlock(&cpuset_mutex);
	put_task_struct(tsk);
out_free:
	kfree(buf);
out:
	return retval;
}
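/*
 * Example: for a task attached to cpuset /mygroup, reading
 * /proc/<pid>/cpuset produces the single line "/mygroup"; a task in
 * the root cpuset shows "/".
 */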
#endif /* CONFIG_PROC_PID_CPUSET */
/* Display task mems_allowed in /proc/<pid>/status file. */
void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
{
	seq_printf(m, "Mems_allowed:\t");
	seq_nodemask(m, &task->mems_allowed);
	seq_printf(m, "\n");
	seq_printf(m, "Mems_allowed_list:\t");
	seq_nodemask_list(m, &task->mems_allowed);
	seq_printf(m, "\n");
}
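/*
 * Illustrative /proc/<pid>/status output produced by the code above on
 * a two-node machine (the width of the hex mask depends on the
 * configured maximum number of nodes):
 *
 *	Mems_allowed:	00000000,00000003
 *	Mems_allowed_list:	0-1
 */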