1 /*
2 * kernel/cgroup/cpuset.c
3 *
4 * Processor and Memory placement constraints for sets of tasks.
5 *
6 * Copyright (C) 2003 BULL SA.
7 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
8 * Copyright (C) 2006 Google, Inc
9 *
10 * Portions derived from Patrick Mochel's sysfs code.
11 * sysfs is Copyright (c) 2001-3 Patrick Mochel
12 *
13 * 2003-10-10 Written by Simon Derr.
14 * 2003-10-22 Updates by Stephen Hemminger.
15 * 2004 May-July Rework by Paul Jackson.
16 * 2006 Rework by Paul Menage to use generic cgroups
17 * 2008 Rework of the scheduler domains and CPU hotplug handling
18 * by Max Krasnyansky
19 *
20 * This file is subject to the terms and conditions of the GNU General Public
21 * License. See the file COPYING in the main directory of the Linux
22 * distribution for more details.
23 */
24
25 #include <linux/cpu.h>
26 #include <linux/cpumask.h>
27 #include <linux/cpuset.h>
28 #include <linux/err.h>
29 #include <linux/errno.h>
30 #include <linux/file.h>
31 #include <linux/fs.h>
32 #include <linux/init.h>
33 #include <linux/interrupt.h>
34 #include <linux/kernel.h>
35 #include <linux/kmod.h>
36 #include <linux/list.h>
37 #include <linux/mempolicy.h>
38 #include <linux/mm.h>
39 #include <linux/memory.h>
40 #include <linux/export.h>
41 #include <linux/mount.h>
42 #include <linux/namei.h>
43 #include <linux/pagemap.h>
44 #include <linux/proc_fs.h>
45 #include <linux/rcupdate.h>
46 #include <linux/sched.h>
47 #include <linux/sched/mm.h>
48 #include <linux/sched/task.h>
49 #include <linux/seq_file.h>
50 #include <linux/security.h>
51 #include <linux/slab.h>
52 #include <linux/spinlock.h>
53 #include <linux/stat.h>
54 #include <linux/string.h>
55 #include <linux/time.h>
56 #include <linux/time64.h>
57 #include <linux/backing-dev.h>
58 #include <linux/sort.h>
59
60 #include <linux/uaccess.h>
61 #include <linux/atomic.h>
62 #include <linux/mutex.h>
63 #include <linux/cgroup.h>
64 #include <linux/wait.h>
65
66 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
67
68 /* See "Frequency meter" comments, below. */
69
70 struct fmeter {
71 int cnt; /* unprocessed events count */
72 int val; /* most recent output value */
73 time64_t time; /* clock (secs) when val computed */
74 spinlock_t lock; /* guards read or write of above */
75 };
76
77 struct cpuset {
78 struct cgroup_subsys_state css;
79
80 unsigned long flags; /* "unsigned long" so bitops work */
81
82 /*
83 * On default hierarchy:
84 *
85 * The user-configured masks can only be changed by writing to
86 * cpuset.cpus and cpuset.mems, and won't be limited by the
87 * parent masks.
88 *
89 * The effective masks are the real masks that apply to the tasks
90 * in the cpuset. They may be changed if the configured masks are
91 * changed or hotplug happens.
92 *
93 * effective_mask == configured_mask & parent's effective_mask,
94 * and if it ends up empty, it will inherit the parent's mask.
95 *
96 *
97 * On legacy hierarchy:
98 *
99 * The user-configured masks are always the same as the effective masks.
100 */
101
102 * user-configured CPUs and Memory Nodes allowed to tasks
103 cpumask_var_t cpus_allowed;
104 nodemask_t mems_allowed;
105
106 * effective CPUs and Memory Nodes allowed to tasks
107 cpumask_var_t effective_cpus;
108 nodemask_t effective_mems;
109
110 /*
111 * This is the old Memory Nodes that tasks took on.
112 *
113 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
114 * - A new cpuset's old_mems_allowed is initialized when some
115 * task is moved into it.
116 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
117 * cpuset.mems_allowed and have the tasks' nodemasks updated;
118 * old_mems_allowed is then updated to mems_allowed.
119 */
120 nodemask_t old_mems_allowed;
121
122 struct fmeter fmeter; /* memory_pressure filter */
123
124 /*
125 * Number of tasks being attached to this cpuset. Used to prevent
126 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
127 */
128 int attach_in_progress;
129
130 /* partition number for rebuild_sched_domains() */
131 int pn;
132
133 /* for custom sched domain */
134 int relax_domain_level;
135 };
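/*
 * Illustrative example (not part of the code above; values are made up):
 * on the default hierarchy, with a parent whose effective_cpus is 0-3 and
 * a child configured with cpuset.cpus = 2-5, the child's effective_cpus is
 * the intersection 2-3. If the intersection were empty, the child would
 * fall back to the parent's effective mask, roughly:
 *
 *	cpumask_and(child->effective_cpus, child->cpus_allowed,
 *		    parent->effective_cpus);
 *	if (cpumask_empty(child->effective_cpus))
 *		cpumask_copy(child->effective_cpus, parent->effective_cpus);
 */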
136
137 static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
138 {
139 return css ? container_of(css, struct cpuset, css) : NULL;
140 }
141
142 /* Retrieve the cpuset for a task */
143 static inline struct cpuset *task_cs(struct task_struct *task)
144 {
145 return css_cs(task_css(task, cpuset_cgrp_id));
146 }
147
148 static inline struct cpuset *parent_cs(struct cpuset *cs)
149 {
150 return css_cs(cs->css.parent);
151 }
152
153 #ifdef CONFIG_NUMA
154 static inline bool task_has_mempolicy(struct task_struct *task)
155 {
156 return task->mempolicy;
157 }
158 #else
159 static inline bool task_has_mempolicy(struct task_struct *task)
160 {
161 return false;
162 }
163 #endif
164
165
166 /* bits in struct cpuset flags field */
167 typedef enum {
168 CS_ONLINE,
169 CS_CPU_EXCLUSIVE,
170 CS_MEM_EXCLUSIVE,
171 CS_MEM_HARDWALL,
172 CS_MEMORY_MIGRATE,
173 CS_SCHED_LOAD_BALANCE,
174 CS_SPREAD_PAGE,
175 CS_SPREAD_SLAB,
176 } cpuset_flagbits_t;
177
178 /* convenient tests for these bits */
179 static inline bool is_cpuset_online(struct cpuset *cs)
180 {
181 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
182 }
183
184 static inline int is_cpu_exclusive(const struct cpuset *cs)
185 {
186 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
187 }
188
189 static inline int is_mem_exclusive(const struct cpuset *cs)
190 {
191 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
192 }
193
194 static inline int is_mem_hardwall(const struct cpuset *cs)
195 {
196 return test_bit(CS_MEM_HARDWALL, &cs->flags);
197 }
198
199 static inline int is_sched_load_balance(const struct cpuset *cs)
200 {
201 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
202 }
203
204 static inline int is_memory_migrate(const struct cpuset *cs)
205 {
206 return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
207 }
208
209 static inline int is_spread_page(const struct cpuset *cs)
210 {
211 return test_bit(CS_SPREAD_PAGE, &cs->flags);
212 }
213
214 static inline int is_spread_slab(const struct cpuset *cs)
215 {
216 return test_bit(CS_SPREAD_SLAB, &cs->flags);
217 }
218
219 static struct cpuset top_cpuset = {
220 .flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
221 (1 << CS_MEM_EXCLUSIVE)),
222 };
223
224 /**
225 * cpuset_for_each_child - traverse online children of a cpuset
226 * @child_cs: loop cursor pointing to the current child
227 * @pos_css: used for iteration
228 * @parent_cs: target cpuset to walk children of
229 *
230 * Walk @child_cs through the online children of @parent_cs. Must be used
231 * with RCU read locked.
232 */
233 #define cpuset_for_each_child(child_cs, pos_css, parent_cs) \
234 css_for_each_child((pos_css), &(parent_cs)->css) \
235 if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
236
237 /**
238 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
239 * @des_cs: loop cursor pointing to the current descendant
240 * @pos_css: used for iteration
241 * @root_cs: target cpuset to walk descendants of
242 *
243 * Walk @des_cs through the online descendants of @root_cs. Must be used
244 * with RCU read locked. The caller may modify @pos_css by calling
245 * css_rightmost_descendant() to skip a subtree. @root_cs is included in the
246 * iteration and is the first node visited.
247 */
248 #define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs) \
249 css_for_each_descendant_pre((pos_css), &(root_cs)->css) \
250 if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
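/*
 * Example usage (a sketch mirroring callers later in this file): walk the
 * online descendants under RCU and prune a subtree once it is known not to
 * need further processing:
 *
 *	rcu_read_lock();
 *	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
 *		if (cpumask_empty(cp->cpus_allowed)) {
 *			pos_css = css_rightmost_descendant(pos_css);
 *			continue;
 *		}
 *		... process @cp ...
 *	}
 *	rcu_read_unlock();
 */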
251
252 /*
253 * There are two global locks guarding cpuset structures - cpuset_mutex and
254 * callback_lock. We also require taking task_lock() when dereferencing a
255 * task's cpuset pointer. See "The task_lock() exception", at the end of this
256 * comment.
257 *
258 * A task must hold both locks to modify cpusets. If a task holds
259 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
260 * is the only task able to also acquire callback_lock and be able to
261 * modify cpusets. It can perform various checks on the cpuset structure
262 * first, knowing nothing will change. It can also allocate memory while
263 * just holding cpuset_mutex. While it is performing these checks, various
264 * callback routines can briefly acquire callback_lock to query cpusets.
265 * Once it is ready to make the changes, it takes callback_lock, blocking
266 * everyone else.
267 *
268 * Calls to the kernel memory allocator can not be made while holding
269 * callback_lock, as that would risk double tripping on callback_lock
270 * from one of the callbacks into the cpuset code from within
271 * __alloc_pages().
272 *
273 * If a task is only holding callback_lock, then it has read-only
274 * access to cpusets.
275 *
276 * The task_struct fields mems_allowed and mempolicy may be changed by
277 * other tasks, so we use alloc_lock in the task_struct to protect
278 * them.
279 *
280 * The cpuset_common_file_read() handlers only hold callback_lock across
281 * small pieces of code, such as when reading out possibly multi-word
282 * cpumasks and nodemasks.
283 *
284 * Accessing a task's cpuset should be done in accordance with the
285 * guidelines for accessing subsystem state in kernel/cgroup.c
286 */
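/*
 * A minimal sketch of the resulting lock ordering for a writer (this is the
 * pattern the update_* helpers below follow):
 *
 *	mutex_lock(&cpuset_mutex);		(validate, allocate)
 *	spin_lock_irq(&callback_lock);		(publish the new masks)
 *	...
 *	spin_unlock_irq(&callback_lock);
 *	mutex_unlock(&cpuset_mutex);
 */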
287
288 static DEFINE_MUTEX(cpuset_mutex);
289 static DEFINE_SPINLOCK(callback_lock);
290
291 static struct workqueue_struct *cpuset_migrate_mm_wq;
292
293 /*
294 * CPU / memory hotplug is handled asynchronously.
295 */
296 static void cpuset_hotplug_workfn(struct work_struct *work);
297 static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
298
299 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
300
301 /*
302 * This is ugly, but preserves the userspace API for existing cpuset
303 * users. If someone tries to mount the "cpuset" filesystem, we
304 * silently switch it to mount "cgroup" instead.
305 */
306 static struct dentry *cpuset_mount(struct file_system_type *fs_type,
307 int flags, const char *unused_dev_name, void *data)
308 {
309 struct file_system_type *cgroup_fs = get_fs_type("cgroup");
310 struct dentry *ret = ERR_PTR(-ENODEV);
311 if (cgroup_fs) {
312 char mountopts[] =
313 "cpuset,noprefix,"
314 "release_agent=/sbin/cpuset_release_agent";
315 ret = cgroup_fs->mount(cgroup_fs, flags,
316 unused_dev_name, mountopts);
317 put_filesystem(cgroup_fs);
318 }
319 return ret;
320 }
321
322 static struct file_system_type cpuset_fs_type = {
323 .name = "cpuset",
324 .mount = cpuset_mount,
325 };
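/*
 * Rough userspace equivalence (the mount point below is illustrative only):
 *
 *	mount -t cpuset none /dev/cpuset
 *
 * behaves as if the caller had run
 *
 *	mount -t cgroup -o cpuset,noprefix,release_agent=/sbin/cpuset_release_agent \
 *		none /dev/cpuset
 */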
326
327 /*
328 * Return in pmask the portion of a cpuset's cpus_allowed that
329 * are online. If none are online, walk up the cpuset hierarchy
330 * until we find one that does have some online cpus.
331 *
332 * One way or another, we guarantee to return some non-empty subset
333 * of cpu_online_mask.
334 *
335 * Call with callback_lock or cpuset_mutex held.
336 */
337 static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
338 {
339 while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
340 cs = parent_cs(cs);
341 if (unlikely(!cs)) {
342 /*
343 * The top cpuset doesn't have any online cpu as a
344 * consequence of a race between cpuset_hotplug_work
345 * and the CPU hotplug notifier. But we know the top
346 * cpuset's effective_cpus is on its way to be
347 * identical to cpu_online_mask.
348 */
349 cpumask_copy(pmask, cpu_online_mask);
350 return;
351 }
352 }
353 cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
354 }
355
356 /*
357 * Return in *pmask the portion of a cpuset's mems_allowed that
358 * are online, with memory. If none are online with memory, walk
359 * up the cpuset hierarchy until we find one that does have some
360 * online mems. The top cpuset always has some mems online.
361 *
362 * One way or another, we guarantee to return some non-empty subset
363 * of node_states[N_MEMORY].
364 *
365 * Call with callback_lock or cpuset_mutex held.
366 */
367 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
368 {
369 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
370 cs = parent_cs(cs);
371 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
372 }
373
374 /*
375 * Update a task's spread flags to match the cpuset's page/slab spread flags.
376 *
377 * Call with callback_lock or cpuset_mutex held.
378 */
379 static void cpuset_update_task_spread_flag(struct cpuset *cs,
380 struct task_struct *tsk)
381 {
382 if (is_spread_page(cs))
383 task_set_spread_page(tsk);
384 else
385 task_clear_spread_page(tsk);
386
387 if (is_spread_slab(cs))
388 task_set_spread_slab(tsk);
389 else
390 task_clear_spread_slab(tsk);
391 }
392
393 /*
394 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
395 *
396 * One cpuset is a subset of another if all its allowed CPUs and
397 * Memory Nodes are a subset of the other, and its exclusive flags
398 * are only set if the other's are set. Call holding cpuset_mutex.
399 */
400
401 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
402 {
403 return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
404 nodes_subset(p->mems_allowed, q->mems_allowed) &&
405 is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
406 is_mem_exclusive(p) <= is_mem_exclusive(q);
407 }
408
409 /**
410 * alloc_trial_cpuset - allocate a trial cpuset
411 * @cs: the cpuset that the trial cpuset duplicates
412 */
413 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
414 {
415 struct cpuset *trial;
416
417 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
418 if (!trial)
419 return NULL;
420
421 if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL))
422 goto free_cs;
423 if (!alloc_cpumask_var(&trial->effective_cpus, GFP_KERNEL))
424 goto free_cpus;
425
426 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
427 cpumask_copy(trial->effective_cpus, cs->effective_cpus);
428 return trial;
429
430 free_cpus:
431 free_cpumask_var(trial->cpus_allowed);
432 free_cs:
433 kfree(trial);
434 return NULL;
435 }
436
437 /**
438 * free_trial_cpuset - free the trial cpuset
439 * @trial: the trial cpuset to be freed
440 */
441 static void free_trial_cpuset(struct cpuset *trial)
442 {
443 free_cpumask_var(trial->effective_cpus);
444 free_cpumask_var(trial->cpus_allowed);
445 kfree(trial);
446 }
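/*
 * Typical usage pattern for the trial cpuset (a sketch of what callers such
 * as update_flag() below do): copy the cpuset, apply the proposed change to
 * the copy, validate it, and only then commit under callback_lock:
 *
 *	trialcs = alloc_trial_cpuset(cs);
 *	if (!trialcs)
 *		return -ENOMEM;
 *	... modify trialcs ...
 *	err = validate_change(cs, trialcs);
 *	if (!err) {
 *		spin_lock_irq(&callback_lock);
 *		... copy the validated values into cs ...
 *		spin_unlock_irq(&callback_lock);
 *	}
 *	free_trial_cpuset(trialcs);
 */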
447
448 /*
449 * validate_change() - Used to validate that any proposed cpuset change
450 * follows the structural rules for cpusets.
451 *
452 * If we replaced the flag and mask values of the current cpuset
453 * (cur) with those values in the trial cpuset (trial), would
454 * our various subset and exclusive rules still be valid? Presumes
455 * cpuset_mutex held.
456 *
457 * 'cur' is the address of an actual, in-use cpuset. Operations
458 * such as list traversal that depend on the actual address of the
459 * cpuset in the list must use cur below, not trial.
460 *
461 * 'trial' is the address of bulk structure copy of cur, with
462 * perhaps one or more of the fields cpus_allowed, mems_allowed,
463 * or flags changed to new, trial values.
464 *
465 * Return 0 if valid, -errno if not.
466 */
467
468 static int validate_change(struct cpuset *cur, struct cpuset *trial)
469 {
470 struct cgroup_subsys_state *css;
471 struct cpuset *c, *par;
472 int ret;
473
474 rcu_read_lock();
475
476 /* Each of our child cpusets must be a subset of us */
477 ret = -EBUSY;
478 cpuset_for_each_child(c, css, cur)
479 if (!is_cpuset_subset(c, trial))
480 goto out;
481
482 /* Remaining checks don't apply to root cpuset */
483 ret = 0;
484 if (cur == &top_cpuset)
485 goto out;
486
487 par = parent_cs(cur);
488
489 * On legacy hierarchy, we must be a subset of our parent cpuset.
490 ret = -EACCES;
491 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
492 !is_cpuset_subset(trial, par))
493 goto out;
494
495 /*
496 * If either I or some sibling (!= me) is exclusive, we can't
497 * overlap
498 */
499 ret = -EINVAL;
500 cpuset_for_each_child(c, css, par) {
501 if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
502 c != cur &&
503 cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
504 goto out;
505 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
506 c != cur &&
507 nodes_intersects(trial->mems_allowed, c->mems_allowed))
508 goto out;
509 }
510
511 /*
512 * Cpusets with tasks - existing or newly being attached - can't
513 * be changed to have empty cpus_allowed or mems_allowed.
514 */
515 ret = -ENOSPC;
516 if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
517 if (!cpumask_empty(cur->cpus_allowed) &&
518 cpumask_empty(trial->cpus_allowed))
519 goto out;
520 if (!nodes_empty(cur->mems_allowed) &&
521 nodes_empty(trial->mems_allowed))
522 goto out;
523 }
524
525 /*
526 * We can't shrink if we won't have enough room for SCHED_DEADLINE
527 * tasks.
528 */
529 ret = -EBUSY;
530 if (is_cpu_exclusive(cur) &&
531 !cpuset_cpumask_can_shrink(cur->cpus_allowed,
532 trial->cpus_allowed))
533 goto out;
534
535 ret = 0;
536 out:
537 rcu_read_unlock();
538 return ret;
539 }
540
541 #ifdef CONFIG_SMP
542 /*
543 * Helper routine for generate_sched_domains().
544 * Do cpusets a, b have overlapping effective cpus_allowed masks?
545 */
546 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
547 {
548 return cpumask_intersects(a->effective_cpus, b->effective_cpus);
549 }
550
551 static void
552 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
553 {
554 if (dattr->relax_domain_level < c->relax_domain_level)
555 dattr->relax_domain_level = c->relax_domain_level;
556 return;
557 }
558
559 static void update_domain_attr_tree(struct sched_domain_attr *dattr,
560 struct cpuset *root_cs)
561 {
562 struct cpuset *cp;
563 struct cgroup_subsys_state *pos_css;
564
565 rcu_read_lock();
566 cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
567 /* skip the whole subtree if @cp doesn't have any CPU */
568 if (cpumask_empty(cp->cpus_allowed)) {
569 pos_css = css_rightmost_descendant(pos_css);
570 continue;
571 }
572
573 if (is_sched_load_balance(cp))
574 update_domain_attr(dattr, cp);
575 }
576 rcu_read_unlock();
577 }
578
579 /*
580 * generate_sched_domains()
581 *
582 * This function builds a partial partition of the system's CPUs.
583 * A 'partial partition' is a set of non-overlapping subsets whose
584 * union is a subset of that set.
585 * The output of this function needs to be passed to kernel/sched/core.c
586 * partition_sched_domains() routine, which will rebuild the scheduler's
587 * load balancing domains (sched domains) as specified by that partial
588 * partition.
589 *
590 * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
591 * for a background explanation of this.
592 *
593 * Does not return errors, on the theory that the callers of this
594 * routine would rather not worry about failures to rebuild sched
595 * domains when operating in the severe memory shortage situations
596 * that could cause allocation failures below.
597 *
598 * Must be called with cpuset_mutex held.
599 *
600 * The three key local variables below are:
601 * cp - cpuset pointer, used (together with pos_css) to perform a
602 * top-down scan of all cpusets. This scan loads a pointer
603 * to each cpuset marked is_sched_load_balance into the
604 * array 'csa'. For our purposes, rebuilding the scheduler's
605 * sched domains, we can ignore !is_sched_load_balance cpusets.
606 * csa - (for CpuSet Array) Array of pointers to all the cpusets
607 * that need to be load balanced, for convenient iterative
608 * access by the subsequent code that finds the best partition,
609 * i.e. the set of domains (subsets) of CPUs such that the
610 * cpus_allowed of every cpuset marked is_sched_load_balance
611 * is a subset of one of these domains, while there are as
612 * many such domains as possible, each as small as possible.
613 * doms - Conversion of 'csa' to an array of cpumasks, for passing to
614 * the kernel/sched/core.c routine partition_sched_domains() in a
615 * convenient format, that can be easily compared to the prior
616 * value to determine what partition elements (sched domains)
617 * were changed (added or removed.)
618 *
619 * Finding the best partition (set of domains):
620 * The triple nested loops below over i, j, k scan over the
621 * load balanced cpusets (using the array of cpuset pointers in
622 * csa[]) looking for pairs of cpusets that have overlapping
623 * cpus_allowed, but which don't have the same 'pn' partition
624 * number, and puts them in the same partition number. It keeps
625 * looping on the 'restart' label until it can no longer find
626 * any such pairs.
627 *
628 * The union of the cpus_allowed masks from the set of
629 * all cpusets having the same 'pn' value then form the one
630 * element of the partition (one sched domain) to be passed to
631 * partition_sched_domains().
632 */
633 static int generate_sched_domains(cpumask_var_t **domains,
634 struct sched_domain_attr **attributes)
635 {
636 struct cpuset *cp; /* top-down scan of cpusets */
637 struct cpuset **csa; /* array of all cpuset ptrs */
638 int csn; /* how many cpuset ptrs in csa so far */
639 int i, j, k; /* indices for partition finding loops */
640 cpumask_var_t *doms; /* resulting partition; i.e. sched domains */
641 cpumask_var_t non_isolated_cpus; /* load balanced CPUs */
642 struct sched_domain_attr *dattr; /* attributes for custom domains */
643 int ndoms = 0; /* number of sched domains in result */
644 int nslot; /* next empty doms[] struct cpumask slot */
645 struct cgroup_subsys_state *pos_css;
646
647 doms = NULL;
648 dattr = NULL;
649 csa = NULL;
650
651 if (!alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL))
652 goto done;
653 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
654
655 /* Special case for the 99% of systems with one, full, sched domain */
656 if (is_sched_load_balance(&top_cpuset)) {
657 ndoms = 1;
658 doms = alloc_sched_domains(ndoms);
659 if (!doms)
660 goto done;
661
662 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
663 if (dattr) {
664 *dattr = SD_ATTR_INIT;
665 update_domain_attr_tree(dattr, &top_cpuset);
666 }
667 cpumask_and(doms[0], top_cpuset.effective_cpus,
668 non_isolated_cpus);
669
670 goto done;
671 }
672
673 csa = kmalloc(nr_cpusets() * sizeof(cp), GFP_KERNEL);
674 if (!csa)
675 goto done;
676 csn = 0;
677
678 rcu_read_lock();
679 cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
680 if (cp == &top_cpuset)
681 continue;
682 /*
683 * Continue traversing beyond @cp iff @cp has some CPUs and
684 * isn't load balancing. The former is obvious. The
685 * latter: All child cpusets contain a subset of the
686 * parent's cpus, so just skip them, and then we call
687 * update_domain_attr_tree() to calc relax_domain_level of
688 * the corresponding sched domain.
689 */
690 if (!cpumask_empty(cp->cpus_allowed) &&
691 !(is_sched_load_balance(cp) &&
692 cpumask_intersects(cp->cpus_allowed, non_isolated_cpus)))
693 continue;
694
695 if (is_sched_load_balance(cp))
696 csa[csn++] = cp;
697
698 /* skip @cp's subtree */
699 pos_css = css_rightmost_descendant(pos_css);
700 }
701 rcu_read_unlock();
702
703 for (i = 0; i < csn; i++)
704 csa[i]->pn = i;
705 ndoms = csn;
706
707 restart:
708 /* Find the best partition (set of sched domains) */
709 for (i = 0; i < csn; i++) {
710 struct cpuset *a = csa[i];
711 int apn = a->pn;
712
713 for (j = 0; j < csn; j++) {
714 struct cpuset *b = csa[j];
715 int bpn = b->pn;
716
717 if (apn != bpn && cpusets_overlap(a, b)) {
718 for (k = 0; k < csn; k++) {
719 struct cpuset *c = csa[k];
720
721 if (c->pn == bpn)
722 c->pn = apn;
723 }
724 ndoms--; /* one less element */
725 goto restart;
726 }
727 }
728 }
729
730 /*
731 * Now we know how many domains to create.
732 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
733 */
734 doms = alloc_sched_domains(ndoms);
735 if (!doms)
736 goto done;
737
738 /*
739 * The rest of the code, including the scheduler, can deal with
740 * dattr==NULL case. No need to abort if alloc fails.
741 */
742 dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
743
744 for (nslot = 0, i = 0; i < csn; i++) {
745 struct cpuset *a = csa[i];
746 struct cpumask *dp;
747 int apn = a->pn;
748
749 if (apn < 0) {
750 /* Skip completed partitions */
751 continue;
752 }
753
754 dp = doms[nslot];
755
756 if (nslot == ndoms) {
757 static int warnings = 10;
758 if (warnings) {
759 pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
760 nslot, ndoms, csn, i, apn);
761 warnings--;
762 }
763 continue;
764 }
765
766 cpumask_clear(dp);
767 if (dattr)
768 *(dattr + nslot) = SD_ATTR_INIT;
769 for (j = i; j < csn; j++) {
770 struct cpuset *b = csa[j];
771
772 if (apn == b->pn) {
773 cpumask_or(dp, dp, b->effective_cpus);
774 cpumask_and(dp, dp, non_isolated_cpus);
775 if (dattr)
776 update_domain_attr_tree(dattr + nslot, b);
777
778 /* Done with this partition */
779 b->pn = -1;
780 }
781 }
782 nslot++;
783 }
784 BUG_ON(nslot != ndoms);
785
786 done:
787 free_cpumask_var(non_isolated_cpus);
788 kfree(csa);
789
790 /*
791 * Fallback to the default domain if kmalloc() failed.
792 * See comments in partition_sched_domains().
793 */
794 if (doms == NULL)
795 ndoms = 1;
796
797 *domains = doms;
798 *attributes = dattr;
799 return ndoms;
800 }
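/*
 * Worked example (hypothetical cpusets, for illustration only): with three
 * load-balanced cpusets A (effective CPUs 0-1), B (1-2) and C (4-5), the
 * loops above start with pn values 0, 1 and 2. A and B overlap, so B's pn
 * is folded into A's and ndoms drops to 2. The result is two sched domains:
 * {0-2} (the union of A and B) and {4-5} (C).
 */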
801
802 /*
803 * Rebuild scheduler domains.
804 *
805 * If the flag 'sched_load_balance' of any cpuset with non-empty
806 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
807 * which has that flag enabled, or if any cpuset with a non-empty
808 * 'cpus' is removed, then call this routine to rebuild the
809 * scheduler's dynamic sched domains.
810 *
811 * Call with cpuset_mutex held. Takes get_online_cpus().
812 */
813 static void rebuild_sched_domains_locked(void)
814 {
815 struct sched_domain_attr *attr;
816 cpumask_var_t *doms;
817 int ndoms;
818
819 lockdep_assert_held(&cpuset_mutex);
820 get_online_cpus();
821
822 /*
823 * We have raced with CPU hotplug. Don't do anything to avoid
824 * passing doms with an offlined cpu to partition_sched_domains().
825 * Anyway, the hotplug work item will rebuild the sched domains.
826 */
827 if (!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
828 goto out;
829
830 /* Generate domain masks and attrs */
831 ndoms = generate_sched_domains(&doms, &attr);
832
833 /* Have scheduler rebuild the domains */
834 partition_sched_domains(ndoms, doms, attr);
835 out:
836 put_online_cpus();
837 }
838 #else /* !CONFIG_SMP */
839 static void rebuild_sched_domains_locked(void)
840 {
841 }
842 #endif /* CONFIG_SMP */
843
844 void rebuild_sched_domains(void)
845 {
846 mutex_lock(&cpuset_mutex);
847 rebuild_sched_domains_locked();
848 mutex_unlock(&cpuset_mutex);
849 }
850
851 /**
852 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
853 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
854 *
855 * Iterate through each task of @cs updating its cpus_allowed to the
856 * effective cpuset's. As this function is called with cpuset_mutex held,
857 * cpuset membership stays stable.
858 */
859 static void update_tasks_cpumask(struct cpuset *cs)
860 {
861 struct css_task_iter it;
862 struct task_struct *task;
863
864 css_task_iter_start(&cs->css, &it);
865 while ((task = css_task_iter_next(&it)))
866 set_cpus_allowed_ptr(task, cs->effective_cpus);
867 css_task_iter_end(&it);
868 }
869
870 /*
871 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
872 * @cs: the cpuset to consider
873 * @new_cpus: temp variable for calculating new effective_cpus
874 *
875 * When the configured cpumask is changed, the effective cpumasks of this cpuset
876 * and all its descendants need to be updated.
877 *
878 * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
879 *
880 * Called with cpuset_mutex held
881 */
882 static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
883 {
884 struct cpuset *cp;
885 struct cgroup_subsys_state *pos_css;
886 bool need_rebuild_sched_domains = false;
887
888 rcu_read_lock();
889 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
890 struct cpuset *parent = parent_cs(cp);
891
892 cpumask_and(new_cpus, cp->cpus_allowed, parent->effective_cpus);
893
894 /*
895 * If it becomes empty, inherit the effective mask of the
896 * parent, which is guaranteed to have some CPUs.
897 */
898 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
899 cpumask_empty(new_cpus))
900 cpumask_copy(new_cpus, parent->effective_cpus);
901
902 /* Skip the whole subtree if the cpumask remains the same. */
903 if (cpumask_equal(new_cpus, cp->effective_cpus)) {
904 pos_css = css_rightmost_descendant(pos_css);
905 continue;
906 }
907
908 if (!css_tryget_online(&cp->css))
909 continue;
910 rcu_read_unlock();
911
912 spin_lock_irq(&callback_lock);
913 cpumask_copy(cp->effective_cpus, new_cpus);
914 spin_unlock_irq(&callback_lock);
915
916 WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
917 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
918
919 update_tasks_cpumask(cp);
920
921 /*
922 * If the effective cpumask of any non-empty cpuset is changed,
923 * we need to rebuild sched domains.
924 */
925 if (!cpumask_empty(cp->cpus_allowed) &&
926 is_sched_load_balance(cp))
927 need_rebuild_sched_domains = true;
928
929 rcu_read_lock();
930 css_put(&cp->css);
931 }
932 rcu_read_unlock();
933
934 if (need_rebuild_sched_domains)
935 rebuild_sched_domains_locked();
936 }
937
938 /**
939 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
940 * @cs: the cpuset to consider
941 * @trialcs: trial cpuset
942 * @buf: buffer of cpu numbers written to this cpuset
943 */
944 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
945 const char *buf)
946 {
947 int retval;
948
949 /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
950 if (cs == &top_cpuset)
951 return -EACCES;
952
953 /*
954 * An empty cpus_allowed is ok only if the cpuset has no tasks.
955 * Since cpulist_parse() fails on an empty mask, we special case
956 * that parsing. The validate_change() call ensures that cpusets
957 * with tasks have cpus.
958 */
959 if (!*buf) {
960 cpumask_clear(trialcs->cpus_allowed);
961 } else {
962 retval = cpulist_parse(buf, trialcs->cpus_allowed);
963 if (retval < 0)
964 return retval;
965
966 if (!cpumask_subset(trialcs->cpus_allowed,
967 top_cpuset.cpus_allowed))
968 return -EINVAL;
969 }
970
971 /* Nothing to do if the cpus didn't change */
972 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
973 return 0;
974
975 retval = validate_change(cs, trialcs);
976 if (retval < 0)
977 return retval;
978
979 spin_lock_irq(&callback_lock);
980 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
981 spin_unlock_irq(&callback_lock);
982
983 /* use trialcs->cpus_allowed as a temp variable */
984 update_cpumasks_hier(cs, trialcs->cpus_allowed);
985 return 0;
986 }
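/*
 * From userspace this is reached by writing a cpulist to the "cpus" control
 * file, e.g. (the mount point and group name are illustrative):
 *
 *	echo "0-3,6" > /sys/fs/cgroup/cpuset/mygroup/cpuset.cpus
 *
 * An empty write ("") is accepted only while the cpuset has no tasks.
 */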
987
988 /*
989 * Migrate memory region from one set of nodes to another. This is
990 * performed asynchronously as it can be called from process migration path
991 * holding locks involved in process management. All mm migrations are
992 * performed in the queued order and can be waited for by flushing
993 * cpuset_migrate_mm_wq.
994 */
995
996 struct cpuset_migrate_mm_work {
997 struct work_struct work;
998 struct mm_struct *mm;
999 nodemask_t from;
1000 nodemask_t to;
1001 };
1002
1003 static void cpuset_migrate_mm_workfn(struct work_struct *work)
1004 {
1005 struct cpuset_migrate_mm_work *mwork =
1006 container_of(work, struct cpuset_migrate_mm_work, work);
1007
1008 /* on a wq worker, no need to worry about %current's mems_allowed */
1009 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
1010 mmput(mwork->mm);
1011 kfree(mwork);
1012 }
1013
1014 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
1015 const nodemask_t *to)
1016 {
1017 struct cpuset_migrate_mm_work *mwork;
1018
1019 mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
1020 if (mwork) {
1021 mwork->mm = mm;
1022 mwork->from = *from;
1023 mwork->to = *to;
1024 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
1025 queue_work(cpuset_migrate_mm_wq, &mwork->work);
1026 } else {
1027 mmput(mm);
1028 }
1029 }
1030
1031 static void cpuset_post_attach(void)
1032 {
1033 flush_workqueue(cpuset_migrate_mm_wq);
1034 }
1035
1036 /*
1037 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
1038 * @tsk: the task to change
1039 * @newmems: new nodes that the task will be set
1040 *
1041 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
1042 * and rebind the task's mempolicy, if any. If the task is allocating in
1043 * parallel, it might temporarily see an empty intersection, which results in
1044 * a seqlock check and retry before OOM or allocation failure.
1045 */
1046 static void cpuset_change_task_nodemask(struct task_struct *tsk,
1047 nodemask_t *newmems)
1048 {
1049 task_lock(tsk);
1050
1051 local_irq_disable();
1052 write_seqcount_begin(&tsk->mems_allowed_seq);
1053
1054 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
1055 mpol_rebind_task(tsk, newmems);
1056 tsk->mems_allowed = *newmems;
1057
1058 write_seqcount_end(&tsk->mems_allowed_seq);
1059 local_irq_enable();
1060
1061 task_unlock(tsk);
1062 }
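/*
 * Sketch of the reader side of this protocol (allocator paths use helpers
 * such as read_mems_allowed_begin()/read_mems_allowed_retry() for this;
 * shown here only to illustrate the seqcount retry):
 *
 *	do {
 *		seq = read_seqcount_begin(&current->mems_allowed_seq);
 *		... consult current->mems_allowed ...
 *	} while (read_seqcount_retry(&current->mems_allowed_seq, seq));
 */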
1063
1064 static void *cpuset_being_rebound;
1065
1066 /**
1067 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1068 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1069 *
1070 * Iterate through each task of @cs updating its mems_allowed to the
1071 * effective cpuset's. As this function is called with cpuset_mutex held,
1072 * cpuset membership stays stable.
1073 */
1074 static void update_tasks_nodemask(struct cpuset *cs)
1075 {
1076 static nodemask_t newmems; /* protected by cpuset_mutex */
1077 struct css_task_iter it;
1078 struct task_struct *task;
1079
1080 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
1081
1082 guarantee_online_mems(cs, &newmems);
1083
1084 /*
1085 * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
1086 * take while holding tasklist_lock. Forks can happen - the
1087 * mpol_dup() cpuset_being_rebound check will catch such forks,
1088 * and rebind their vma mempolicies too. Because we still hold
1089 * the global cpuset_mutex, we know that no other rebind effort
1090 * will be contending for the global variable cpuset_being_rebound.
1091 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
1092 * is idempotent. Also migrate pages in each mm to new nodes.
1093 */
1094 css_task_iter_start(&cs->css, &it);
1095 while ((task = css_task_iter_next(&it))) {
1096 struct mm_struct *mm;
1097 bool migrate;
1098
1099 cpuset_change_task_nodemask(task, &newmems);
1100
1101 mm = get_task_mm(task);
1102 if (!mm)
1103 continue;
1104
1105 migrate = is_memory_migrate(cs);
1106
1107 mpol_rebind_mm(mm, &cs->mems_allowed);
1108 if (migrate)
1109 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
1110 else
1111 mmput(mm);
1112 }
1113 css_task_iter_end(&it);
1114
1115 /*
1116 * All the tasks' nodemasks have been updated, update
1117 * cs->old_mems_allowed.
1118 */
1119 cs->old_mems_allowed = newmems;
1120
1121 /* We're done rebinding vmas to this cpuset's new mems_allowed. */
1122 cpuset_being_rebound = NULL;
1123 }
1124
1125 /*
1126 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
1127 * @cs: the cpuset to consider
1128 * @new_mems: a temp variable for calculating new effective_mems
1129 *
1130 * When the configured nodemask is changed, the effective nodemasks of this cpuset
1131 * and all its descendants need to be updated.
1132 *
1133 * On legacy hierarchy, effective_mems will be the same as mems_allowed.
1134 *
1135 * Called with cpuset_mutex held
1136 */
1137 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
1138 {
1139 struct cpuset *cp;
1140 struct cgroup_subsys_state *pos_css;
1141
1142 rcu_read_lock();
1143 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
1144 struct cpuset *parent = parent_cs(cp);
1145
1146 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
1147
1148 /*
1149 * If it becomes empty, inherit the effective mask of the
1150 * parent, which is guaranteed to have some MEMs.
1151 */
1152 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
1153 nodes_empty(*new_mems))
1154 *new_mems = parent->effective_mems;
1155
1156 /* Skip the whole subtree if the nodemask remains the same. */
1157 if (nodes_equal(*new_mems, cp->effective_mems)) {
1158 pos_css = css_rightmost_descendant(pos_css);
1159 continue;
1160 }
1161
1162 if (!css_tryget_online(&cp->css))
1163 continue;
1164 rcu_read_unlock();
1165
1166 spin_lock_irq(&callback_lock);
1167 cp->effective_mems = *new_mems;
1168 spin_unlock_irq(&callback_lock);
1169
1170 WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
1171 !nodes_equal(cp->mems_allowed, cp->effective_mems));
1172
1173 update_tasks_nodemask(cp);
1174
1175 rcu_read_lock();
1176 css_put(&cp->css);
1177 }
1178 rcu_read_unlock();
1179 }
1180
1181 /*
1182 * Handle user request to change the 'mems' memory placement
1183 * of a cpuset. Needs to validate the request, update the
1184 * cpuset's mems_allowed, and for each task in the cpuset,
1185 * update mems_allowed and rebind the task's mempolicy and any vma
1186 * mempolicies, and if the cpuset is marked 'memory_migrate',
1187 * migrate the tasks' pages to the new memory.
1188 *
1189 * Call with cpuset_mutex held. May take callback_lock during call.
1190 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1191 * lock each such task's mm->mmap_sem, scan its vmas and rebind
1192 * their mempolicies to the cpuset's new mems_allowed.
1193 */
1194 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1195 const char *buf)
1196 {
1197 int retval;
1198
1199 /*
1200 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
1201 * it's read-only
1202 */
1203 if (cs == &top_cpuset) {
1204 retval = -EACCES;
1205 goto done;
1206 }
1207
1208 /*
1209 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1210 * Since nodelist_parse() fails on an empty mask, we special case
1211 * that parsing. The validate_change() call ensures that cpusets
1212 * with tasks have memory.
1213 */
1214 if (!*buf) {
1215 nodes_clear(trialcs->mems_allowed);
1216 } else {
1217 retval = nodelist_parse(buf, trialcs->mems_allowed);
1218 if (retval < 0)
1219 goto done;
1220
1221 if (!nodes_subset(trialcs->mems_allowed,
1222 top_cpuset.mems_allowed)) {
1223 retval = -EINVAL;
1224 goto done;
1225 }
1226 }
1227
1228 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
1229 retval = 0; /* Too easy - nothing to do */
1230 goto done;
1231 }
1232 retval = validate_change(cs, trialcs);
1233 if (retval < 0)
1234 goto done;
1235
1236 spin_lock_irq(&callback_lock);
1237 cs->mems_allowed = trialcs->mems_allowed;
1238 spin_unlock_irq(&callback_lock);
1239
1240 /* use trialcs->mems_allowed as a temp variable */
1241 update_nodemasks_hier(cs, &trialcs->mems_allowed);
1242 done:
1243 return retval;
1244 }
1245
1246 int current_cpuset_is_being_rebound(void)
1247 {
1248 int ret;
1249
1250 rcu_read_lock();
1251 ret = task_cs(current) == cpuset_being_rebound;
1252 rcu_read_unlock();
1253
1254 return ret;
1255 }
1256
1257 static int update_relax_domain_level(struct cpuset *cs, s64 val)
1258 {
1259 #ifdef CONFIG_SMP
1260 if (val < -1 || val >= sched_domain_level_max)
1261 return -EINVAL;
1262 #endif
1263
1264 if (val != cs->relax_domain_level) {
1265 cs->relax_domain_level = val;
1266 if (!cpumask_empty(cs->cpus_allowed) &&
1267 is_sched_load_balance(cs))
1268 rebuild_sched_domains_locked();
1269 }
1270
1271 return 0;
1272 }
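/*
 * Note on values (per the cpusets documentation; exact meanings are platform
 * dependent): -1 requests the system default, 0 disables the extra
 * idle-balance search, and increasing values widen the search range (SMT
 * siblings, cores in a package, CPUs in a node, and so on) up to
 * sched_domain_level_max. From userspace, e.g.:
 *
 *	echo 2 > cpuset.sched_relax_domain_level
 */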
1273
1274 /**
1275 * update_tasks_flags - update the spread flags of tasks in the cpuset.
1276 * @cs: the cpuset in which each task's spread flags needs to be changed
1277 *
1278 * Iterate through each task of @cs updating its spread flags. As this
1279 * function is called with cpuset_mutex held, cpuset membership stays
1280 * stable.
1281 */
1282 static void update_tasks_flags(struct cpuset *cs)
1283 {
1284 struct css_task_iter it;
1285 struct task_struct *task;
1286
1287 css_task_iter_start(&cs->css, &it);
1288 while ((task = css_task_iter_next(&it)))
1289 cpuset_update_task_spread_flag(cs, task);
1290 css_task_iter_end(&it);
1291 }
1292
1293 /*
1294 * update_flag - read a 0 or a 1 in a file and update associated flag
1295 * bit: the bit to update (see cpuset_flagbits_t)
1296 * cs: the cpuset to update
1297 * turning_on: whether the flag is being set or cleared
1298 *
1299 * Call with cpuset_mutex held.
1300 */
1301
1302 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1303 int turning_on)
1304 {
1305 struct cpuset *trialcs;
1306 int balance_flag_changed;
1307 int spread_flag_changed;
1308 int err;
1309
1310 trialcs = alloc_trial_cpuset(cs);
1311 if (!trialcs)
1312 return -ENOMEM;
1313
1314 if (turning_on)
1315 set_bit(bit, &trialcs->flags);
1316 else
1317 clear_bit(bit, &trialcs->flags);
1318
1319 err = validate_change(cs, trialcs);
1320 if (err < 0)
1321 goto out;
1322
1323 balance_flag_changed = (is_sched_load_balance(cs) !=
1324 is_sched_load_balance(trialcs));
1325
1326 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
1327 || (is_spread_page(cs) != is_spread_page(trialcs)));
1328
1329 spin_lock_irq(&callback_lock);
1330 cs->flags = trialcs->flags;
1331 spin_unlock_irq(&callback_lock);
1332
1333 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
1334 rebuild_sched_domains_locked();
1335
1336 if (spread_flag_changed)
1337 update_tasks_flags(cs);
1338 out:
1339 free_trial_cpuset(trialcs);
1340 return err;
1341 }
1342
1343 /*
1344 * Frequency meter - How fast is some event occurring?
1345 *
1346 * These routines manage a digitally filtered, constant time based,
1347 * event frequency meter. There are four routines:
1348 * fmeter_init() - initialize a frequency meter.
1349 * fmeter_markevent() - called each time the event happens.
1350 * fmeter_getrate() - returns the recent rate of such events.
1351 * fmeter_update() - internal routine used to update fmeter.
1352 *
1353 * A common data structure is passed to each of these routines,
1354 * which is used to keep track of the state required to manage the
1355 * frequency meter and its digital filter.
1356 *
1357 * The filter works on the number of events marked per unit time.
1358 * The filter is single-pole low-pass recursive (IIR). The time unit
1359 * is 1 second. Arithmetic is done using 32-bit integers scaled to
1360 * simulate 3 decimal digits of precision (multiplied by 1000).
1361 *
1362 * With an FM_COEF of 933, and a time base of 1 second, the filter
1363 * has a half-life of 10 seconds, meaning that if the events quit
1364 * happening, then the rate returned from the fmeter_getrate()
1365 * will be cut in half each 10 seconds, until it converges to zero.
1366 *
1367 * It is not worth doing a real infinitely recursive filter. If more
1368 * than FM_MAXTICKS ticks have elapsed since the last filter event,
1369 * just compute FM_MAXTICKS ticks worth, by which point the level
1370 * will be stable.
1371 *
1372 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
1373 * arithmetic overflow in the fmeter_update() routine.
1374 *
1375 * Given the simple 32 bit integer arithmetic used, this meter works
1376 * best for reporting rates between one per millisecond (msec) and
1377 * one per 32 (approx) seconds. At constant rates faster than one
1378 * per msec it maxes out at values just under 1,000,000. At constant
1379 * rates between one per msec, and one per second it will stabilize
1380 * to a value N*1000, where N is the rate of events per second.
1381 * At constant rates between one per second and one per 32 seconds,
1382 * it will be choppy, moving up on the seconds that have an event,
1383 * and then decaying until the next event. At rates slower than
1384 * about one in 32 seconds, it decays all the way back to zero between
1385 * each event.
1386 */
1387
1388 #define FM_COEF 933 /* coefficient for half-life of 10 secs */
1389 #define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */
1390 #define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */
1391 #define FM_SCALE 1000 /* faux fixed point scale */
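/*
 * Worked example of the decay: with FM_COEF = 933 and FM_SCALE = 1000, each
 * idle second multiplies the level by 0.933, and 0.933^10 is roughly 0.5,
 * which is where the "half-life of 10 seconds" above comes from. Each marked
 * event bumps cnt by FM_SCALE, so fmeter_update() adds
 * (1000 - 933) * 1000 / 1000 = 67 per event; a steady rate of N events/sec
 * therefore converges to about N * 1000, as described above.
 */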
1392
1393 /* Initialize a frequency meter */
1394 static void fmeter_init(struct fmeter *fmp)
1395 {
1396 fmp->cnt = 0;
1397 fmp->val = 0;
1398 fmp->time = 0;
1399 spin_lock_init(&fmp->lock);
1400 }
1401
1402 /* Internal meter update - process cnt events and update value */
1403 static void fmeter_update(struct fmeter *fmp)
1404 {
1405 time64_t now;
1406 u32 ticks;
1407
1408 now = ktime_get_seconds();
1409 ticks = now - fmp->time;
1410
1411 if (ticks == 0)
1412 return;
1413
1414 ticks = min(FM_MAXTICKS, ticks);
1415 while (ticks-- > 0)
1416 fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
1417 fmp->time = now;
1418
1419 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
1420 fmp->cnt = 0;
1421 }
1422
1423 /* Process any previous ticks, then bump cnt by one (times scale). */
1424 static void fmeter_markevent(struct fmeter *fmp)
1425 {
1426 spin_lock(&fmp->lock);
1427 fmeter_update(fmp);
1428 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
1429 spin_unlock(&fmp->lock);
1430 }
1431
1432 /* Process any previous ticks, then return current value. */
1433 static int fmeter_getrate(struct fmeter *fmp)
1434 {
1435 int val;
1436
1437 spin_lock(&fmp->lock);
1438 fmeter_update(fmp);
1439 val = fmp->val;
1440 spin_unlock(&fmp->lock);
1441 return val;
1442 }
1443
1444 static struct cpuset *cpuset_attach_old_cs;
1445
1446 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
1447 static int cpuset_can_attach(struct cgroup_taskset *tset)
1448 {
1449 struct cgroup_subsys_state *css;
1450 struct cpuset *cs;
1451 struct task_struct *task;
1452 int ret;
1453
1454 /* used later by cpuset_attach() */
1455 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
1456 cs = css_cs(css);
1457
1458 mutex_lock(&cpuset_mutex);
1459
1460 /* allow moving tasks into an empty cpuset if on default hierarchy */
1461 ret = -ENOSPC;
1462 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
1463 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
1464 goto out_unlock;
1465
1466 cgroup_taskset_for_each(task, css, tset) {
1467 ret = task_can_attach(task, cs->cpus_allowed);
1468 if (ret)
1469 goto out_unlock;
1470 ret = security_task_setscheduler(task);
1471 if (ret)
1472 goto out_unlock;
1473 }
1474
1475 /*
1476 * Mark attach is in progress. This makes validate_change() fail
1477 * changes which zero cpus/mems_allowed.
1478 */
1479 cs->attach_in_progress++;
1480 ret = 0;
1481 out_unlock:
1482 mutex_unlock(&cpuset_mutex);
1483 return ret;
1484 }
1485
1486 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
1487 {
1488 struct cgroup_subsys_state *css;
1489 struct cpuset *cs;
1490
1491 cgroup_taskset_first(tset, &css);
1492 cs = css_cs(css);
1493
1494 mutex_lock(&cpuset_mutex);
1495 css_cs(css)->attach_in_progress--;
1496 mutex_unlock(&cpuset_mutex);
1497 }
1498
1499 /*
1500 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach()
1501 * but we can't allocate it dynamically there. Define it globally and
1502 * allocate it from cpuset_init().
1503 */
1504 static cpumask_var_t cpus_attach;
1505
1506 static void cpuset_attach(struct cgroup_taskset *tset)
1507 {
1508 /* static buf protected by cpuset_mutex */
1509 static nodemask_t cpuset_attach_nodemask_to;
1510 struct task_struct *task;
1511 struct task_struct *leader;
1512 struct cgroup_subsys_state *css;
1513 struct cpuset *cs;
1514 struct cpuset *oldcs = cpuset_attach_old_cs;
1515
1516 cgroup_taskset_first(tset, &css);
1517 cs = css_cs(css);
1518
1519 mutex_lock(&cpuset_mutex);
1520
1521 /* prepare for attach */
1522 if (cs == &top_cpuset)
1523 cpumask_copy(cpus_attach, cpu_possible_mask);
1524 else
1525 guarantee_online_cpus(cs, cpus_attach);
1526
1527 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
1528
1529 cgroup_taskset_for_each(task, css, tset) {
1530 /*
1531 * can_attach beforehand should guarantee that this doesn't
1532 * fail. TODO: have a better way to handle failure here
1533 */
1534 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
1535
1536 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
1537 cpuset_update_task_spread_flag(cs, task);
1538 }
1539
1540 /*
1541 * Change mm for all threadgroup leaders. This is expensive and may
1542 * sleep, and should be moved outside the migration path proper.
1543 */
1544 cpuset_attach_nodemask_to = cs->effective_mems;
1545 cgroup_taskset_for_each_leader(leader, css, tset) {
1546 struct mm_struct *mm = get_task_mm(leader);
1547
1548 if (mm) {
1549 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
1550
1551 /*
1552 * old_mems_allowed is the same as mems_allowed
1553 * here, except if this task is being moved
1554 * automatically due to hotplug. In that case
1555 * @mems_allowed has been updated and is empty, so
1556 * @old_mems_allowed is the right nodemask to
1557 * migrate the mm from.
1558 */
1559 if (is_memory_migrate(cs))
1560 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
1561 &cpuset_attach_nodemask_to);
1562 else
1563 mmput(mm);
1564 }
1565 }
1566
1567 cs->old_mems_allowed = cpuset_attach_nodemask_to;
1568
1569 cs->attach_in_progress--;
1570 if (!cs->attach_in_progress)
1571 wake_up(&cpuset_attach_wq);
1572
1573 mutex_unlock(&cpuset_mutex);
1574 }
1575
1576 /* The various types of files and directories in a cpuset file system */
1577
1578 typedef enum {
1579 FILE_MEMORY_MIGRATE,
1580 FILE_CPULIST,
1581 FILE_MEMLIST,
1582 FILE_EFFECTIVE_CPULIST,
1583 FILE_EFFECTIVE_MEMLIST,
1584 FILE_CPU_EXCLUSIVE,
1585 FILE_MEM_EXCLUSIVE,
1586 FILE_MEM_HARDWALL,
1587 FILE_SCHED_LOAD_BALANCE,
1588 FILE_SCHED_RELAX_DOMAIN_LEVEL,
1589 FILE_MEMORY_PRESSURE_ENABLED,
1590 FILE_MEMORY_PRESSURE,
1591 FILE_SPREAD_PAGE,
1592 FILE_SPREAD_SLAB,
1593 } cpuset_filetype_t;
1594
1595 static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
1596 u64 val)
1597 {
1598 struct cpuset *cs = css_cs(css);
1599 cpuset_filetype_t type = cft->private;
1600 int retval = 0;
1601
1602 mutex_lock(&cpuset_mutex);
1603 if (!is_cpuset_online(cs)) {
1604 retval = -ENODEV;
1605 goto out_unlock;
1606 }
1607
1608 switch (type) {
1609 case FILE_CPU_EXCLUSIVE:
1610 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
1611 break;
1612 case FILE_MEM_EXCLUSIVE:
1613 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
1614 break;
1615 case FILE_MEM_HARDWALL:
1616 retval = update_flag(CS_MEM_HARDWALL, cs, val);
1617 break;
1618 case FILE_SCHED_LOAD_BALANCE:
1619 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
1620 break;
1621 case FILE_MEMORY_MIGRATE:
1622 retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
1623 break;
1624 case FILE_MEMORY_PRESSURE_ENABLED:
1625 cpuset_memory_pressure_enabled = !!val;
1626 break;
1627 case FILE_SPREAD_PAGE:
1628 retval = update_flag(CS_SPREAD_PAGE, cs, val);
1629 break;
1630 case FILE_SPREAD_SLAB:
1631 retval = update_flag(CS_SPREAD_SLAB, cs, val);
1632 break;
1633 default:
1634 retval = -EINVAL;
1635 break;
1636 }
1637 out_unlock:
1638 mutex_unlock(&cpuset_mutex);
1639 return retval;
1640 }
1641
1642 static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
1643 s64 val)
1644 {
1645 struct cpuset *cs = css_cs(css);
1646 cpuset_filetype_t type = cft->private;
1647 int retval = -ENODEV;
1648
1649 mutex_lock(&cpuset_mutex);
1650 if (!is_cpuset_online(cs))
1651 goto out_unlock;
1652
1653 switch (type) {
1654 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1655 retval = update_relax_domain_level(cs, val);
1656 break;
1657 default:
1658 retval = -EINVAL;
1659 break;
1660 }
1661 out_unlock:
1662 mutex_unlock(&cpuset_mutex);
1663 return retval;
1664 }
1665
1666 /*
1667 * Common handling for a write to a "cpus" or "mems" file.
1668 */
1669 static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
1670 char *buf, size_t nbytes, loff_t off)
1671 {
1672 struct cpuset *cs = css_cs(of_css(of));
1673 struct cpuset *trialcs;
1674 int retval = -ENODEV;
1675
1676 buf = strstrip(buf);
1677
1678 /*
1679 * CPU or memory hotunplug may leave @cs w/o any execution
1680 * resources, in which case the hotplug code asynchronously updates
1681 * configuration and transfers all tasks to the nearest ancestor
1682 * which can execute.
1683 *
1684 * As writes to "cpus" or "mems" may restore @cs's execution
1685 * resources, wait for the previously scheduled operations before
1686 * proceeding, so that we don't keep removing tasks added
1687 * after execution capability is restored.
1688 *
1689 * cpuset_hotplug_work calls back into cgroup core via
1690 * cgroup_transfer_tasks() and waiting for it from a cgroupfs
1691 * operation like this one can lead to a deadlock through kernfs
1692 * active_ref protection. Let's break the protection. Losing the
1693 * protection is okay as we check whether @cs is online after
1694 * grabbing cpuset_mutex anyway. This only happens on the legacy
1695 * hierarchies.
1696 */
1697 css_get(&cs->css);
1698 kernfs_break_active_protection(of->kn);
1699 flush_work(&cpuset_hotplug_work);
1700
1701 mutex_lock(&cpuset_mutex);
1702 if (!is_cpuset_online(cs))
1703 goto out_unlock;
1704
1705 trialcs = alloc_trial_cpuset(cs);
1706 if (!trialcs) {
1707 retval = -ENOMEM;
1708 goto out_unlock;
1709 }
1710
1711 switch (of_cft(of)->private) {
1712 case FILE_CPULIST:
1713 retval = update_cpumask(cs, trialcs, buf);
1714 break;
1715 case FILE_MEMLIST:
1716 retval = update_nodemask(cs, trialcs, buf);
1717 break;
1718 default:
1719 retval = -EINVAL;
1720 break;
1721 }
1722
1723 free_trial_cpuset(trialcs);
1724 out_unlock:
1725 mutex_unlock(&cpuset_mutex);
1726 kernfs_unbreak_active_protection(of->kn);
1727 css_put(&cs->css);
1728 flush_workqueue(cpuset_migrate_mm_wq);
1729 return retval ?: nbytes;
1730 }
1731
1732 /*
1733 * These ascii lists should be read in a single call, by using a user
1734 * buffer large enough to hold the entire map. If read in smaller
1735 * chunks, there is no guarantee of atomicity. Since the display format
1736 * used, a list of ranges of sequential numbers, is variable length,
1737 * and since these maps can change value dynamically, one could read
1738 * gibberish by doing partial reads while a list was changing.
1739 */
1740 static int cpuset_common_seq_show(struct seq_file *sf, void *v)
1741 {
1742 struct cpuset *cs = css_cs(seq_css(sf));
1743 cpuset_filetype_t type = seq_cft(sf)->private;
1744 int ret = 0;
1745
1746 spin_lock_irq(&callback_lock);
1747
1748 switch (type) {
1749 case FILE_CPULIST:
1750 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
1751 break;
1752 case FILE_MEMLIST:
1753 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
1754 break;
1755 case FILE_EFFECTIVE_CPULIST:
1756 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
1757 break;
1758 case FILE_EFFECTIVE_MEMLIST:
1759 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
1760 break;
1761 default:
1762 ret = -EINVAL;
1763 }
1764
1765 spin_unlock_irq(&callback_lock);
1766 return ret;
1767 }
1768
1769 static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
1770 {
1771 struct cpuset *cs = css_cs(css);
1772 cpuset_filetype_t type = cft->private;
1773 switch (type) {
1774 case FILE_CPU_EXCLUSIVE:
1775 return is_cpu_exclusive(cs);
1776 case FILE_MEM_EXCLUSIVE:
1777 return is_mem_exclusive(cs);
1778 case FILE_MEM_HARDWALL:
1779 return is_mem_hardwall(cs);
1780 case FILE_SCHED_LOAD_BALANCE:
1781 return is_sched_load_balance(cs);
1782 case FILE_MEMORY_MIGRATE:
1783 return is_memory_migrate(cs);
1784 case FILE_MEMORY_PRESSURE_ENABLED:
1785 return cpuset_memory_pressure_enabled;
1786 case FILE_MEMORY_PRESSURE:
1787 return fmeter_getrate(&cs->fmeter);
1788 case FILE_SPREAD_PAGE:
1789 return is_spread_page(cs);
1790 case FILE_SPREAD_SLAB:
1791 return is_spread_slab(cs);
1792 default:
1793 BUG();
1794 }
1795
1796 /* Unreachable but makes gcc happy */
1797 return 0;
1798 }
1799
1800 static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
1801 {
1802 struct cpuset *cs = css_cs(css);
1803 cpuset_filetype_t type = cft->private;
1804 switch (type) {
1805 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1806 return cs->relax_domain_level;
1807 default:
1808 BUG();
1809 }
1810
1811 /* Unreachable but makes gcc happy */
1812 return 0;
1813 }
1814
1815
1816 /*
1817 * for the common functions, 'private' gives the type of file
1818 */
1819
1820 static struct cftype files[] = {
1821 {
1822 .name = "cpus",
1823 .seq_show = cpuset_common_seq_show,
1824 .write = cpuset_write_resmask,
1825 .max_write_len = (100U + 6 * NR_CPUS),
1826 .private = FILE_CPULIST,
1827 },
1828
1829 {
1830 .name = "mems",
1831 .seq_show = cpuset_common_seq_show,
1832 .write = cpuset_write_resmask,
1833 .max_write_len = (100U + 6 * MAX_NUMNODES),
1834 .private = FILE_MEMLIST,
1835 },
1836
1837 {
1838 .name = "effective_cpus",
1839 .seq_show = cpuset_common_seq_show,
1840 .private = FILE_EFFECTIVE_CPULIST,
1841 },
1842
1843 {
1844 .name = "effective_mems",
1845 .seq_show = cpuset_common_seq_show,
1846 .private = FILE_EFFECTIVE_MEMLIST,
1847 },
1848
1849 {
1850 .name = "cpu_exclusive",
1851 .read_u64 = cpuset_read_u64,
1852 .write_u64 = cpuset_write_u64,
1853 .private = FILE_CPU_EXCLUSIVE,
1854 },
1855
1856 {
1857 .name = "mem_exclusive",
1858 .read_u64 = cpuset_read_u64,
1859 .write_u64 = cpuset_write_u64,
1860 .private = FILE_MEM_EXCLUSIVE,
1861 },
1862
1863 {
1864 .name = "mem_hardwall",
1865 .read_u64 = cpuset_read_u64,
1866 .write_u64 = cpuset_write_u64,
1867 .private = FILE_MEM_HARDWALL,
1868 },
1869
1870 {
1871 .name = "sched_load_balance",
1872 .read_u64 = cpuset_read_u64,
1873 .write_u64 = cpuset_write_u64,
1874 .private = FILE_SCHED_LOAD_BALANCE,
1875 },
1876
1877 {
1878 .name = "sched_relax_domain_level",
1879 .read_s64 = cpuset_read_s64,
1880 .write_s64 = cpuset_write_s64,
1881 .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
1882 },
1883
1884 {
1885 .name = "memory_migrate",
1886 .read_u64 = cpuset_read_u64,
1887 .write_u64 = cpuset_write_u64,
1888 .private = FILE_MEMORY_MIGRATE,
1889 },
1890
1891 {
1892 .name = "memory_pressure",
1893 .read_u64 = cpuset_read_u64,
1894 },
1895
1896 {
1897 .name = "memory_spread_page",
1898 .read_u64 = cpuset_read_u64,
1899 .write_u64 = cpuset_write_u64,
1900 .private = FILE_SPREAD_PAGE,
1901 },
1902
1903 {
1904 .name = "memory_spread_slab",
1905 .read_u64 = cpuset_read_u64,
1906 .write_u64 = cpuset_write_u64,
1907 .private = FILE_SPREAD_SLAB,
1908 },
1909
1910 {
1911 .name = "memory_pressure_enabled",
1912 .flags = CFTYPE_ONLY_ON_ROOT,
1913 .read_u64 = cpuset_read_u64,
1914 .write_u64 = cpuset_write_u64,
1915 .private = FILE_MEMORY_PRESSURE_ENABLED,
1916 },
1917
1918 { } /* terminate */
1919 };
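/*
 * Editorial sketch (not part of the original source): a new boolean
 * knob would follow the same cftype pattern. "example_flag" and
 * FILE_EXAMPLE are hypothetical names, and a FILE_EXAMPLE case would
 * also have to be added to cpuset_read_u64()/cpuset_write_u64().
 */
#if 0
	{
		.name = "example_flag",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_EXAMPLE,	/* hypothetical filetype */
	},
#endif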
1920
1921 /*
1922 * cpuset_css_alloc - allocate a cpuset css
1923 * cgrp: control group that the new cpuset will be part of
1924 */
1925
1926 static struct cgroup_subsys_state *
1927 cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
1928 {
1929 struct cpuset *cs;
1930
1931 if (!parent_css)
1932 return &top_cpuset.css;
1933
1934 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
1935 if (!cs)
1936 return ERR_PTR(-ENOMEM);
1937 if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL))
1938 goto free_cs;
1939 if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL))
1940 goto free_cpus;
1941
1942 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1943 cpumask_clear(cs->cpus_allowed);
1944 nodes_clear(cs->mems_allowed);
1945 cpumask_clear(cs->effective_cpus);
1946 nodes_clear(cs->effective_mems);
1947 fmeter_init(&cs->fmeter);
1948 cs->relax_domain_level = -1;
1949
1950 return &cs->css;
1951
1952 free_cpus:
1953 free_cpumask_var(cs->cpus_allowed);
1954 free_cs:
1955 kfree(cs);
1956 return ERR_PTR(-ENOMEM);
1957 }
1958
1959 static int cpuset_css_online(struct cgroup_subsys_state *css)
1960 {
1961 struct cpuset *cs = css_cs(css);
1962 struct cpuset *parent = parent_cs(cs);
1963 struct cpuset *tmp_cs;
1964 struct cgroup_subsys_state *pos_css;
1965
1966 if (!parent)
1967 return 0;
1968
1969 mutex_lock(&cpuset_mutex);
1970
1971 set_bit(CS_ONLINE, &cs->flags);
1972 if (is_spread_page(parent))
1973 set_bit(CS_SPREAD_PAGE, &cs->flags);
1974 if (is_spread_slab(parent))
1975 set_bit(CS_SPREAD_SLAB, &cs->flags);
1976
1977 cpuset_inc();
1978
1979 spin_lock_irq(&callback_lock);
1980 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
1981 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
1982 cs->effective_mems = parent->effective_mems;
1983 }
1984 spin_unlock_irq(&callback_lock);
1985
1986 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
1987 goto out_unlock;
1988
1989 /*
1990 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
1991 * set. This flag handling is implemented in cgroup core for
1992 * historical reasons - the flag may be specified during mount.
1993 *
1994 * Currently, if any sibling cpusets have exclusive cpus or mem, we
1995 * refuse to clone the configuration - thereby refusing to admit
1996 * the task, and as a result failing the sys_unshare() or
1997 * clone() which initiated it. If this becomes a problem for some
1998 * users who wish to allow that scenario, then this could be
1999 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
2000 * (and likewise for mems) to the new cgroup.
2001 */
2002 rcu_read_lock();
2003 cpuset_for_each_child(tmp_cs, pos_css, parent) {
2004 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
2005 rcu_read_unlock();
2006 goto out_unlock;
2007 }
2008 }
2009 rcu_read_unlock();
2010
2011 spin_lock_irq(&callback_lock);
2012 cs->mems_allowed = parent->mems_allowed;
2013 cs->effective_mems = parent->mems_allowed;
2014 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
2015 cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
2016 spin_unlock_irq(&callback_lock);
2017 out_unlock:
2018 mutex_unlock(&cpuset_mutex);
2019 return 0;
2020 }
2021
2022 /*
2023 * If the cpuset being removed has its flag 'sched_load_balance'
2024 * enabled, then simulate turning sched_load_balance off, which
2025 * will call rebuild_sched_domains_locked().
2026 */
2027
2028 static void cpuset_css_offline(struct cgroup_subsys_state *css)
2029 {
2030 struct cpuset *cs = css_cs(css);
2031
2032 mutex_lock(&cpuset_mutex);
2033
2034 if (is_sched_load_balance(cs))
2035 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
2036
2037 cpuset_dec();
2038 clear_bit(CS_ONLINE, &cs->flags);
2039
2040 mutex_unlock(&cpuset_mutex);
2041 }
2042
2043 static void cpuset_css_free(struct cgroup_subsys_state *css)
2044 {
2045 struct cpuset *cs = css_cs(css);
2046
2047 free_cpumask_var(cs->effective_cpus);
2048 free_cpumask_var(cs->cpus_allowed);
2049 kfree(cs);
2050 }
2051
2052 static void cpuset_bind(struct cgroup_subsys_state *root_css)
2053 {
2054 mutex_lock(&cpuset_mutex);
2055 spin_lock_irq(&callback_lock);
2056
2057 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
2058 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
2059 top_cpuset.mems_allowed = node_possible_map;
2060 } else {
2061 cpumask_copy(top_cpuset.cpus_allowed,
2062 top_cpuset.effective_cpus);
2063 top_cpuset.mems_allowed = top_cpuset.effective_mems;
2064 }
2065
2066 spin_unlock_irq(&callback_lock);
2067 mutex_unlock(&cpuset_mutex);
2068 }
2069
2070 /*
2071 * Make sure the new task conforms to the current state of its parent,
2072 * which could have been changed by cpuset just after it inherits the
2073 * state from the parent and before it sits on the cgroup's task list.
2074 */
2075 static void cpuset_fork(struct task_struct *task)
2076 {
2077 if (task_css_is_root(task, cpuset_cgrp_id))
2078 return;
2079
2080 set_cpus_allowed_ptr(task, &current->cpus_allowed);
2081 task->mems_allowed = current->mems_allowed;
2082 }
2083
2084 struct cgroup_subsys cpuset_cgrp_subsys = {
2085 .css_alloc = cpuset_css_alloc,
2086 .css_online = cpuset_css_online,
2087 .css_offline = cpuset_css_offline,
2088 .css_free = cpuset_css_free,
2089 .can_attach = cpuset_can_attach,
2090 .cancel_attach = cpuset_cancel_attach,
2091 .attach = cpuset_attach,
2092 .post_attach = cpuset_post_attach,
2093 .bind = cpuset_bind,
2094 .fork = cpuset_fork,
2095 .legacy_cftypes = files,
2096 .early_init = true,
2097 };
2098
2099 /**
2100 * cpuset_init - initialize cpusets at system boot
2101 *
2102 * Description: Initialize top_cpuset and the cpuset internal file system.
2103 **/
2104
2105 int __init cpuset_init(void)
2106 {
2107 int err = 0;
2108
2109 BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
2110 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
2111
2112 cpumask_setall(top_cpuset.cpus_allowed);
2113 nodes_setall(top_cpuset.mems_allowed);
2114 cpumask_setall(top_cpuset.effective_cpus);
2115 nodes_setall(top_cpuset.effective_mems);
2116
2117 fmeter_init(&top_cpuset.fmeter);
2118 set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
2119 top_cpuset.relax_domain_level = -1;
2120
2121 err = register_filesystem(&cpuset_fs_type);
2122 if (err < 0)
2123 return err;
2124
2125 BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
2126
2127 return 0;
2128 }
2129
2130 /*
2131 * If the CPU and/or memory hotplug handlers below unplug any CPUs
2132 * or memory nodes, we must walk the cpuset hierarchy,
2133 * removing that CPU or node from all cpusets. If this removes the
2134 * last CPU or node from a cpuset, then move the tasks in the empty
2135 * cpuset to its next-highest non-empty parent.
2136 */
2137 static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
2138 {
2139 struct cpuset *parent;
2140
2141 /*
2142 * Find its next-highest non-empty parent (the top cpuset
2143 * has online cpus, so it can't be empty).
2144 */
2145 parent = parent_cs(cs);
2146 while (cpumask_empty(parent->cpus_allowed) ||
2147 nodes_empty(parent->mems_allowed))
2148 parent = parent_cs(parent);
2149
2150 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
2151 pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
2152 pr_cont_cgroup_name(cs->css.cgroup);
2153 pr_cont("\n");
2154 }
2155 }
2156
2157 static void
2158 hotplug_update_tasks_legacy(struct cpuset *cs,
2159 struct cpumask *new_cpus, nodemask_t *new_mems,
2160 bool cpus_updated, bool mems_updated)
2161 {
2162 bool is_empty;
2163
2164 spin_lock_irq(&callback_lock);
2165 cpumask_copy(cs->cpus_allowed, new_cpus);
2166 cpumask_copy(cs->effective_cpus, new_cpus);
2167 cs->mems_allowed = *new_mems;
2168 cs->effective_mems = *new_mems;
2169 spin_unlock_irq(&callback_lock);
2170
2171 /*
2172 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
2173 * as the tasks will be migrated to an ancestor.
2174 */
2175 if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
2176 update_tasks_cpumask(cs);
2177 if (mems_updated && !nodes_empty(cs->mems_allowed))
2178 update_tasks_nodemask(cs);
2179
2180 is_empty = cpumask_empty(cs->cpus_allowed) ||
2181 nodes_empty(cs->mems_allowed);
2182
2183 mutex_unlock(&cpuset_mutex);
2184
2185 /*
2186 * Move tasks to the nearest ancestor with execution resources.
2187 * This is a full cgroup operation which will also call back into
2188 * cpuset. It should be done outside any lock.
2189 */
2190 if (is_empty)
2191 remove_tasks_in_empty_cpuset(cs);
2192
2193 mutex_lock(&cpuset_mutex);
2194 }
2195
2196 static void
2197 hotplug_update_tasks(struct cpuset *cs,
2198 struct cpumask *new_cpus, nodemask_t *new_mems,
2199 bool cpus_updated, bool mems_updated)
2200 {
2201 if (cpumask_empty(new_cpus))
2202 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
2203 if (nodes_empty(*new_mems))
2204 *new_mems = parent_cs(cs)->effective_mems;
2205
2206 spin_lock_irq(&callback_lock);
2207 cpumask_copy(cs->effective_cpus, new_cpus);
2208 cs->effective_mems = *new_mems;
2209 spin_unlock_irq(&callback_lock);
2210
2211 if (cpus_updated)
2212 update_tasks_cpumask(cs);
2213 if (mems_updated)
2214 update_tasks_nodemask(cs);
2215 }
2216
2217 /**
2218 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
2219 * @cs: cpuset of interest
2220 *
2221 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
2222 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
2223 * all its tasks are moved to the nearest ancestor with both resources.
2224 */
2225 static void cpuset_hotplug_update_tasks(struct cpuset *cs)
2226 {
2227 static cpumask_t new_cpus;
2228 static nodemask_t new_mems;
2229 bool cpus_updated;
2230 bool mems_updated;
2231 retry:
2232 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
2233
2234 mutex_lock(&cpuset_mutex);
2235
2236 /*
2237 * We have raced with task attaching. We wait until attaching
2238 * is finished, so we won't attach a task to an empty cpuset.
2239 */
2240 if (cs->attach_in_progress) {
2241 mutex_unlock(&cpuset_mutex);
2242 goto retry;
2243 }
2244
2245 cpumask_and(&new_cpus, cs->cpus_allowed, parent_cs(cs)->effective_cpus);
2246 nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems);
2247
2248 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
2249 mems_updated = !nodes_equal(new_mems, cs->effective_mems);
2250
2251 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
2252 hotplug_update_tasks(cs, &new_cpus, &new_mems,
2253 cpus_updated, mems_updated);
2254 else
2255 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
2256 cpus_updated, mems_updated);
2257
2258 mutex_unlock(&cpuset_mutex);
2259 }
2260
2261 /**
2262 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
2263 *
2264 * This function is called after either CPU or memory configuration has
2265 * changed and updates cpusets accordingly. The top_cpuset is always
2266 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
2267 * order to make cpusets transparent (of no effect) on systems that are
2268 * actively using CPU hotplug but making no active use of cpusets.
2269 *
2270 * Non-root cpusets are only affected by offlining. If any CPUs or memory
2271 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
2272 * all descendants.
2273 *
2274 * Note that CPU offlining during suspend is ignored. We don't modify
2275 * cpusets across suspend/resume cycles at all.
2276 */
2277 static void cpuset_hotplug_workfn(struct work_struct *work)
2278 {
2279 static cpumask_t new_cpus;
2280 static nodemask_t new_mems;
2281 bool cpus_updated, mems_updated;
2282 bool on_dfl = cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
2283
2284 mutex_lock(&cpuset_mutex);
2285
2286 /* fetch the available cpus/mems and find out which changed how */
2287 cpumask_copy(&new_cpus, cpu_active_mask);
2288 new_mems = node_states[N_MEMORY];
2289
2290 cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
2291 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
2292
2293 /* synchronize cpus_allowed to cpu_active_mask */
2294 if (cpus_updated) {
2295 spin_lock_irq(&callback_lock);
2296 if (!on_dfl)
2297 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
2298 cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
2299 spin_unlock_irq(&callback_lock);
2300 /* we don't mess with cpumasks of tasks in top_cpuset */
2301 }
2302
2303 /* synchronize mems_allowed to N_MEMORY */
2304 if (mems_updated) {
2305 spin_lock_irq(&callback_lock);
2306 if (!on_dfl)
2307 top_cpuset.mems_allowed = new_mems;
2308 top_cpuset.effective_mems = new_mems;
2309 spin_unlock_irq(&callback_lock);
2310 update_tasks_nodemask(&top_cpuset);
2311 }
2312
2313 mutex_unlock(&cpuset_mutex);
2314
2315 /* if cpus or mems changed, we need to propagate to descendants */
2316 if (cpus_updated || mems_updated) {
2317 struct cpuset *cs;
2318 struct cgroup_subsys_state *pos_css;
2319
2320 rcu_read_lock();
2321 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
2322 if (cs == &top_cpuset || !css_tryget_online(&cs->css))
2323 continue;
2324 rcu_read_unlock();
2325
2326 cpuset_hotplug_update_tasks(cs);
2327
2328 rcu_read_lock();
2329 css_put(&cs->css);
2330 }
2331 rcu_read_unlock();
2332 }
2333
2334 /* rebuild sched domains if cpus_allowed has changed */
2335 if (cpus_updated)
2336 rebuild_sched_domains();
2337 }
2338
2339 void cpuset_update_active_cpus(void)
2340 {
2341 /*
2342 * We're inside a cpu hotplug critical region which usually nests
2343 * inside cgroup synchronization. Bounce actual hotplug processing
2344 * to a work item to avoid reverse locking order.
2345 *
2346 * We still need to do partition_sched_domains() synchronously;
2347 * otherwise, the scheduler will get confused and put tasks on the
2348 * dead CPU. Fall back to the default single domain.
2349 * cpuset_hotplug_workfn() will rebuild it as necessary.
2350 */
2351 partition_sched_domains(1, NULL, NULL);
2352 schedule_work(&cpuset_hotplug_work);
2353 }
2354
2355 /*
2356 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
2357 * Call this routine anytime after node_states[N_MEMORY] changes.
2358 * See cpuset_update_active_cpus() for CPU hotplug handling.
2359 */
2360 static int cpuset_track_online_nodes(struct notifier_block *self,
2361 unsigned long action, void *arg)
2362 {
2363 schedule_work(&cpuset_hotplug_work);
2364 return NOTIFY_OK;
2365 }
2366
2367 static struct notifier_block cpuset_track_online_nodes_nb = {
2368 .notifier_call = cpuset_track_online_nodes,
2369 .priority = 10, /* ??! */
2370 };
2371
2372 /**
2373 * cpuset_init_smp - initialize cpus_allowed
2374 *
2375 * Description: Finish top_cpuset setup after the cpu and node maps are initialized
2376 */
2377 void __init cpuset_init_smp(void)
2378 {
2379 cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
2380 top_cpuset.mems_allowed = node_states[N_MEMORY];
2381 top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
2382
2383 cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
2384 top_cpuset.effective_mems = node_states[N_MEMORY];
2385
2386 register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
2387
2388 cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
2389 BUG_ON(!cpuset_migrate_mm_wq);
2390 }
2391
2392 /**
2393 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
2394 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
2395 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
2396 *
2397 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
2398 * attached to the specified @tsk. Guaranteed to return some non-empty
2399 * subset of cpu_online_mask, even if this means going outside the
2400 * task's cpuset.
2401 **/
2402
2403 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
2404 {
2405 unsigned long flags;
2406
2407 spin_lock_irqsave(&callback_lock, flags);
2408 rcu_read_lock();
2409 guarantee_online_cpus(task_cs(tsk), pmask);
2410 rcu_read_unlock();
2411 spin_unlock_irqrestore(&callback_lock, flags);
2412 }
2413
2414 void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
2415 {
2416 rcu_read_lock();
2417 do_set_cpus_allowed(tsk, task_cs(tsk)->effective_cpus);
2418 rcu_read_unlock();
2419
2420 /*
2421 * We own tsk->cpus_allowed, nobody can change it under us.
2422 *
2423 * But we read cs && cs->cpus_allowed locklessly and thus can
2424 * race with cgroup_attach_task() or update_cpumask() and get
2425 * the wrong tsk->cpus_allowed. However, both cases imply the
2426 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
2427 * which takes task_rq_lock().
2428 *
2429 * If we are called after it dropped the lock we must see all
2430 * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
2431 * set any mask even if it is not right from task_cs() pov,
2432 * the pending set_cpus_allowed_ptr() will fix things.
2433 *
2434 * select_fallback_rq() will fix things up and set cpu_possible_mask
2435 * if required.
2436 */
2437 }
2438
2439 void __init cpuset_init_current_mems_allowed(void)
2440 {
2441 nodes_setall(current->mems_allowed);
2442 }
2443
2444 /**
2445 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
2446 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
2447 *
2448 * Description: Returns the nodemask_t mems_allowed of the cpuset
2449 * attached to the specified @tsk. Guaranteed to return some non-empty
2450 * subset of node_states[N_MEMORY], even if this means going outside the
2451 * task's cpuset.
2452 **/
2453
2454 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
2455 {
2456 nodemask_t mask;
2457 unsigned long flags;
2458
2459 spin_lock_irqsave(&callback_lock, flags);
2460 rcu_read_lock();
2461 guarantee_online_mems(task_cs(tsk), &mask);
2462 rcu_read_unlock();
2463 spin_unlock_irqrestore(&callback_lock, flags);
2464
2465 return mask;
2466 }
2467
2468 /**
2469 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
2470 * @nodemask: the nodemask to be checked
2471 *
2472 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
2473 */
2474 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
2475 {
2476 return nodes_intersects(*nodemask, current->mems_allowed);
2477 }
2478
2479 /*
2480 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
2481 * mem_hardwall ancestor to the specified cpuset. Call holding
2482 * callback_lock. If no ancestor is mem_exclusive or mem_hardwall
2483 * (an unusual configuration), then returns the root cpuset.
2484 */
2485 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
2486 {
2487 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
2488 cs = parent_cs(cs);
2489 return cs;
2490 }
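/*
 * Editorial example (not part of the original source): for a hierarchy
 * root -> A (mem_hardwall) -> B -> C, calling this on C returns A. If
 * no ancestor is hardwalled, the walk terminates at the root cpuset,
 * whose parent is NULL.
 */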
2491
2492 /**
2493 * cpuset_node_allowed - Can we allocate on a memory node?
2494 * @node: is this an allowed node?
2495 * @gfp_mask: memory allocation flags
2496 *
2497 * If we're in interrupt, yes, we can always allocate. If @node is set in
2498 * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this
2499 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
2500 * yes. If current has access to memory reserves due to TIF_MEMDIE, yes.
2501 * Otherwise, no.
2502 *
2503 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
2504 * and do not allow allocations outside the current task's cpuset
2505 * unless the task has been OOM killed and is marked TIF_MEMDIE.
2506 * GFP_KERNEL allocations are not so marked, so can escape to the
2507 * nearest enclosing hardwalled ancestor cpuset.
2508 *
2509 * Scanning up parent cpusets requires callback_lock. The
2510 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
2511 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
2512 * current task's mems_allowed came up empty on the first pass over
2513 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the
2514 * cpuset are short of memory, might require taking the callback_lock.
2515 *
2516 * The first call here from mm/page_alloc:get_page_from_freelist()
2517 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
2518 * so no allocation on a node outside the cpuset is allowed (unless
2519 * in interrupt, of course).
2520 *
2521 * The second pass through get_page_from_freelist() doesn't even call
2522 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
2523 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
2524 * in alloc_flags. That logic and the checks below have the combined
2525 * effect that:
2526 * in_interrupt - any node ok (current task context irrelevant)
2527 * GFP_ATOMIC - any node ok
2528 * TIF_MEMDIE - any node ok
2529 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
2530 * GFP_USER - only nodes in the current task's mems_allowed ok.
2531 */
2532 bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
2533 {
2534 struct cpuset *cs; /* current cpuset ancestors */
2535 int allowed; /* is allocation on @node allowed? */
2536 unsigned long flags;
2537
2538 if (in_interrupt())
2539 return true;
2540 if (node_isset(node, current->mems_allowed))
2541 return true;
2542 /*
2543 * Allow tasks that have access to memory reserves because they have
2544 * been OOM killed to get memory anywhere.
2545 */
2546 if (unlikely(test_thread_flag(TIF_MEMDIE)))
2547 return true;
2548 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
2549 return false;
2550
2551 if (current->flags & PF_EXITING) /* Let dying task have memory */
2552 return true;
2553
2554 /* Not hardwall and node outside mems_allowed: scan up cpusets */
2555 spin_lock_irqsave(&callback_lock, flags);
2556
2557 rcu_read_lock();
2558 cs = nearest_hardwall_ancestor(task_cs(current));
2559 allowed = node_isset(node, cs->mems_allowed);
2560 rcu_read_unlock();
2561
2562 spin_unlock_irqrestore(&callback_lock, flags);
2563 return allowed;
2564 }
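/*
 * Editorial sketch (not part of the original source): how the rules
 * above distinguish callers. "nid" is an arbitrary node id; only the
 * presence of __GFP_HARDWALL (set by GFP_USER) differs between calls.
 */
#if 0
static bool example_node_checks(int nid)
{
	/* GFP_USER sets __GFP_HARDWALL: only current->mems_allowed nodes */
	bool user_ok = __cpuset_node_allowed(nid, GFP_USER);

	/* GFP_KERNEL may escape to the nearest hardwalled ancestor */
	bool kernel_ok = __cpuset_node_allowed(nid, GFP_KERNEL);

	return user_ok || kernel_ok;
}
#endif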
2565
2566 /**
2567 * cpuset_mem_spread_node() - On which node to begin search for a file page
2568 * cpuset_slab_spread_node() - On which node to begin search for a slab page
2569 *
2570 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
2571 * tasks in a cpuset with is_spread_page or is_spread_slab set),
2572 * and if the memory allocation used cpuset_mem_spread_node()
2573 * to determine on which node to start looking, as it will for
2574 * certain page cache or slab cache pages such as used for file
2575 * system buffers and inode caches, then instead of starting on the
2576 * local node to look for a free page, rather spread the starting
2577 * node around the tasks mems_allowed nodes.
2578 *
2579 * We don't have to worry about the returned node being offline
2580 * because "it can't happen", and even if it did, it would be ok.
2581 *
2582 * The routines calling guarantee_online_mems() are careful to
2583 * only set nodes in task->mems_allowed that are online. So it
2584 * should not be possible for the following code to return an
2585 * offline node. But if it did, that would be ok, as this routine
2586 * is not returning the node where the allocation must be, only
2587 * the node where the search should start. The zonelist passed to
2588 * __alloc_pages() will include all nodes. If the slab allocator
2589 * is passed an offline node, it will fall back to the local node.
2590 * See kmem_cache_alloc_node().
2591 */
2592
2593 static int cpuset_spread_node(int *rotor)
2594 {
2595 return *rotor = next_node_in(*rotor, current->mems_allowed);
2596 }
2597
2598 int cpuset_mem_spread_node(void)
2599 {
2600 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
2601 current->cpuset_mem_spread_rotor =
2602 node_random(&current->mems_allowed);
2603
2604 return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
2605 }
2606
2607 int cpuset_slab_spread_node(void)
2608 {
2609 if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
2610 current->cpuset_slab_spread_rotor =
2611 node_random(&current->mems_allowed);
2612
2613 return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
2614 }
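/*
 * Editorial sketch (not part of the original source): with
 * mems_allowed = {0,2}, successive calls return 0, 2, 0, 2, ... after
 * a random starting point, since next_node_in() wraps around the mask.
 */
#if 0
static void example_spread_allocations(struct page *pages[4])
{
	int i;

	for (i = 0; i < 4; i++) {
		/* rotor advances across mems_allowed on each call */
		int nid = cpuset_mem_spread_node();

		pages[i] = alloc_pages_node(nid, GFP_HIGHUSER, 0);
	}
}
#endif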
2615
2616 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
2617
2618 /**
2619 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
2620 * @tsk1: pointer to task_struct of some task.
2621 * @tsk2: pointer to task_struct of some other task.
2622 *
2623 * Description: Return true if @tsk1's mems_allowed intersects the
2624 * mems_allowed of @tsk2. Used by the OOM killer to determine if
2625 * one of the task's memory usage might impact the memory available
2626 * to the other.
2627 **/
2628
2629 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
2630 const struct task_struct *tsk2)
2631 {
2632 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
2633 }
2634
2635 /**
2636 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
2637 *
2638 * Description: Prints current's name, cpuset name, and a cached copy of its
2639 * mems_allowed to the kernel log.
2640 */
2641 void cpuset_print_current_mems_allowed(void)
2642 {
2643 struct cgroup *cgrp;
2644
2645 rcu_read_lock();
2646
2647 cgrp = task_cs(current)->css.cgroup;
2648 pr_info("%s cpuset=", current->comm);
2649 pr_cont_cgroup_name(cgrp);
2650 pr_cont(" mems_allowed=%*pbl\n",
2651 nodemask_pr_args(&current->mems_allowed));
2652
2653 rcu_read_unlock();
2654 }
2655
2656 /*
2657 * Collection of memory_pressure is suppressed unless
2658 * this flag is enabled by writing "1" to the special
2659 * cpuset file 'memory_pressure_enabled' in the root cpuset.
2660 */
2661
2662 int cpuset_memory_pressure_enabled __read_mostly;
2663
2664 /**
2665 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
2666 *
2667 * Keep a running average of the rate of synchronous (direct)
2668 * page reclaim efforts initiated by tasks in each cpuset.
2669 *
2670 * This represents the rate at which some task in the cpuset
2671 * ran low on memory on all nodes it was allowed to use, and
2672 * had to enter the kernel's page reclaim code in an effort to
2673 * create more free memory by tossing clean pages or swapping
2674 * or writing dirty pages.
2675 *
2676 * Display to user space in the per-cpuset read-only file
2677 * "memory_pressure". Value displayed is an integer
2678 * representing the recent rate of entry into the synchronous
2679 * (direct) page reclaim by any task attached to the cpuset.
2680 **/
2681
2682 void __cpuset_memory_pressure_bump(void)
2683 {
2684 rcu_read_lock();
2685 fmeter_markevent(&task_cs(current)->fmeter);
2686 rcu_read_unlock();
2687 }
2688
2689 #ifdef CONFIG_PROC_PID_CPUSET
2690 /*
2691 * proc_cpuset_show()
2692 * - Print task's cpuset path into seq_file.
2693 * - Used for /proc/<pid>/cpuset.
2694 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
2695 * doesn't really matter if tsk->cpuset changes after we read it,
2696 * and we take cpuset_mutex, keeping cpuset_attach() from changing it
2697 * anyway.
2698 */
2699 int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
2700 struct pid *pid, struct task_struct *tsk)
2701 {
2702 char *buf;
2703 struct cgroup_subsys_state *css;
2704 int retval;
2705
2706 retval = -ENOMEM;
2707 buf = kmalloc(PATH_MAX, GFP_KERNEL);
2708 if (!buf)
2709 goto out;
2710
2711 css = task_get_css(tsk, cpuset_cgrp_id);
2712 retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
2713 current->nsproxy->cgroup_ns);
2714 css_put(css);
2715 if (retval >= PATH_MAX)
2716 retval = -ENAMETOOLONG;
2717 if (retval < 0)
2718 goto out_free;
2719 seq_puts(m, buf);
2720 seq_putc(m, '\n');
2721 retval = 0;
2722 out_free:
2723 kfree(buf);
2724 out:
2725 return retval;
2726 }
2727 #endif /* CONFIG_PROC_PID_CPUSET */
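/*
 * Editorial example (not part of the original source): for a task
 * attached to a cpuset at the hypothetical path /alpha/beta, reading
 * /proc/<pid>/cpuset yields the single line "/alpha/beta".
 */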
2728
2729 /* Display task mems_allowed in /proc/<pid>/status file. */
2730 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
2731 {
2732 seq_printf(m, "Mems_allowed:\t%*pb\n",
2733 nodemask_pr_args(&task->mems_allowed));
2734 seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
2735 nodemask_pr_args(&task->mems_allowed));
2736 }
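/*
 * Editorial example (not part of the original source): the two lines
 * this adds to /proc/<pid>/status for a task allowed nodes 0-1; the
 * number of hex words in the first mask depends on MAX_NUMNODES, so
 * the exact width shown here is illustrative:
 *
 *	Mems_allowed:	00000000,00000003
 *	Mems_allowed_list:	0-1
 */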