]> git.proxmox.com Git - pve-kernel.git/blob - patches/kernel/0006-cpuset-Allow-v2-behavior-in-v1-cgroup.patch
25be085d0ef3c08dd176723fd3cf6649c4f20c5f
[pve-kernel.git] / patches / kernel / 0006-cpuset-Allow-v2-behavior-in-v1-cgroup.patch
1 From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
2 From: Waiman Long <longman@redhat.com>
3 Date: Thu, 17 Aug 2017 15:33:10 -0400
4 Subject: [PATCH] cpuset: Allow v2 behavior in v1 cgroup
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9 Cpuset v2 has some useful behaviors that are not present in v1 because
10 of backward compatibility concerns. One of them is the restoration of
11 the original cpu and memory node mask after a hot removal and addition
12 event sequence.
13
14 This patch makes the cpuset controller check the
15 CGRP_ROOT_CPUSET_V2_MODE flag and use the v2 behavior if it is set.
16
17 Signed-off-by: Waiman Long <longman@redhat.com>
18 Signed-off-by: Tejun Heo <tj@kernel.org>
19 (cherry-picked from b8d1b8ee93df8ffbabbeadd65d39853cfad6d698)
20 Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
21 ---
22 kernel/cgroup/cpuset.c | 33 ++++++++++++++++++++-------------
23 1 file changed, 20 insertions(+), 13 deletions(-)
24
25 diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
26 index e8cb34193433..f76c4bf3d46a 100644
27 --- a/kernel/cgroup/cpuset.c
28 +++ b/kernel/cgroup/cpuset.c
29 @@ -299,6 +299,16 @@ static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
30
31 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
32
33 +/*
34 + * Cgroup v2 behavior is used when on default hierarchy or the
35 + * cgroup_v2_mode flag is set.
36 + */
37 +static inline bool is_in_v2_mode(void)
38 +{
39 + return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
40 + (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
41 +}
42 +
43 /*
44 * This is ugly, but preserves the userspace API for existing cpuset
45 * users. If someone tries to mount the "cpuset" filesystem, we
46 @@ -489,8 +499,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
47
48 /* On legacy hiearchy, we must be a subset of our parent cpuset. */
49 ret = -EACCES;
50 - if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
51 - !is_cpuset_subset(trial, par))
52 + if (!is_in_v2_mode() && !is_cpuset_subset(trial, par))
53 goto out;
54
55 /*
56 @@ -896,8 +905,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
57 * If it becomes empty, inherit the effective mask of the
58 * parent, which is guaranteed to have some CPUs.
59 */
60 - if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
61 - cpumask_empty(new_cpus))
62 + if (is_in_v2_mode() && cpumask_empty(new_cpus))
63 cpumask_copy(new_cpus, parent->effective_cpus);
64
65 /* Skip the whole subtree if the cpumask remains the same. */
66 @@ -914,7 +922,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
67 cpumask_copy(cp->effective_cpus, new_cpus);
68 spin_unlock_irq(&callback_lock);
69
70 - WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
71 + WARN_ON(!is_in_v2_mode() &&
72 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
73
74 update_tasks_cpumask(cp);
75 @@ -1150,8 +1158,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
76 * If it becomes empty, inherit the effective mask of the
77 * parent, which is guaranteed to have some MEMs.
78 */
79 - if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
80 - nodes_empty(*new_mems))
81 + if (is_in_v2_mode() && nodes_empty(*new_mems))
82 *new_mems = parent->effective_mems;
83
84 /* Skip the whole subtree if the nodemask remains the same. */
85 @@ -1168,7 +1175,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
86 cp->effective_mems = *new_mems;
87 spin_unlock_irq(&callback_lock);
88
89 - WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
90 + WARN_ON(!is_in_v2_mode() &&
91 !nodes_equal(cp->mems_allowed, cp->effective_mems));
92
93 update_tasks_nodemask(cp);
94 @@ -1460,7 +1467,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
95
96 /* allow moving tasks into an empty cpuset if on default hierarchy */
97 ret = -ENOSPC;
98 - if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
99 + if (!is_in_v2_mode() &&
100 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
101 goto out_unlock;
102
103 @@ -1979,7 +1986,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
104 cpuset_inc();
105
106 spin_lock_irq(&callback_lock);
107 - if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
108 + if (is_in_v2_mode()) {
109 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
110 cs->effective_mems = parent->effective_mems;
111 }
112 @@ -2056,7 +2063,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
113 mutex_lock(&cpuset_mutex);
114 spin_lock_irq(&callback_lock);
115
116 - if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
117 + if (is_in_v2_mode()) {
118 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
119 top_cpuset.mems_allowed = node_possible_map;
120 } else {
121 @@ -2250,7 +2257,7 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs)
122 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
123 mems_updated = !nodes_equal(new_mems, cs->effective_mems);
124
125 - if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
126 + if (is_in_v2_mode())
127 hotplug_update_tasks(cs, &new_cpus, &new_mems,
128 cpus_updated, mems_updated);
129 else
130 @@ -2288,7 +2295,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
131 static cpumask_t new_cpus;
132 static nodemask_t new_mems;
133 bool cpus_updated, mems_updated;
134 - bool on_dfl = cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
135 + bool on_dfl = is_in_v2_mode();
136
137 mutex_lock(&cpuset_mutex);
138
139 --
140 2.14.2
141