#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <linux/kernel_stat.h>

#include "sched.h"

/*
 * CPU accounting code for task groups.
 *
 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
 * (balbir@in.ibm.com).
 */

struct cpuacct root_cpuacct;

/* create a new cpu accounting group */
static struct cgroup_subsys_state *cpuacct_css_alloc(struct cgroup *cgrp)
{
	struct cpuacct *ca;

	if (!cgrp->parent)
		return &root_cpuacct.css;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca)
		goto out;

	ca->cpuusage = alloc_percpu(u64);
	if (!ca->cpuusage)
		goto out_free_ca;

	ca->cpustat = alloc_percpu(struct kernel_cpustat);
	if (!ca->cpustat)
		goto out_free_cpuusage;

	return &ca->css;

out_free_cpuusage:
	free_percpu(ca->cpuusage);
out_free_ca:
	kfree(ca);
out:
	return ERR_PTR(-ENOMEM);
}

/* destroy an existing cpu accounting group */
static void cpuacct_css_free(struct cgroup *cgrp)
{
	struct cpuacct *ca = cgroup_ca(cgrp);

	free_percpu(ca->cpustat);
	free_percpu(ca->cpuusage);
	kfree(ca);
}

static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
{
	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
	u64 data;

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
	 */
	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
	data = *cpuusage;
	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
	data = *cpuusage;
#endif

	return data;
}

static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
{
	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
	 */
	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
	*cpuusage = val;
	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
	*cpuusage = val;
#endif
}

/* return total cpu usage (in nanoseconds) of a group */
static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	u64 totalcpuusage = 0;
	int i;

	for_each_present_cpu(i)
		totalcpuusage += cpuacct_cpuusage_read(ca, i);

	return totalcpuusage;
}

static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
			  u64 reset)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	int err = 0;
	int i;

	if (reset) {
		err = -EINVAL;
		goto out;
	}

	for_each_present_cpu(i)
		cpuacct_cpuusage_write(ca, i, 0);

out:
	return err;
}
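
/*
 * A minimal userspace sketch (not part of this file) of the reset path
 * implemented by cpuusage_write() above: writing "0" to cpuacct.usage
 * clears every per-cpu counter, and any non-zero value is rejected with
 * -EINVAL.  The mount point and the "mygroup" directory below are
 * assumptions about a typical cgroup v1 setup, not something this file
 * defines.
 */
#include <stdio.h>

static int reset_usage(void)
{
	FILE *f = fopen("/sys/fs/cgroup/cpuacct/mygroup/cpuacct.usage", "w");

	if (!f)
		return -1;
	fprintf(f, "0\n");	/* only "0" is accepted; see cpuusage_write() */
	return fclose(f);	/* a non-zero value would have been rejected (-EINVAL) */
}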

static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
				   struct seq_file *m)
{
	struct cpuacct *ca = cgroup_ca(cgroup);
	u64 percpu;
	int i;

	for_each_present_cpu(i) {
		percpu = cpuacct_cpuusage_read(ca, i);
		seq_printf(m, "%llu ", (unsigned long long) percpu);
	}
	seq_printf(m, "\n");
	return 0;
}

static const char * const cpuacct_stat_desc[] = {
	[CPUACCT_STAT_USER] = "user",
	[CPUACCT_STAT_SYSTEM] = "system",
};

static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
			      struct cgroup_map_cb *cb)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	int cpu;
	s64 val = 0;

	for_each_online_cpu(cpu) {
		struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
		val += kcpustat->cpustat[CPUTIME_USER];
		val += kcpustat->cpustat[CPUTIME_NICE];
	}
	val = cputime64_to_clock_t(val);
	cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_USER], val);

	val = 0;
	for_each_online_cpu(cpu) {
		struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
		val += kcpustat->cpustat[CPUTIME_SYSTEM];
		val += kcpustat->cpustat[CPUTIME_IRQ];
		val += kcpustat->cpustat[CPUTIME_SOFTIRQ];
	}

	val = cputime64_to_clock_t(val);
	cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_SYSTEM], val);

	return 0;
}

static struct cftype files[] = {
	{
		.name = "usage",
		.read_u64 = cpuusage_read,
		.write_u64 = cpuusage_write,
	},
	{
		.name = "usage_percpu",
		.read_seq_string = cpuacct_percpu_seq_read,
	},
	{
		.name = "stat",
		.read_map = cpuacct_stats_show,
	},
	{ }	/* terminate */
};
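
/*
 * The cftype table above is what userspace sees as cpuacct.usage,
 * cpuacct.usage_percpu and cpuacct.stat inside each cgroup directory.
 * Below is a small reading sketch (not part of this file), again assuming
 * a conventional cgroup v1 mount at /sys/fs/cgroup/cpuacct and a group
 * called "mygroup"; both names are assumptions for illustration only.
 */
#include <stdio.h>

int main(void)
{
	char line[512];
	const char *paths[] = {
		"/sys/fs/cgroup/cpuacct/mygroup/cpuacct.usage",		/* one u64, nanoseconds */
		"/sys/fs/cgroup/cpuacct/mygroup/cpuacct.usage_percpu",	/* one u64 per present cpu */
		"/sys/fs/cgroup/cpuacct/mygroup/cpuacct.stat",		/* user/system in USER_HZ ticks */
	};

	for (int i = 0; i < 3; i++) {
		FILE *f = fopen(paths[i], "r");

		if (!f)
			continue;
		while (fgets(line, sizeof(line), f))
			printf("%s: %s", paths[i], line);
		fclose(f);
	}
	return 0;
}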

/*
 * charge this task's execution time to its accounting group.
 *
 * called with rq->lock held.
 */
void cpuacct_charge(struct task_struct *tsk, u64 cputime)
{
	struct cpuacct *ca;
	int cpu;

	if (unlikely(!cpuacct_subsys.active))
		return;

	cpu = task_cpu(tsk);

	rcu_read_lock();

	ca = task_ca(tsk);

	while (true) {
		u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
		*cpuusage += cputime;

		ca = parent_ca(ca);
		if (!ca)
			break;
	}

	rcu_read_unlock();
}
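
/*
 * Because cpuacct_charge() walks from the task's group up to the root,
 * every ancestor's cpuacct.usage includes the usage of its descendants.
 * A rough userspace sketch (not part of this file) of observing that:
 * it must run as root, assumes a cgroup v1 cpuacct mount at
 * /sys/fs/cgroup/cpuacct, and the "parent"/"child" group names are
 * made up for the example.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <time.h>

static unsigned long long read_ns(const char *path)
{
	unsigned long long ns = 0;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%llu", &ns) != 1)
			ns = 0;
		fclose(f);
	}
	return ns;
}

int main(void)
{
	FILE *tasks;
	time_t end;

	mkdir("/sys/fs/cgroup/cpuacct/parent", 0755);
	mkdir("/sys/fs/cgroup/cpuacct/parent/child", 0755);

	/* move the current task into the child group */
	tasks = fopen("/sys/fs/cgroup/cpuacct/parent/child/tasks", "w");
	if (!tasks)
		return 1;
	fprintf(tasks, "%d\n", getpid());
	fclose(tasks);

	/* burn roughly a second of cpu so there is something to charge */
	end = time(NULL) + 1;
	while (time(NULL) < end)
		;

	/* the parent's usage includes (at least) the child's */
	printf("child : %llu ns\n",
	       read_ns("/sys/fs/cgroup/cpuacct/parent/child/cpuacct.usage"));
	printf("parent: %llu ns\n",
	       read_ns("/sys/fs/cgroup/cpuacct/parent/cpuacct.usage"));
	return 0;
}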

/*
 * Add user/system time to cpuacct.
 *
 * Note: it's the caller that updates the account of the root cgroup.
 */
void cpuacct_account_field(struct task_struct *p, int index, u64 val)
{
	struct kernel_cpustat *kcpustat;
	struct cpuacct *ca;

	if (unlikely(!cpuacct_subsys.active))
		return;

	rcu_read_lock();
	ca = task_ca(p);
	while (ca != &root_cpuacct) {
		kcpustat = this_cpu_ptr(ca->cpustat);
		kcpustat->cpustat[index] += val;
		ca = __parent_ca(ca);
	}
	rcu_read_unlock();
}

void __init cpuacct_init(void)
{
	root_cpuacct.cpustat = &kernel_cpustat;
	root_cpuacct.cpuusage = alloc_percpu(u64);
	BUG_ON(!root_cpuacct.cpuusage); /* Too early, not expected to fail */
}

struct cgroup_subsys cpuacct_subsys = {
	.name = "cpuacct",
	.css_alloc = cpuacct_css_alloc,
	.css_free = cpuacct_css_free,
	.subsys_id = cpuacct_subsys_id,
	.base_cftypes = files,
};