/*
 * Workqueue statistical tracer.
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */

#include <trace/events/workqueue.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include "trace_stat.h"
#include "trace.h"

/* A cpu workqueue thread */
struct cpu_workqueue_stats {
	struct list_head	list;
	int			cpu;
	pid_t			pid;
	/* Can be inserted from interrupt or user context, needs to be atomic */
	atomic_t		inserted;
	/*
	 * Doesn't need to be atomic: works are serialized in a single
	 * workqueue thread on a single CPU.
	 */
	unsigned int		executed;
};
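/*
 * Note: inserted is bumped by the insertion probe, which can fire from
 * any context, while executed is only ever written by the execution
 * probe, i.e. by the workqueue thread itself; hence the asymmetry in
 * the two counter types above.
 */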
29 | ||
30 | /* List of workqueue threads on one cpu */ | |
31 | struct workqueue_global_stats { | |
32 | struct list_head list; | |
33 | spinlock_t lock; | |
34 | }; | |
35 | ||
36 | /* Don't need a global lock because allocated before the workqueues, and | |
37 | * never freed. | |
38 | */ | |
3690b5e6 LJ |
39 | static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat); |
40 | #define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu)) | |
e1d8aa9f FW |
41 | |
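/*
 * Each possible CPU owns one list of cpu_workqueue_stats, protected by
 * its own spinlock. A probe only ever touches the list of the CPU its
 * workqueue thread is bound to (cpumask_first() of the thread's
 * affinity mask), so no cross-CPU lock ordering is needed.
 */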
/* Insertion of a work */
static void
probe_workqueue_insertion(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
		if (node->pid == wq_thread->pid) {
			atomic_inc(&node->inserted);
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
62 | ||
63 | /* Execution of a work */ | |
64 | static void | |
65 | probe_workqueue_execution(struct task_struct *wq_thread, | |
66 | struct work_struct *work) | |
67 | { | |
68 | int cpu = cpumask_first(&wq_thread->cpus_allowed); | |
1fdfca9c | 69 | struct cpu_workqueue_stats *node; |
e1d8aa9f FW |
70 | unsigned long flags; |
71 | ||
3690b5e6 | 72 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); |
1fdfca9c | 73 | list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) { |
e1d8aa9f FW |
74 | if (node->pid == wq_thread->pid) { |
75 | node->executed++; | |
76 | goto found; | |
77 | } | |
78 | } | |
79 | pr_debug("trace_workqueue: entry not found\n"); | |
80 | found: | |
3690b5e6 | 81 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); |
e1d8aa9f FW |
82 | } |
83 | ||
84 | /* Creation of a cpu workqueue thread */ | |
85 | static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu) | |
86 | { | |
87 | struct cpu_workqueue_stats *cws; | |
88 | unsigned long flags; | |
89 | ||
bbcd3063 | 90 | WARN_ON(cpu < 0); |
e1d8aa9f FW |
91 | |
92 | /* Workqueues are sometimes created in atomic context */ | |
93 | cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC); | |
94 | if (!cws) { | |
95 | pr_warning("trace_workqueue: not enough memory\n"); | |
96 | return; | |
97 | } | |
e1d8aa9f FW |
98 | INIT_LIST_HEAD(&cws->list); |
99 | cws->cpu = cpu; | |
100 | ||
101 | cws->pid = wq_thread->pid; | |
102 | ||
3690b5e6 | 103 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); |
3690b5e6 LJ |
104 | list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list); |
105 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); | |
e1d8aa9f FW |
106 | } |
107 | ||
108 | /* Destruction of a cpu workqueue thread */ | |
109 | static void probe_workqueue_destruction(struct task_struct *wq_thread) | |
110 | { | |
111 | /* Workqueue only execute on one cpu */ | |
112 | int cpu = cpumask_first(&wq_thread->cpus_allowed); | |
113 | struct cpu_workqueue_stats *node, *next; | |
114 | unsigned long flags; | |
115 | ||
3690b5e6 LJ |
116 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); |
117 | list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list, | |
e1d8aa9f FW |
118 | list) { |
119 | if (node->pid == wq_thread->pid) { | |
120 | list_del(&node->list); | |
121 | kfree(node); | |
122 | goto found; | |
123 | } | |
124 | } | |
125 | ||
126 | pr_debug("trace_workqueue: don't find workqueue to destroy\n"); | |
127 | found: | |
3690b5e6 | 128 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); |
e1d8aa9f FW |
129 | |
130 | } | |
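/*
 * The functions below implement the trace_stat iterator for this
 * tracer: stat_start yields the first cpu_workqueue_stats entry,
 * stat_next each following one, walking every per-cpu list across all
 * possible CPUs in turn.
 */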
131 | ||
132 | static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu) | |
133 | { | |
134 | unsigned long flags; | |
135 | struct cpu_workqueue_stats *ret = NULL; | |
136 | ||
137 | ||
3690b5e6 | 138 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); |
e1d8aa9f | 139 | |
3690b5e6 LJ |
140 | if (!list_empty(&workqueue_cpu_stat(cpu)->list)) |
141 | ret = list_entry(workqueue_cpu_stat(cpu)->list.next, | |
e1d8aa9f FW |
142 | struct cpu_workqueue_stats, list); |
143 | ||
3690b5e6 | 144 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); |
e1d8aa9f FW |
145 | |
146 | return ret; | |
147 | } | |
148 | ||
42548008 | 149 | static void *workqueue_stat_start(struct tracer_stat *trace) |
e1d8aa9f FW |
150 | { |
151 | int cpu; | |
152 | void *ret = NULL; | |
153 | ||
154 | for_each_possible_cpu(cpu) { | |
155 | ret = workqueue_stat_start_cpu(cpu); | |
156 | if (ret) | |
157 | return ret; | |
158 | } | |
159 | return NULL; | |
160 | } | |
161 | ||
162 | static void *workqueue_stat_next(void *prev, int idx) | |
163 | { | |
164 | struct cpu_workqueue_stats *prev_cws = prev; | |
165 | int cpu = prev_cws->cpu; | |
166 | unsigned long flags; | |
167 | void *ret = NULL; | |
168 | ||
3690b5e6 LJ |
169 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); |
170 | if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) { | |
171 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); | |
bbcd3063 KM |
172 | do { |
173 | cpu = cpumask_next(cpu, cpu_possible_mask); | |
174 | if (cpu >= nr_cpu_ids) | |
175 | return NULL; | |
176 | } while (!(ret = workqueue_stat_start_cpu(cpu))); | |
177 | return ret; | |
e1d8aa9f | 178 | } |
3690b5e6 | 179 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); |
e1d8aa9f FW |
180 | |
181 | return list_entry(prev_cws->list.next, struct cpu_workqueue_stats, | |
182 | list); | |
183 | } | |
184 | ||
185 | static int workqueue_stat_show(struct seq_file *s, void *p) | |
186 | { | |
187 | struct cpu_workqueue_stats *cws = p; | |
188 | unsigned long flags; | |
189 | int cpu = cws->cpu; | |
889a6c36 KM |
190 | struct pid *pid; |
191 | struct task_struct *tsk; | |
192 | ||
2f63b840 LJ |
193 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); |
194 | if (&cws->list == workqueue_cpu_stat(cpu)->list.next) | |
195 | seq_printf(s, "\n"); | |
196 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); | |
197 | ||
889a6c36 KM |
198 | pid = find_get_pid(cws->pid); |
199 | if (pid) { | |
200 | tsk = get_pid_task(pid, PIDTYPE_PID); | |
201 | if (tsk) { | |
202 | seq_printf(s, "%3d %6d %6u %s\n", cws->cpu, | |
203 | atomic_read(&cws->inserted), cws->executed, | |
204 | tsk->comm); | |
205 | put_task_struct(tsk); | |
206 | } | |
207 | put_pid(pid); | |
208 | } | |
e1d8aa9f | 209 | |
e1d8aa9f FW |
210 | return 0; |
211 | } | |
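/*
 * find_get_pid() and get_pid_task() each take a reference, so the task
 * cannot go away while its comm is being printed; both references are
 * dropped before returning.
 */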
212 | ||
213 | static int workqueue_stat_headers(struct seq_file *s) | |
214 | { | |
215 | seq_printf(s, "# CPU INSERTED EXECUTED NAME\n"); | |
2f63b840 | 216 | seq_printf(s, "# | | | |\n"); |
e1d8aa9f FW |
217 | return 0; |
218 | } | |
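/*
 * Example of the resulting output (values purely illustrative):
 *
 * # CPU  INSERTED  EXECUTED   NAME
 * # |      |         |          |
 *     0    4565      4565       events/0
 */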
219 | ||
220 | struct tracer_stat workqueue_stats __read_mostly = { | |
221 | .name = "workqueues", | |
222 | .stat_start = workqueue_stat_start, | |
223 | .stat_next = workqueue_stat_next, | |
224 | .stat_show = workqueue_stat_show, | |
225 | .stat_headers = workqueue_stat_headers | |
226 | }; | |
227 | ||
228 | ||
229 | int __init stat_workqueue_init(void) | |
230 | { | |
231 | if (register_stat_tracer(&workqueue_stats)) { | |
232 | pr_warning("Unable to register workqueue stat tracer\n"); | |
233 | return 1; | |
234 | } | |
235 | ||
236 | return 0; | |
237 | } | |
238 | fs_initcall(stat_workqueue_init); | |
239 | ||
240 | /* | |
241 | * Workqueues are created very early, just after pre-smp initcalls. | |
242 | * So we must register our tracepoints at this stage. | |
243 | */ | |
244 | int __init trace_workqueue_early_init(void) | |
245 | { | |
246 | int ret, cpu; | |
247 | ||
248 | ret = register_trace_workqueue_insertion(probe_workqueue_insertion); | |
249 | if (ret) | |
250 | goto out; | |
251 | ||
252 | ret = register_trace_workqueue_execution(probe_workqueue_execution); | |
253 | if (ret) | |
254 | goto no_insertion; | |
255 | ||
256 | ret = register_trace_workqueue_creation(probe_workqueue_creation); | |
257 | if (ret) | |
258 | goto no_execution; | |
259 | ||
260 | ret = register_trace_workqueue_destruction(probe_workqueue_destruction); | |
261 | if (ret) | |
262 | goto no_creation; | |
263 | ||
e1d8aa9f | 264 | for_each_possible_cpu(cpu) { |
3690b5e6 LJ |
265 | spin_lock_init(&workqueue_cpu_stat(cpu)->lock); |
266 | INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list); | |
e1d8aa9f FW |
267 | } |
268 | ||
269 | return 0; | |
270 | ||
271 | no_creation: | |
272 | unregister_trace_workqueue_creation(probe_workqueue_creation); | |
273 | no_execution: | |
274 | unregister_trace_workqueue_execution(probe_workqueue_execution); | |
275 | no_insertion: | |
276 | unregister_trace_workqueue_insertion(probe_workqueue_insertion); | |
277 | out: | |
278 | pr_warning("trace_workqueue: unable to trace workqueues\n"); | |
279 | ||
280 | return 1; | |
281 | } | |
282 | early_initcall(trace_workqueue_early_init); |