/*
 * latencytop.c: Latency display infrastructure
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/latencytop.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>

/* Serializes updates to both the global table and the per-task records: */
static DEFINE_SPINLOCK(latency_lock);

/* Size of the global latency record table: */
#define MAXLR 128
static struct latency_record latency_record[MAXLR];

int latencytop_enabled;

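/*
 * Reset the per-task latency records of @p. Takes latency_lock so the
 * reset cannot race with account_scheduler_latency() updating them.
 */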
void clear_all_latency_tracing(struct task_struct *p)
{
	unsigned long flags;

	if (!latencytop_enabled)
		return;

	spin_lock_irqsave(&latency_lock, flags);
	memset(&p->latency_record, 0, sizeof(p->latency_record));
	p->latency_record_count = 0;
	spin_unlock_irqrestore(&latency_lock, flags);
}

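/*
 * Zero the global latency_record[] table; triggered from lstats_write()
 * when userspace writes to /proc/latency_stats.
 */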
static void clear_global_latency_tracing(void)
{
	unsigned long flags;

	spin_lock_irqsave(&latency_lock, flags);
	memset(&latency_record, 0, sizeof(latency_record));
	spin_unlock_irqrestore(&latency_lock, flags);
}

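/*
 * Merge one latency hit into the global table: if a record with an
 * identical backtrace exists, fold count/time/max into it, otherwise
 * copy @lat into the first free slot. Caller holds latency_lock.
 */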
static void __sched
account_global_scheduler_latency(struct task_struct *tsk,
				 struct latency_record *lat)
{
	int firstnonnull = MAXLR;
	int i;

	if (!latencytop_enabled)
		return;

	/* skip kernel threads for now */
	if (!tsk->mm)
		return;

	for (i = 0; i < MAXLR; i++) {
		int q;
		int same = 1;

		/* Nothing stored: */
		if (!latency_record[i].backtrace[0]) {
			if (firstnonnull > i)
				firstnonnull = i;
			continue;
		}
		/* A 0 or ULONG_MAX entry marks the end of a backtrace: */
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			if (latency_record[i].backtrace[q] !=
					lat->backtrace[q])
				same = 0;
			if (same && lat->backtrace[q] == 0)
				break;
			if (same && lat->backtrace[q] == ULONG_MAX)
				break;
		}
		if (same) {
			latency_record[i].count++;
			latency_record[i].time += lat->time;
			if (lat->time > latency_record[i].max)
				latency_record[i].max = lat->time;
			return;
		}
	}

	/* No identical backtrace found; take the first free slot, if any: */
	i = firstnonnull;
	if (i >= MAXLR)
		return;

	/* Allocate a new one: */
	memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}

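/*
 * Capture the kernel stack of @tsk into @lat->backtrace, at most
 * LT_BACKTRACEDEPTH entries deep.
 */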
static inline void
store_stacktrace(struct task_struct *tsk, struct latency_record *lat)
{
	struct stack_trace trace;

	memset(&trace, 0, sizeof(trace));
	trace.max_entries = LT_BACKTRACEDEPTH;
	trace.entries = &lat->backtrace[0];
	trace.skip = 0;
	save_stack_trace_tsk(tsk, &trace);
}

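/*
 * account_scheduler_latency - record one latency hit for @tsk
 * @tsk:   the task that incurred the latency
 * @usecs: duration of the latency in microseconds
 * @inter: nonzero if the task was waiting interruptibly
 *
 * Stores the hit both in the global table and in the per-task record
 * array. Interruptible waits longer than 5 ms are skipped, since such
 * long waits are usually requested by userspace itself.
 */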
void __sched
account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
	unsigned long flags;
	int i, q;
	struct latency_record lat;

	if (!latencytop_enabled)
		return;

	/* Long interruptible waits are generally user requested... */
	if (inter && usecs > 5000)
		return;

	memset(&lat, 0, sizeof(lat));
	lat.count = 1;
	lat.time = usecs;
	lat.max = usecs;
	store_stacktrace(tsk, &lat);

	spin_lock_irqsave(&latency_lock, flags);

	account_global_scheduler_latency(tsk, &lat);

	/*
	 * Short term hack: once a task has LT_SAVECOUNT records we stop
	 * adding new ones; in the future old entries should be recycled:
	 */
	if (tsk->latency_record_count >= LT_SAVECOUNT)
		goto out_unlock;

	for (i = 0; i < LT_SAVECOUNT; i++) {
		struct latency_record *mylat;
		int same = 1;

		mylat = &tsk->latency_record[i];
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			if (mylat->backtrace[q] != lat.backtrace[q])
				same = 0;
			if (same && lat.backtrace[q] == 0)
				break;
			if (same && lat.backtrace[q] == ULONG_MAX)
				break;
		}
		if (same) {
			mylat->count++;
			mylat->time += lat.time;
			if (lat.time > mylat->max)
				mylat->max = lat.time;
			goto out_unlock;
		}
	}

	/* Allocated a new one; only count records that are actually stored: */
	i = tsk->latency_record_count++;
	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
	spin_unlock_irqrestore(&latency_lock, flags);
}

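/*
 * Seq-file show routine for /proc/latency_stats: after a version header,
 * each used global record is printed as
 *
 *	<count> <total usecs> <max usecs> <symbolized backtrace>
 */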
static int lstats_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "Latency Top version : v0.1\n");

	for (i = 0; i < MAXLR; i++) {
		if (latency_record[i].backtrace[0]) {
			int q;

			seq_printf(m, "%i %li %li ",
				latency_record[i].count,
				latency_record[i].time,
				latency_record[i].max);
			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
				char sym[KSYM_NAME_LEN];
				char *c;

				if (!latency_record[i].backtrace[q])
					break;
				if (latency_record[i].backtrace[q] == ULONG_MAX)
					break;
				sprint_symbol(sym,
					latency_record[i].backtrace[q]);
				/* Print only the symbol name, not the offset: */
				c = strchr(sym, '+');
				if (c)
					*c = 0;
				seq_printf(m, "%s ", sym);
			}
			seq_puts(m, "\n");
		}
	}
	return 0;
}

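/*
 * Any write to /proc/latency_stats clears the global records; the
 * written data itself is ignored.
 */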
static ssize_t
lstats_write(struct file *file, const char __user *buf, size_t count,
	     loff_t *offs)
{
	clear_global_latency_tracing();

	return count;
}

static int lstats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, lstats_show, NULL);
}

static const struct file_operations lstats_fops = {
	.open		= lstats_open,
	.read		= seq_read,
	.write		= lstats_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

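/*
 * Create /proc/latency_stats at boot: reading dumps the global records,
 * writing clears them.
 */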
static int __init init_lstats_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = create_proc_entry("latency_stats", 0644, NULL);
	if (!pe)
		return -ENOMEM;

	pe->proc_fops = &lstats_fops;

	return 0;
}
__initcall(init_lstats_procfs);