/*
 * Memory mapped I/O tracing
 *
 * Copyright (C) 2008 Pekka Paalanen <pq@iki.fi>
 */

#define DEBUG 1

#include <linux/kernel.h>
#include <linux/mmiotrace.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/time.h>

#include <linux/atomic.h>

#include "trace.h"
#include "trace_output.h"

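/*
 * The tracer is selected by writing "mmiotrace" to the current_tracer
 * file in the tracing directory (usually mounted at
 * /sys/kernel/debug/tracing) and its text log is read from trace_pipe.
 * See the mmiotrace documentation under Documentation/trace/ for the
 * log format and usage details.
 */
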
struct header_iter {
	struct pci_dev *dev;
};

static struct trace_array *mmio_trace_array;
static bool overrun_detected;
static unsigned long prev_overruns;
static atomic_t dropped_count;

static void mmio_reset_data(struct trace_array *tr)
{
	overrun_detected = false;
	prev_overruns = 0;

	tracing_reset_online_cpus(&tr->trace_buffer);
}

static int mmio_trace_init(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);
	mmio_trace_array = tr;

	mmio_reset_data(tr);
	enable_mmiotrace();
	return 0;
}

static void mmio_trace_reset(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);

	disable_mmiotrace();
	mmio_reset_data(tr);
	mmio_trace_array = NULL;
}

static void mmio_trace_start(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);
	mmio_reset_data(tr);
}

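/*
 * Emit one PCIDEV header line for @dev: bus number and devfn, vendor and
 * device ID, IRQ, then the start address of each of the seven PCI
 * resources (with the region flags OR'ed into the low bits), the size of
 * each resource (zero if unused), and finally the bound driver name, if
 * any.
 */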
static void mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
{
	int i;
	resource_size_t start, end;
	const struct pci_driver *drv = pci_dev_driver(dev);

	trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x",
			 dev->bus->number, dev->devfn,
			 dev->vendor, dev->device, dev->irq);
	/*
	 * XXX: is pci_resource_to_user() appropriate, since we are
	 * supposed to interpret the __ioremap() phys_addr argument based on
	 * these printed values?
	 */
	for (i = 0; i < 7; i++) {
		pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
		trace_seq_printf(s, " %llx",
			(unsigned long long)(start |
			(dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
	}
	for (i = 0; i < 7; i++) {
		pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
		trace_seq_printf(s, " %llx",
			dev->resource[i].start < dev->resource[i].end ?
			(unsigned long long)(end - start) + 1 : 0);
	}
	if (drv)
		trace_seq_printf(s, " %s\n", drv->name);
	else
		trace_seq_puts(s, " \n");
}

static void destroy_header_iter(struct header_iter *hiter)
{
	if (!hiter)
		return;
	pci_dev_put(hiter->dev);
	kfree(hiter);
}

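/*
 * pipe_open writes only the VERSION header line; the PCIDEV lines are
 * produced one device at a time by mmio_read() until the PCI device
 * list has been walked, after which the header_iter is freed.
 */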
static void mmio_pipe_open(struct trace_iterator *iter)
{
	struct header_iter *hiter;
	struct trace_seq *s = &iter->seq;

	trace_seq_puts(s, "VERSION 20070824\n");

	hiter = kzalloc(sizeof(*hiter), GFP_KERNEL);
	if (!hiter)
		return;

	hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
	iter->private = hiter;
}

/* XXX: This is not called when the pipe is closed! */
static void mmio_close(struct trace_iterator *iter)
{
	struct header_iter *hiter = iter->private;
	destroy_header_iter(hiter);
	iter->private = NULL;
}

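/*
 * Return the number of events lost since the last call: events dropped
 * because the ring buffer reservation failed (dropped_count) plus any
 * increase in the ring buffer's own overrun counter.
 */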
static unsigned long count_overruns(struct trace_iterator *iter)
{
	unsigned long cnt = atomic_xchg(&dropped_count, 0);
	unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);

	if (over > prev_overruns)
		cnt += over - prev_overruns;
	prev_overruns = over;
	return cnt;
}

static ssize_t mmio_read(struct trace_iterator *iter, struct file *filp,
				char __user *ubuf, size_t cnt, loff_t *ppos)
{
	ssize_t ret;
	struct header_iter *hiter = iter->private;
	struct trace_seq *s = &iter->seq;
	unsigned long n;

	n = count_overruns(iter);
	if (n) {
		/* XXX: This is later than where events were lost. */
		trace_seq_printf(s, "MARK 0.000000 Lost %lu events.\n", n);
		if (!overrun_detected)
			pr_warning("mmiotrace has lost events.\n");
		overrun_detected = true;
		goto print_out;
	}

	if (!hiter)
		return 0;

	mmio_print_pcidev(s, hiter->dev);
	hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, hiter->dev);

	if (!hiter->dev) {
		destroy_header_iter(hiter);
		iter->private = NULL;
	}

print_out:
	ret = trace_seq_to_user(s, ubuf, cnt);
	return (ret == -EBUSY) ? 0 : ret;
}

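/*
 * Format one MMIO access as a text line:
 *   R/W <width> <sec>.<usec> <map id> <phys> <value> <pc> 0
 * Unknown opcodes are reported as UNKNOWN with the value split into
 * three bytes.
 */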
static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_mmiotrace_rw *field;
	struct mmiotrace_rw *rw;
	struct trace_seq *s = &iter->seq;
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
	unsigned secs = (unsigned long)t;

	trace_assign_type(field, entry);
	rw = &field->rw;

	switch (rw->opcode) {
	case MMIO_READ:
		trace_seq_printf(s,
			"R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			rw->width, secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			rw->value, rw->pc, 0);
		break;
	case MMIO_WRITE:
		trace_seq_printf(s,
			"W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			rw->width, secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			rw->value, rw->pc, 0);
		break;
	case MMIO_UNKNOWN_OP:
		trace_seq_printf(s,
			"UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx,"
			"%02lx 0x%lx %d\n",
			secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			(rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff,
			(rw->value >> 0) & 0xff, rw->pc, 0);
		break;
	default:
		trace_seq_puts(s, "rw what?\n");
		break;
	}

	return trace_handle_return(s);
}

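/*
 * Format an ioremap/iounmap event as a MAP or UNMAP line containing the
 * timestamp, map id and, for MAP, the physical address, virtual address
 * and length of the mapping.
 */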
static enum print_line_t mmio_print_map(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_mmiotrace_map *field;
	struct mmiotrace_map *m;
	struct trace_seq *s = &iter->seq;
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
	unsigned secs = (unsigned long)t;

	trace_assign_type(field, entry);
	m = &field->map;

	switch (m->opcode) {
	case MMIO_PROBE:
		trace_seq_printf(s,
			"MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
			secs, usec_rem, m->map_id,
			(unsigned long long)m->phys, m->virt, m->len,
			0UL, 0);
		break;
	case MMIO_UNPROBE:
		trace_seq_printf(s,
			"UNMAP %u.%06lu %d 0x%lx %d\n",
			secs, usec_rem, m->map_id, 0UL, 0);
		break;
	default:
		trace_seq_puts(s, "map what?\n");
		break;
	}

	return trace_handle_return(s);
}

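/*
 * MARK lines carry free-form messages injected through
 * mmio_trace_printk(), prefixed with a timestamp.
 */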
static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct print_entry *print = (struct print_entry *)entry;
	const char *msg = print->buf;
	struct trace_seq *s = &iter->seq;
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
	unsigned secs = (unsigned long)t;

	/* The trailing newline must be in the message. */
	trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg);

	return trace_handle_return(s);
}

static enum print_line_t mmio_print_line(struct trace_iterator *iter)
{
	switch (iter->ent->type) {
	case TRACE_MMIO_RW:
		return mmio_print_rw(iter);
	case TRACE_MMIO_MAP:
		return mmio_print_map(iter);
	case TRACE_PRINT:
		return mmio_print_mark(iter);
	default:
		return TRACE_TYPE_HANDLED; /* ignore unknown entries */
	}
}

static struct tracer mmio_tracer __read_mostly =
{
	.name		= "mmiotrace",
	.init		= mmio_trace_init,
	.reset		= mmio_trace_reset,
	.start		= mmio_trace_start,
	.pipe_open	= mmio_pipe_open,
	.close		= mmio_close,
	.read		= mmio_read,
	.print_line	= mmio_print_line,
};

__init static int init_mmio_trace(void)
{
	return register_tracer(&mmio_tracer);
}
device_initcall(init_mmio_trace);

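/*
 * Record one MMIO access in the ring buffer. If the event cannot be
 * reserved, bump dropped_count so the loss is reported to userspace by
 * count_overruns().
 */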
static void __trace_mmiotrace_rw(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct mmiotrace_rw *rw)
{
	struct trace_event_call *call = &event_mmiotrace_rw;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct trace_mmiotrace_rw *entry;
	int pc = preempt_count();

	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
					  sizeof(*entry), 0, pc);
	if (!event) {
		atomic_inc(&dropped_count);
		return;
	}
	entry = ring_buffer_event_data(event);
	entry->rw = *rw;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
}

void mmio_trace_rw(struct mmiotrace_rw *rw)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
	__trace_mmiotrace_rw(tr, data, rw);
}

static void __trace_mmiotrace_map(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct mmiotrace_map *map)
{
	struct trace_event_call *call = &event_mmiotrace_map;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct trace_mmiotrace_map *entry;
	int pc = preempt_count();

	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
					  sizeof(*entry), 0, pc);
	if (!event) {
		atomic_inc(&dropped_count);
		return;
	}
	entry = ring_buffer_event_data(event);
	entry->map = *map;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
}

void mmio_trace_mapping(struct mmiotrace_map *map)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data;

	preempt_disable();
	data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
	__trace_mmiotrace_map(tr, data, map);
	preempt_enable();
}

int mmio_trace_printk(const char *fmt, va_list args)
{
	return trace_vprintk(0, fmt, args);
}