/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <trace/block.h>
#include <asm/uaccess.h>
#include <../kernel/trace/trace_output.h>

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static int __read_mostly blk_tracer_enabled;

/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1

static struct tracer_opt blk_tracer_opts[] = {
	/* Disable the minimalistic output by default */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static DEFINE_MUTEX(blk_probe_mutex);
static atomic_t blk_probes_ref = ATOMIC_INIT(0);

static int blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len)
{
	struct blk_io_trace *t;

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len);
	if (t) {
		const int cpu = smp_processor_id();

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
		t->device = bt->dev;
		t->action = action;
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len;
		memcpy((void *) t + sizeof(*t), data, len);
	}
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
	tsk->btrace_seq = blktrace_seq;
	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec now;
	unsigned long flags;
	u32 words[2];

	getnstimeofday(&now);
	words[0] = now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
	local_irq_restore(flags);
}

void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (blk_tr) {
		va_start(args, fmt);
		ftrace_vprintk(fmt, args);
		va_end(args);
		return;
	}

	if (!bt->msg_data)
		return;

	local_irq_save(flags);
	buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);

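/*
 * Usage sketch (illustrative; assumes the blk_add_trace_msg() wrapper
 * declared next to this function in <linux/blktrace_api.h>): drivers
 * normally emit messages through that macro, which checks q->blk_trace
 * before calling in here, e.g.
 *
 *	blk_add_trace_msg(q, "queue depth now %d", depth);
 *
 * Such BLK_TN_MESSAGE notes show up as 'm' events in blkparse output.
 */
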
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector < bt->start_lba || sector > bt->end_lba)
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}

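/*
 * Filter semantics (illustrative): a non-zero return means "drop this
 * event". With act_mask == BLK_TC_READ, start_lba == 0, end_lba == -1ULL
 * and pid == 0, for example, a write is rejected because none of its
 * BLK_TC_WRITE action bits survive the act_mask test, while any read to
 * any sector from any task passes.
 */
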
/*
 * Data direction bit lookup
 */
static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE) };

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ( (rw & (1 << BIO_RW_ ## __name)) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name) )

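/*
 * Worked example (illustrative, assuming the contemporary constants
 * BIO_RW_SYNC == 4, BLK_TC_SYNC == 1 << 3 and BLK_TC_SHIFT == 16):
 *
 *	MASK_TC_BIT(rw, SYNC) == (rw & (1 << 4)) << (3 + 16 - 4)
 *
 * i.e. bit 4 of the bio flags is moved up to bit 19, which is exactly
 * BLK_TC_ACT(BLK_TC_SYNC) in the blk_io_trace action word. The shift is a
 * compile-time constant, so each mapping costs one AND and one shift.
 */
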
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    int rw, u32 what, int error, int pdu_len, void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct blk_io_trace *t;
	unsigned long flags;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;

	if (unlikely(bt->trace_state != Blktrace_running ||
		     !blk_tracer_enabled))
		return;

	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, BARRIER);
	what |= MASK_TC_BIT(rw, SYNC);
	what |= MASK_TC_BIT(rw, AHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);

	pid = tsk->pid;
	if (unlikely(act_log_check(bt, what, sector, pid)))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tr) {
		struct trace_entry *ent;
		tracing_record_cmdline(current);

		event = ring_buffer_lock_reserve(blk_tr->buffer,
						 sizeof(*t) + pdu_len, &flags);
		if (!event)
			return;

		ent = ring_buffer_event_data(event);
		t = (struct blk_io_trace *)ent;
		pc = preempt_count();
		tracing_generic_entry_update(ent, 0, pc);
		ent->type = TRACE_BLK;
		goto record_it;
	}

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tr) {
			ring_buffer_unlock_commit(blk_tr->buffer, event, flags);
			if (pid != 0 &&
			    !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC) &&
			    (trace_flags & TRACE_ITER_STACKTRACE) != 0)
				__trace_stack(blk_tr, NULL, flags, 5, pc);
			trace_wake_up();
			return;
		}
	}

	local_irq_restore(flags);
}

static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);

static void blk_trace_cleanup(struct blk_trace *bt)
{
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	relay_close(bt->rchan);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
	mutex_lock(&blk_probe_mutex);
	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

int blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state == Blktrace_setup ||
	    bt->trace_state == Blktrace_stopped)
		blk_trace_cleanup(bt);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static int blk_dropped_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner = THIS_MODULE,
	.open  = blk_dropped_open,
	.read  = blk_dropped_read,
};

static int blk_msg_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count > BLK_TN_MAX_MSG)
		return -EINVAL;

	/* Allocate one extra byte so the user message is NUL-terminated */
	msg = kmalloc(count + 1, GFP_KERNEL);
	if (msg == NULL)
		return -ENOMEM;

	if (copy_from_user(msg, buffer, count)) {
		kfree(msg);
		return -EFAULT;
	}

	msg[count] = '\0';
	bt = filp->private_data;
	__trace_note_message(bt, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner = THIS_MODULE,
	.open  = blk_msg_open,
	.write = blk_msg_write,
};

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	debugfs_remove(dentry);

	/*
	 * This will fail for all but the last file, but that is ok. What we
	 * care about is the top level buts->name directory going away, when
	 * the last trace file is gone. Then we don't have to rmdir() that
	 * manually on trace stop, so it nicely solves the issue with
	 * force killing of running traces.
	 */

	debugfs_remove(parent);
	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   int mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start    = blk_subbuf_start_callback,
	.create_buf_file = blk_create_buf_file_callback,
	.remove_buf_file = blk_remove_buf_file_callback,
};

/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		       struct blk_user_trace_setup *buts)
{
	struct blk_trace *old_bt, *bt = NULL;
	struct dentry *dir = NULL;
	int ret, i;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * Some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected.
	 */
	for (i = 0; i < strlen(buts->name); i++)
		if (buts->name[i] == '/')
			buts->name[i] = '_';

	ret = -ENOMEM;
	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		goto err;

	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG);
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;

	if (!blk_tree_root) {
		blk_tree_root = debugfs_create_dir("block", NULL);
		if (!blk_tree_root) {
			/* Don't leak bt and its per-cpu buffers here */
			ret = -ENOMEM;
			goto err;
		}
	}

	dir = debugfs_create_dir(buts->name, blk_tree_root);

	if (!dir)
		goto err;

	bt->dir = dir;
	bt->dev = dev;
	atomic_set(&bt->dropped, 0);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
	if (!bt->msg_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts->buf_size,
			       buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	bt->start_lba = buts->start_lba;
	bt->end_lba = buts->end_lba;
	if (!bt->end_lba)
		bt->end_lba = -1ULL;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	mutex_lock(&blk_probe_mutex);
	if (atomic_add_return(1, &blk_probes_ref) == 1) {
		ret = blk_register_tracepoints();
		if (ret)
			goto probe_err;
	}
	mutex_unlock(&blk_probe_mutex);

	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt) {
		(void) xchg(&q->blk_trace, old_bt);
		goto err;
	}

	return 0;
probe_err:
	atomic_dec(&blk_probes_ref);
	mutex_unlock(&blk_probe_mutex);
err:
	if (bt) {
		if (bt->msg_file)
			debugfs_remove(bt->msg_file);
		if (bt->dropped_file)
			debugfs_remove(bt->dropped_file);
		free_percpu(bt->sequence);
		free_percpu(bt->msg_data);
		if (bt->rchan)
			relay_close(bt->rchan);
		kfree(bt);
	}
	return ret;
}
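
/*
 * Resulting layout (illustrative): with debugfs mounted at
 * /sys/kernel/debug, tracing disk "sda" creates
 *
 *	/sys/kernel/debug/block/sda/dropped	- dropped-event count
 *	/sys/kernel/debug/block/sda/msg		- write-only message file
 *	/sys/kernel/debug/block/sda/trace0..N	- one relay file per CPU
 *
 * which blktrace(8) reads to collect the binary event stream.
 */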

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts)))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);

int blk_trace_startstop(struct request_queue *q, int start)
{
	struct blk_trace *bt;
	int ret;

	if ((bt = q->blk_trace) == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&bdev->bd_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
		break;
	case BLKTRACESTART:
		start = 1;
		/* fall through */
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&bdev->bd_mutex);
	return ret;
}

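/*
 * Userspace view (illustrative sketch, not part of this file): the ioctls
 * above are what blktrace(8) issues against an open block device, roughly:
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,		// bytes per relay subbuffer
 *		.buf_nr   = 4,			// subbuffers per CPU
 *	};
 *	int fd = open("/dev/sda", O_RDONLY);
 *	ioctl(fd, BLKTRACESETUP, &buts);	// create the debugfs files
 *	ioctl(fd, BLKTRACESTART);		// state -> Blktrace_running
 *	// ... read /sys/kernel/debug/block/sda/trace<cpu> ...
 *	ioctl(fd, BLKTRACESTOP);
 *	ioctl(fd, BLKTRACETEARDOWN);		// blk_trace_remove()
 */
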
/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (q->blk_trace) {
		blk_trace_startstop(q, 0);
		blk_trace_remove(q);
	}
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:		queue the io is for
 * @rq:		the source request
 * @what:	the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
			     u32 what)
{
	struct blk_trace *bt = q->blk_trace;
	int rw = rq->cmd_flags & 0x03;

	if (likely(!bt))
		return;

	if (blk_discard_rq(rq))
		rw |= (1 << BIO_RW_DISCARD);

	if (blk_pc_request(rq)) {
		what |= BLK_TC_ACT(BLK_TC_PC);
		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
				sizeof(rq->cmd), rq->cmd);
	} else {
		what |= BLK_TC_ACT(BLK_TC_FS);
		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
				rw, what, rq->errors, 0, NULL);
	}
}

static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ABORT);
}

static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
}

static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
}

static void blk_add_trace_rq_requeue(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
}

static void blk_add_trace_rq_complete(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
			!bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}

static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
}

static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
}

static void blk_add_trace_bio_backmerge(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
}

static void blk_add_trace_bio_frontmerge(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
}

static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
}

static void blk_add_trace_getrq(struct request_queue *q, struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
	}
}

static void blk_add_trace_sleeprq(struct request_queue *q, struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ, 0, 0, NULL);
	}
}

static void blk_add_trace_plug(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt)
		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}

static void blk_add_trace_unplug_io(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
				sizeof(rpdu), &rpdu);
	}
}

static void blk_add_trace_unplug_timer(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
				sizeof(rpdu), &rpdu);
	}
}

static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
				unsigned int pdu)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
				BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
				sizeof(rpdu), &rpdu);
	}
}

/**
 * blk_add_trace_remap - Add a trace for a remap operation
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 * @to:		target sector
 *
 * Description:
 *     Device mapper or raid target sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
				dev_t dev, sector_t from, sector_t to)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device = cpu_to_be32(dev);
	r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
	r.sector = cpu_to_be64(to);

	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
			 struct request *rq,
			 void *data, size_t len)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (blk_pc_request(rq))
		__blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
				rq->errors, len, data);
	else
		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
				0, BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);

static int blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_abort(blk_add_trace_rq_abort);
	WARN_ON(ret);
	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq);
	WARN_ON(ret);
	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug);
	WARN_ON(ret);
	ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer);
	WARN_ON(ret);
	ret = register_trace_block_unplug_io(blk_add_trace_unplug_io);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split);
	WARN_ON(ret);
	ret = register_trace_block_remap(blk_add_trace_remap);
	WARN_ON(ret);
	return 0;
}

static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_remap(blk_add_trace_remap);
	unregister_trace_block_split(blk_add_trace_split);
	unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
	unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer);
	unregister_trace_block_plug(blk_add_trace_plug);
	unregister_trace_block_sleeprq(blk_add_trace_sleeprq);
	unregister_trace_block_getrq(blk_add_trace_getrq);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert);
	unregister_trace_block_rq_abort(blk_add_trace_rq_abort);

	tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_tracer formatting routines
 */

static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	/* The BLK_TC_* category bits live in the high half of ->action */
	int tc = t->action >> BLK_TC_SHIFT;

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_BARRIER)
		rwbs[i++] = 'B';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';

	rwbs[i] = '\0';
}

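/*
 * Example (derived from the code above): a synchronous write carries
 * BLK_TC_WRITE|BLK_TC_SYNC in its category bits and renders as "WS", a
 * readahead read as "RA", and an event that moved no data (a plug, say)
 * as "N".
 */
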
static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent) + 1;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	/* Return the error field, not a copy-pasted ->sector */
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent)
{
	const __u64 *val = pdu_start(ent);
	return be64_to_cpu(*val);
}

static void get_pdu_remap(const struct trace_entry *ent,
			  struct blk_io_trace_remap *r)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent);
	__u64 sector = __r->sector;

	r->device = be32_to_cpu(__r->device);
	r->device_from = be32_to_cpu(__r->device_from);
	r->sector = be64_to_cpu(sector);
}

static int blk_log_action_iter(struct trace_iterator *iter, const char *act)
{
	char rwbs[6];
	unsigned long long ts = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(ts, USEC_PER_SEC);
	unsigned secs = (unsigned long)ts;
	const struct trace_entry *ent = iter->ent;
	const struct blk_io_trace *t = (const struct blk_io_trace *)ent;

	fill_rwbs(rwbs, t);

	return trace_seq_printf(&iter->seq,
				"%3d,%-3d %2d %5d.%06lu %5u %2s %3s ",
				MAJOR(t->device), MINOR(t->device), iter->cpu,
				secs, usec_rem, ent->pid, act, rwbs);
}

static int blk_log_action_seq(struct trace_seq *s, const struct blk_io_trace *t,
			      const char *act)
{
	char rwbs[6];
	fill_rwbs(rwbs, t);
	return trace_seq_printf(s, "%3d,%-3d %2s %3s ",
				MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
{
	const char *cmd = trace_find_cmdline(ent->pid);

	if (t_sec(ent))
		return trace_seq_printf(s, "%llu + %u [%s]\n",
					t_sector(ent), t_sec(ent), cmd);
	return trace_seq_printf(s, "[%s]\n", cmd);
}

static int blk_log_with_error(struct trace_seq *s,
			      const struct trace_entry *ent)
{
	if (t_sec(ent))
		return trace_seq_printf(s, "%llu + %u [%d]\n", t_sector(ent),
					t_sec(ent), t_error(ent));
	return trace_seq_printf(s, "%llu [%d]\n", t_sector(ent), t_error(ent));
}

static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
{
	struct blk_io_trace_remap r = { .device = 0, };

	get_pdu_remap(ent, &r);
	return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
				t_sector(ent),
				t_sec(ent), MAJOR(r.device), MINOR(r.device),
				(unsigned long long)r.sector);
}

static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
{
	return trace_seq_printf(s, "[%s]\n", trace_find_cmdline(ent->pid));
}

static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
{
	return trace_seq_printf(s, "[%s] %llu\n", trace_find_cmdline(ent->pid),
				get_pdu_int(ent));
}

static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
{
	return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
				get_pdu_int(ent), trace_find_cmdline(ent->pid));
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);

	mutex_lock(&blk_probe_mutex);
	if (atomic_add_return(1, &blk_probes_ref) == 1)
		if (blk_register_tracepoints())
			atomic_dec(&blk_probes_ref);
	mutex_unlock(&blk_probe_mutex);
	trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	mutex_lock(&blk_probe_mutex);
	blk_tracer_enabled++;
	mutex_unlock(&blk_probe_mutex);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	trace_flags |= TRACE_ITER_CONTEXT_INFO;
	mutex_lock(&blk_probe_mutex);
	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static void blk_tracer_reset(struct trace_array *tr)
{
	if (!atomic_read(&blk_probes_ref))
		return;

	mutex_lock(&blk_probe_mutex);
	blk_tracer_enabled--;
	WARN_ON(blk_tracer_enabled < 0);
	mutex_unlock(&blk_probe_mutex);

	blk_tracer_stop(tr);
}

static struct {
	const char *act[2];
	int (*print)(struct trace_seq *s, const struct trace_entry *ent);
} what2act[] __read_mostly = {
	[__BLK_TA_QUEUE]	= {{ "Q",  "queue" },	     blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{ "M",  "backmerge" },    blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{ "F",  "frontmerge" },   blk_log_generic },
	[__BLK_TA_GETRQ]	= {{ "G",  "getrq" },	     blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{ "S",  "sleeprq" },	     blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{ "R",  "requeue" },	     blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{ "D",  "issue" },	     blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{ "C",  "complete" },     blk_log_with_error },
	[__BLK_TA_PLUG]		= {{ "P",  "plug" },	     blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{ "U",  "unplug_io" },    blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{ "I",  "insert" },	     blk_log_generic },
	[__BLK_TA_SPLIT]	= {{ "X",  "split" },	     blk_log_split },
	[__BLK_TA_BOUNCE]	= {{ "B",  "bounce" },	     blk_log_generic },
	[__BLK_TA_REMAP]	= {{ "A",  "remap" },	     blk_log_remap },
};

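/*
 * Sample line (illustrative): with the classic option set, a queued
 * synchronous write on device (8,0) prints roughly as
 *
 *	  8,0    2     1.012345  4123  Q  WS 3418696 + 8 [dd]
 *
 * i.e. blk_log_action_iter()'s "dev cpu timestamp pid act rwbs" prefix
 * followed by blk_log_generic()'s "sector + nr_sectors [comm]" payload.
 */
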
static int blk_trace_event_print(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const u16 what = t->action & ((1 << BLK_TC_SHIFT) - 1);
	int ret;

	if (trace_print_context(iter))
		return TRACE_TYPE_PARTIAL_LINE;

	if (unlikely(what == 0 || what > ARRAY_SIZE(what2act)))
		ret = trace_seq_printf(s, "Bad pc action %x\n", what);
	else {
		const bool long_act = !!(trace_flags & TRACE_ITER_VERBOSE);
		ret = blk_log_action_seq(s, t, what2act[what].act[long_act]);
		if (ret)
			ret = what2act[what].print(s, iter->ent);
	}

	return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time  = ns2usecs(iter->ts),
	};

	if (!trace_seq_putmem(s, &old, offset))
		return 0;
	return trace_seq_putmem(s, &t->sector,
				sizeof(old) - offset + t->pdu_len);
}

static int blk_trace_event_print_binary(struct trace_iterator *iter, int flags)
{
	return blk_trace_synthesize_old_trace(iter) ?
			TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	const struct blk_io_trace *t;
	u16 what;
	int ret;

	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	t = (const struct blk_io_trace *)iter->ent;
	what = t->action & ((1 << BLK_TC_SHIFT) - 1);

	if (unlikely(what == 0 || what > ARRAY_SIZE(what2act)))
		ret = trace_seq_printf(&iter->seq, "Bad pc action %x\n", what);
	else {
		const bool long_act = !!(trace_flags & TRACE_ITER_VERBOSE);
		ret = blk_log_action_iter(iter, what2act[what].act[long_act]);
		if (ret)
			ret = what2act[what].print(&iter->seq, iter->ent);
	}

	return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.trace		= blk_trace_event_print,
	.latency_trace	= blk_trace_event_print,
	.raw		= trace_nop_print,
	.hex		= trace_nop_print,
	.binary		= blk_trace_event_print_binary,
};

static int __init init_blk_tracer(void)
{
	if (!register_ftrace_event(&trace_blk_event)) {
		pr_warning("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warning("Warning: could not register the block tracer\n");
		unregister_ftrace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	kfree(bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q, dev_t dev)
{
	struct blk_trace *old_bt, *bt = NULL;
	int ret;

	ret = -ENOMEM;
	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		goto err;

	bt->dev = dev;
	bt->act_mask = (u16)-1;
	bt->end_lba = -1ULL;
	bt->trace_state = Blktrace_running;

	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt != NULL) {
		(void)xchg(&q->blk_trace, old_bt);
		kfree(bt);
		/* Report the conflict instead of claiming success */
		return -EBUSY;
	}
	return 0;
err:
	return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_enable_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	lock_kernel();
	bdev = bdget(part_devt(p));
	if (bdev != NULL) {
		struct request_queue *q = bdev_get_queue(bdev);

		if (q != NULL) {
			mutex_lock(&bdev->bd_mutex);
			ret = sprintf(buf, "%u\n", !!q->blk_trace);
			mutex_unlock(&bdev->bd_mutex);
		}

		bdput(bdev);
	}

	unlock_kernel();
	return ret;
}

static ssize_t sysfs_blk_trace_enable_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	int value;
	ssize_t ret = -ENXIO;

	if (count == 0 || sscanf(buf, "%d", &value) != 1)
		goto out;

	lock_kernel();
	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = bdev_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);
	if (value)
		ret = blk_trace_setup_queue(q, bdev->bd_dev);
	else
		ret = blk_trace_remove_queue(q);
	mutex_unlock(&bdev->bd_mutex);

	if (ret == 0)
		ret = count;
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
out:
	return ret;
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
		   sysfs_blk_trace_enable_show, sysfs_blk_trace_enable_store);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};

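/*
 * Usage sketch (illustrative): these attributes appear per partition in
 * sysfs, so a trace can be configured entirely from the shell, e.g.
 *
 *	echo 0x10 > /sys/block/sda/sda1/trace/act_mask	 # queue events only
 *	echo 1	  > /sys/block/sda/sda1/trace/enable
 *	cat	    /sys/block/sda/sda1/trace/act_mask
 *
 * Reading act_mask/pid/start_lba/end_lba while no trace is set up prints
 * "disabled".
 */
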
static int blk_str2act_mask(const char *str)
{
	int mask = 0;
	char *copy = kstrdup(str, GFP_KERNEL), *s;

	if (copy == NULL)
		return -ENOMEM;

	s = strstrip(copy);

	while (1) {
		char *sep = strchr(s, ',');

		if (sep != NULL)
			*sep = '\0';

		if (strcasecmp(s, "barrier") == 0)
			mask |= BLK_TC_BARRIER;
		else if (strcasecmp(s, "complete") == 0)
			mask |= BLK_TC_COMPLETE;
		else if (strcasecmp(s, "fs") == 0)
			mask |= BLK_TC_FS;
		else if (strcasecmp(s, "issue") == 0)
			mask |= BLK_TC_ISSUE;
		else if (strcasecmp(s, "pc") == 0)
			mask |= BLK_TC_PC;
		else if (strcasecmp(s, "queue") == 0)
			mask |= BLK_TC_QUEUE;
		else if (strcasecmp(s, "read") == 0)
			mask |= BLK_TC_READ;
		else if (strcasecmp(s, "requeue") == 0)
			mask |= BLK_TC_REQUEUE;
		else if (strcasecmp(s, "sync") == 0)
			mask |= BLK_TC_SYNC;
		else if (strcasecmp(s, "write") == 0)
			mask |= BLK_TC_WRITE;

		if (sep == NULL)
			break;

		s = sep + 1;
	}
	kfree(copy);

	return mask;
}

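/*
 * Example (derived from the code above): blk_str2act_mask("read,write")
 * returns BLK_TC_READ | BLK_TC_WRITE (0x3), the same as writing "0x3" to
 * the act_mask attribute; unrecognized names are silently skipped, so
 * "read,bogus" still yields 0x1.
 */
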
static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	lock_kernel();
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = bdev_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;
	mutex_lock(&bdev->bd_mutex);
	if (q->blk_trace == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = sprintf(buf, "%#x\n", q->blk_trace->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
	return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	u64 value;
	ssize_t ret = -ENXIO;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (sscanf(buf, "%llx", &value) != 1) {
			/* Assume it is a list of trace category names */
			int mask = blk_str2act_mask(buf);

			/* Test the errno in a signed type; "value" is u64 */
			if (mask < 0)
				goto out;
			value = mask;
		}
	} else if (sscanf(buf, "%llu", &value) != 1)
		goto out;

	lock_kernel();
	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = bdev_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);
	ret = 0;
	if (q->blk_trace == NULL)
		ret = blk_trace_setup_queue(q, bdev->bd_dev);

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			q->blk_trace->act_mask = value;
		else if (attr == &dev_attr_pid)
			q->blk_trace->pid = value;
		else if (attr == &dev_attr_start_lba)
			q->blk_trace->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			q->blk_trace->end_lba = value;
		ret = count;
	}
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
out:
	return ret;
}