/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/blk-cgroup.h>

#include "../../block/blk.h"

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

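/*
 * Traces currently in the Blktrace_running state; the list is walked
 * under running_trace_lock whenever a task that has not yet been
 * announced needs a BLK_TN_PROCESS notify (see trace_note_tsk()).
 */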
static LIST_HEAD(running_trace_list);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);

/* Select an alternative, minimalistic output format instead of the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1
#define TRACE_BLK_OPT_CGROUP	0x2
#define TRACE_BLK_OPT_CGNAME	0x4

static struct tracer_opt blk_tracer_opts[] = {
	/* The minimalistic output is disabled by default */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
#ifdef CONFIG_BLK_CGROUP
	{ TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
	{ TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
#endif
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static atomic_t blk_probes_ref = ATOMIC_INIT(0);

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message, into the ftrace ring buffer when the blk
 * tracer is active, or into the relay channel otherwise.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len,
		       union kernfs_node_id *cgid)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	int pc = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;

	if (blk_tracer) {
		buffer = blk_tr->trace_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + len + cgid_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len + cgid_len;
		if (cgid)
			memcpy((void *)t + sizeof(*t), cgid, cgid_len);
		memcpy((void *) t + sizeof(*t) + cgid_len, data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
	}
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct task_struct *tsk)
{
	unsigned long flags;
	struct blk_trace *bt;

	tsk->btrace_seq = blktrace_seq;
	spin_lock_irqsave(&running_trace_lock, flags);
	list_for_each_entry(bt, &running_trace_list, running_list) {
		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
			   sizeof(tsk->comm), NULL);
	}
	spin_unlock_irqrestore(&running_trace_lock, flags);
}

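/*
 * Record the current wall-clock time as a BLK_TN_TIMESTAMP note, so
 * user space can relate the trace's ktime_get() timestamps to real time.
 */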
static void trace_note_time(struct blk_trace *bt)
{
	struct timespec64 now;
	unsigned long flags;
	u32 words[2];

	/* need to check user space to see if this breaks in y2038 or y2106 */
	ktime_get_real_ts64(&now);
	words[0] = (u32)now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), NULL);
	local_irq_restore(flags);
}

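/*
 * Write a formatted message into the trace as a BLK_TN_MESSAGE note.
 * The text is built in the per-cpu msg_data buffer, which is why
 * interrupts are disabled around its use. Reached via the debugfs
 * "msg" file and the blk_add_trace_msg() wrapper.
 */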
void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg,
	const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	/*
	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
	 * message to the trace.
	 */
	if (!(bt->act_mask & BLK_TC_NOTIFY))
		return;

	local_irq_save(flags);
	buf = this_cpu_ptr(bt->msg_data);
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		blkcg = NULL;
#ifdef CONFIG_BLK_CGROUP
	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n,
		blkcg ? cgroup_get_kernfs_id(blkcg->css.cgroup) : NULL);
#else
	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, NULL);
#endif
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);

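/*
 * Return 1 if the event should be dropped, 0 if it should be logged:
 * filters on the configured action mask, LBA range and pid.
 */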
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

#define BLK_TC_RAHEAD		BLK_TC_AHEAD
#define BLK_TC_PREFLUSH		BLK_TC_FLUSH

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
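
/*
 * For example, MASK_TC_BIT(op_flags, SYNC) isolates REQ_SYNC in
 * @op_flags and shifts that single bit up to the BLK_TC_SYNC position
 * above BLK_TC_SHIFT, translating request flags straight into trace
 * category bits without any branches.
 */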

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
		     int op, int op_flags, u32 what, int error, int pdu_len,
		     void *pdu_data, union kernfs_node_id *cgid)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[op_is_write(op) ? WRITE : READ];
	what |= MASK_TC_BIT(op_flags, SYNC);
	what |= MASK_TC_BIT(op_flags, RAHEAD);
	what |= MASK_TC_BIT(op_flags, META);
	what |= MASK_TC_BIT(op_flags, PREFLUSH);
	what |= MASK_TC_BIT(op_flags, FUA);
	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
		what |= BLK_TC_ACT(BLK_TC_DISCARD);
	if (op == REQ_OP_FLUSH)
		what |= BLK_TC_ACT(BLK_TC_FLUSH);
	if (cgid)
		what |= __BLK_TA_CGROUP;

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->trace_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len + cgid_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(tsk);

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);
	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len + cgid_len;

		if (cgid_len)
			memcpy((void *)t + sizeof(*t), cgid, cgid_len);
		if (pdu_len)
			memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}

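/*
 * Release everything a blk_trace owns: the debugfs files, the relay
 * channel, the directory and the per-cpu buffers.
 */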
static void blk_trace_free(struct blk_trace *bt)
{
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	relay_close(bt->rchan);
	debugfs_remove(bt->dir);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

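/*
 * Free the trace and drop its reference on the probes; the last user
 * also unregisters the block tracepoints.
 */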
static void blk_trace_cleanup(struct blk_trace *bt)
{
	blk_trace_free(bt);
	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();
}

int blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state != Blktrace_running)
		blk_trace_cleanup(bt);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.read =		blk_dropped_read,
	.llseek =	default_llseek,
};

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = memdup_user_nul(buffer, count);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	bt = filp->private_data;
	__trace_note_message(bt, NULL, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.write =	blk_msg_write,
	.llseek =	noop_llseek,
};

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);

	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   umode_t mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
					&relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};

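/*
 * Derive the traced LBA range from the partition the trace was set up
 * on; tracing a whole device yields the range [0, -1ULL].
 */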
static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	struct hd_struct *part = NULL;

	if (bdev)
		part = bdev->bd_part;

	if (part) {
		bt->start_lba = part->start_sect;
		bt->end_lba = part->start_sect + part->nr_sects;
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1ULL;
	}
}

/*
 * Setup everything required to start tracing
 */
static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			      struct block_device *bdev,
			      struct blk_user_trace_setup *buts)
{
	struct blk_trace *bt = NULL;
	struct dentry *dir = NULL;
	int ret;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names contain slashes in their paths - convert them
	 * to underscores so the name can be used as a debugfs directory
	 */
	strreplace(buts->name, '/', '_');

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;

	if (!blk_debugfs_root)
		goto err;

	dir = debugfs_lookup(buts->name, blk_debugfs_root);
	if (!dir)
		bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
	if (!dir)
		goto err;

	bt->dev = dev;
	atomic_set(&bt->dropped, 0);
	INIT_LIST_HEAD(&bt->running_list);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
	if (!bt->msg_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts->buf_size,
				buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	ret = -EBUSY;
	if (cmpxchg(&q->blk_trace, NULL, bt))
		goto err;

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();

	ret = 0;
err:
	if (dir && !bt->dir)
		dput(dir);
	if (ret)
		blk_trace_free(bt);
	return ret;
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts))) {
		blk_trace_remove(q);
		return -EFAULT;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
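
/*
 * Rough user-space sketch (not part of this file) of how this entry
 * point is typically reached; the field values are only illustrative:
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,
 *		.buf_nr   = 4,
 *	};
 *	int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);
 *	ioctl(fd, BLKTRACESETUP, &buts);
 *	ioctl(fd, BLKTRACESTART);
 *
 * The per-cpu "trace" relay files then appear under the debugfs
 * directory created above.
 */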

#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
static int compat_blk_trace_setup(struct request_queue *q, char *name,
				  dev_t dev, struct block_device *bdev,
				  char __user *arg)
{
	struct blk_user_trace_setup buts;
	struct compat_blk_user_trace_setup cbuts;
	int ret;

	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
		return -EFAULT;

	buts = (struct blk_user_trace_setup) {
		.act_mask = cbuts.act_mask,
		.buf_size = cbuts.buf_size,
		.buf_nr = cbuts.buf_nr,
		.start_lba = cbuts.start_lba,
		.end_lba = cbuts.end_lba,
		.pid = cbuts.pid,
	};

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
		blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}
#endif

int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;
	struct blk_trace *bt = q->blk_trace;

	if (bt == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
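			/*
			 * Bumping blktrace_seq invalidates every task's
			 * cached btrace_seq, so each task is re-announced
			 * via trace_note_tsk() on its next traced event.
			 */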
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;
			spin_lock_irq(&running_trace_lock);
			list_add(&bt->running_list, &running_trace_list);
			spin_unlock_irq(&running_trace_lock);

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			spin_lock_irq(&running_trace_lock);
			list_del_init(&bt->running_list);
			spin_unlock_irq(&running_trace_lock);
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/*
 * When reading or writing the blktrace sysfs files, the references to the
 * opened sysfs or device files should prevent the underlying block device
 * from being removed. So no further delete protection is really needed.
 */

/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&q->blk_trace_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
	case BLKTRACESETUP32:
		bdevname(bdev, b);
		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#endif
	case BLKTRACESTART:
		start = 1;
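		/* fall through */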
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&q->blk_trace_mutex);
	return ret;
}

/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (q->blk_trace) {
		blk_trace_startstop(q, 0);
		blk_trace_remove(q);
	}
}

#ifdef CONFIG_BLK_CGROUP
static union kernfs_node_id *
blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	struct blk_trace *bt = q->blk_trace;

	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		return NULL;

	if (!bio->bi_css)
		return NULL;
	return cgroup_get_kernfs_id(bio->bi_css->cgroup);
}
#else
static union kernfs_node_id *
blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	return NULL;
}
#endif

static union kernfs_node_id *
blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
{
	if (!rq->bio)
		return NULL;
	/* Use the first bio */
	return blk_trace_bio_get_cgid(q, rq->bio);
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @rq:		the source request
 * @error:	return status to log
 * @nr_bytes:	number of completed bytes
 * @what:	the action
 * @cgid:	the cgroup info
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request *rq, int error,
			     unsigned int nr_bytes, u32 what,
			     union kernfs_node_id *cgid)
{
	struct blk_trace *bt = rq->q->blk_trace;

	if (likely(!bt))
		return;

	if (blk_rq_is_passthrough(rq))
		what |= BLK_TC_ACT(BLK_TC_PC);
	else
		what |= BLK_TC_ACT(BLK_TC_FS);

	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
			rq->cmd_flags, what, error, 0, NULL, cgid);
}

static void blk_add_trace_rq_insert(void *ignore,
				    struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_issue(void *ignore,
				   struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_requeue(void *ignore,
				     struct request_queue *q,
				     struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
			int error, unsigned int nr_bytes)
{
	blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
			 blk_trace_request_get_cgid(rq->q, rq));
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 * @error:	error, if any
 * @cgid:	the cgroup info
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what, int error, union kernfs_node_id *cgid)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio_op(bio), bio->bi_opf, what, error, 0, NULL, cgid);
}

static void blk_add_trace_bio_bounce(void *ignore,
				     struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0,
			  blk_trace_bio_get_cgid(q, bio));
}

static void blk_add_trace_bio_complete(void *ignore,
				       struct request_queue *q, struct bio *bio,
				       int error)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error,
			  blk_trace_bio_get_cgid(q, bio));
}

static void blk_add_trace_bio_backmerge(void *ignore,
					struct request_queue *q,
					struct request *rq,
					struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0,
			  blk_trace_bio_get_cgid(q, bio));
}

static void blk_add_trace_bio_frontmerge(void *ignore,
					 struct request_queue *q,
					 struct request *rq,
					 struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0,
			  blk_trace_bio_get_cgid(q, bio));
}

static void blk_add_trace_bio_queue(void *ignore,
				    struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0,
			  blk_trace_bio_get_cgid(q, bio));
}

static void blk_add_trace_getrq(void *ignore,
				struct request_queue *q,
				struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0,
				  blk_trace_bio_get_cgid(q, bio));
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
					NULL, NULL);
	}
}

static void blk_add_trace_sleeprq(void *ignore,
				  struct request_queue *q,
				  struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0,
				  blk_trace_bio_get_cgid(q, bio));
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
					0, 0, NULL, NULL);
	}
}

static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt)
		__blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL);
}

static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
				    unsigned int depth, bool explicit)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(depth);
		u32 what;

		if (explicit)
			what = BLK_TA_UNPLUG_IO;
		else
			what = BLK_TA_UNPLUG_TIMER;

		__blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL);
	}
}

static void blk_add_trace_split(void *ignore,
				struct request_queue *q, struct bio *bio,
				unsigned int pdu)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_iter.bi_sector,
				bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
				BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu),
				&rpdu, blk_trace_bio_get_cgid(q, bio));
	}
}

/**
 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper or raid targets sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_bio_remap(void *ignore,
				    struct request_queue *q, struct bio *bio,
				    dev_t dev, sector_t from)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device_from = cpu_to_be32(dev);
	r.device_to = cpu_to_be32(bio_dev(bio));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status,
			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
}

/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @rq:		the source request
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper remaps request to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(void *ignore,
				   struct request_queue *q,
				   struct request *rq, dev_t dev,
				   sector_t from)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device_from = cpu_to_be32(dev);
	r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
			rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
			sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
			 struct request *rq,
			 void *data, size_t len)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
				BLK_TA_DRV_DATA, 0, len, data,
				blk_trace_request_get_cgid(q, rq));
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
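
/*
 * Hedged example (hypothetical driver code, not taken from this file)
 * of how a driver could attach its own payload to a request's trace:
 *
 *	struct my_status st = { .code = 0xab };	// hypothetical type
 *	blk_add_driver_data(rq->q, rq, &st, sizeof(st));
 *
 * The payload is emitted as a BLK_TA_DRV_DATA event carrying the data
 * as its PDU, for user-space consumers such as blkparse.
 */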

static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	unregister_trace_block_split(blk_add_trace_split, NULL);
	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
	unregister_trace_block_plug(blk_add_trace_plug, NULL);
	unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);

	tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_tracer formatting routines
 */

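/*
 * Build the "rwbs" flag string (e.g. "WS" for a sync write) from the
 * action's trace category bits.
 */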
static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_FLUSH)
		rwbs[i++] = 'F';

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_FUA)
		rwbs[i++] = 'F';
	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}

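/* Helpers for picking apart a blk_io_trace embedded in a trace entry. */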
static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
{
	return (void *)(te_blk_io_trace(ent) + 1) +
		(has_cg ? sizeof(union kernfs_node_id) : 0);
}

static inline const void *cgid_start(const struct trace_entry *ent)
{
	return (void *)(te_blk_io_trace(ent) + 1);
}

static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
{
	return te_blk_io_trace(ent)->pdu_len -
			(has_cg ? sizeof(union kernfs_node_id) : 0);
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
{
	const __u64 *val = pdu_start(ent, has_cg);
	return be64_to_cpu(*val);
}

static void get_pdu_remap(const struct trace_entry *ent,
			  struct blk_io_trace_remap *r, bool has_cg)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
	__u64 sector_from = __r->sector_from;

	r->device_from = be32_to_cpu(__r->device_from);
	r->device_to   = be32_to_cpu(__r->device_to);
	r->sector_from = be64_to_cpu(sector_from);
}

typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
	bool has_cg);

static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
	bool has_cg)
{
	char rwbs[RWBS_LEN];
	unsigned long long ts  = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);

	trace_seq_printf(&iter->seq,
			 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
			 MAJOR(t->device), MINOR(t->device), iter->cpu,
			 secs, nsec_rem, iter->ent->pid, act, rwbs);
}

static void blk_log_action(struct trace_iterator *iter, const char *act,
	bool has_cg)
{
	char rwbs[RWBS_LEN];
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	if (has_cg) {
		const union kernfs_node_id *id = cgid_start(iter->ent);

		if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
			char blkcg_name_buf[NAME_MAX + 1] = "<...>";

			cgroup_path_from_kernfs_id(id, blkcg_name_buf,
				sizeof(blkcg_name_buf));
			trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
				 MAJOR(t->device), MINOR(t->device),
				 blkcg_name_buf, act, rwbs);
		} else
			trace_seq_printf(&iter->seq,
				 "%3d,%-3d %x,%-x %2s %3s ",
				 MAJOR(t->device), MINOR(t->device),
				 id->ino, id->generation, act, rwbs);
	} else
		trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
				 MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static void blk_log_dump_pdu(struct trace_seq *s,
	const struct trace_entry *ent, bool has_cg)
{
	const unsigned char *pdu_buf;
	int pdu_len;
	int i, end;

	pdu_buf = pdu_start(ent, has_cg);
	pdu_len = pdu_real_len(ent, has_cg);

	if (!pdu_len)
		return;

	/* find the last zero that needs to be printed */
	for (end = pdu_len - 1; end >= 0; end--)
		if (pdu_buf[end])
			break;
	end++;

	trace_seq_putc(s, '(');

	for (i = 0; i < pdu_len; i++) {
		trace_seq_printf(s, "%s%02x",
				 i == 0 ? "" : " ", pdu_buf[i]);

		/*
		 * stop when the rest is just zeroes and indicate so
		 * with a ".." appended
		 */
		if (i == end && end != pdu_len - 1) {
			trace_seq_puts(s, " ..) ");
			return;
		}
	}

	trace_seq_puts(s, ") ");
}

static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		trace_seq_printf(s, "%u ", t_bytes(ent));
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%s]\n", cmd);
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%s]\n",
						t_sector(ent), t_sec(ent), cmd);
		else
			trace_seq_printf(s, "[%s]\n", cmd);
	}
}

static void blk_log_with_error(struct trace_seq *s,
			      const struct trace_entry *ent, bool has_cg)
{
	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%d]\n", t_error(ent));
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%d]\n",
					 t_sector(ent),
					 t_sec(ent), t_error(ent));
		else
			trace_seq_printf(s, "%llu [%d]\n",
					 t_sector(ent), t_error(ent));
	}
}

static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	struct blk_io_trace_remap r = { .device_from = 0, };

	get_pdu_remap(ent, &r, has_cg);
	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
			 t_sector(ent), t_sec(ent),
			 MAJOR(r.device_from), MINOR(r.device_from),
			 (unsigned long long)r.sector_from);
}

static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s]\n", cmd);
}

static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
}

static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
			 get_pdu_int(ent, has_cg), cmd);
}

static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
			bool has_cg)
{
	trace_seq_putmem(s, pdu_start(ent, has_cg),
		pdu_real_len(ent, has_cg));
	trace_seq_putc(s, '\n');
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}

static const struct {
	const char *act[2];
	void	   (*print)(struct trace_seq *s, const struct trace_entry *ent,
			    bool has_cg);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{  "Q", "queue" },	   blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{  "M", "backmerge" },  blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{  "F", "frontmerge" }, blk_log_generic },
	[__BLK_TA_GETRQ]	= {{  "G", "getrq" },	   blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{  "S", "sleeprq" },	   blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{  "R", "requeue" },	   blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{  "D", "issue" },	   blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{  "U", "unplug_io" },  blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{  "I", "insert" },	   blk_log_generic },
	[__BLK_TA_SPLIT]	= {{  "X", "split" },	   blk_log_split },
	[__BLK_TA_BOUNCE]	= {{  "B", "bounce" },	   blk_log_generic },
	[__BLK_TA_REMAP]	= {{  "A", "remap" },	   blk_log_remap },
};

static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
	bool long_act;
	blk_log_action_t *log_action;
	bool has_cg;

	t	   = te_blk_io_trace(iter->ent);
	what	   = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
	long_act   = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;
	has_cg	   = t->action & __BLK_TA_CGROUP;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		log_action(iter, long_act ? "message" : "m", has_cg);
		blk_log_msg(s, iter->ent, has_cg);
		return trace_handle_return(s);
	}

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		trace_seq_printf(s, "Unknown action %x\n", what);
	else {
		log_action(iter, what2act[what].act[long_act], has_cg);
		what2act[what].print(s, iter->ent, has_cg);
	}

	return trace_handle_return(s);
}

static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags, struct trace_event *event)
{
	return print_one_line(iter, false);
}

static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	trace_seq_putmem(s, &old, offset);
	trace_seq_putmem(s, &t->sector,
			 sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
			     struct trace_event *event)
{
	blk_trace_synthesize_old_trace(iter);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int
blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event_functions trace_blk_event_funcs = {
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.funcs		= &trace_blk_event_funcs,
};

static int __init init_blk_tracer(void)
{
	if (!register_trace_event(&trace_blk_event)) {
		pr_warn("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warn("Warning: could not register the block tracer\n");
		unregister_trace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);

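/*
 * Counterpart of blk_trace_setup_queue() below: detach and free the
 * queue's blk_trace and drop the reference on the tracepoint probes.
 */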
static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();

	blk_trace_free(bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	ret = -EBUSY;
	if (cmpxchg(&q->blk_trace, NULL, bt))
		goto free_bt;

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();
	return 0;

free_bt:
	blk_trace_free(bt);
	return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */
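
/*
 * Illustrative shell usage (the device path is only an example; the
 * "trace" attribute group is attached per disk/partition in sysfs):
 *
 *	echo read,write > /sys/block/sda/sda1/trace/act_mask
 *	echo 1 > /sys/block/sda/sda1/trace/enable
 */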

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};

static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_FLUSH,		"flush"		},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_NOTIFY,	"notify"	},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
	{ BLK_TC_FUA,		"fua"		},
};

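/*
 * Parse a comma-separated list of category names ("read,write,...",
 * as accepted by the act_mask attribute) into a BLK_TC_* mask;
 * returns a negative errno on failure.
 */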
static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}
		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}

static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
	if (bdev->bd_disk == NULL)
		return NULL;

	return bdev_get_queue(bdev);
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&q->blk_trace_mutex);

	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!q->blk_trace);
		goto out_unlock_bdev;
	}

	if (q->blk_trace == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
	mutex_unlock(&q->blk_trace_mutex);
out_bdput:
	bdput(bdev);
out:
	return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (kstrtoull(buf, 0, &value)) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else if (kstrtoull(buf, 0, &value))
		goto out;

	ret = -ENXIO;

	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&q->blk_trace_mutex);

	if (attr == &dev_attr_enable) {
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (q->blk_trace == NULL)
		ret = blk_trace_setup_queue(q, bdev);

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			q->blk_trace->act_mask = value;
		else if (attr == &dev_attr_pid)
			q->blk_trace->pid = value;
		else if (attr == &dev_attr_start_lba)
			q->blk_trace->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			q->blk_trace->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&q->blk_trace_mutex);
out_bdput:
	bdput(bdev);
out:
	return ret ? ret : count;
}

int blk_trace_init_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}

void blk_trace_remove_sysfs(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
}

#endif /* CONFIG_BLK_DEV_IO_TRACE */

#ifdef CONFIG_EVENT_TRACING

void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
{
	int i = 0;

	if (op & REQ_PREFLUSH)
		rwbs[i++] = 'F';

	switch (op & REQ_OP_MASK) {
	case REQ_OP_WRITE:
	case REQ_OP_WRITE_SAME:
		rwbs[i++] = 'W';
		break;
	case REQ_OP_DISCARD:
		rwbs[i++] = 'D';
		break;
	case REQ_OP_SECURE_ERASE:
		rwbs[i++] = 'D';
		rwbs[i++] = 'E';
		break;
	case REQ_OP_FLUSH:
		rwbs[i++] = 'F';
		break;
	case REQ_OP_READ:
		rwbs[i++] = 'R';
		break;
	default:
		rwbs[i++] = 'N';
	}

	if (op & REQ_FUA)
		rwbs[i++] = 'F';
	if (op & REQ_RAHEAD)
		rwbs[i++] = 'A';
	if (op & REQ_SYNC)
		rwbs[i++] = 'S';
	if (op & REQ_META)
		rwbs[i++] = 'M';

	rwbs[i] = '\0';
}
EXPORT_SYMBOL_GPL(blk_fill_rwbs);

#endif /* CONFIG_EVENT_TRACING */