/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <trace/block.h>
#include <linux/uaccess.h>
#include "trace_output.h"
static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static int __read_mostly blk_tracer_enabled;
/* Select an alternative, minimalistic output format instead of the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1

static struct tracer_opt blk_tracer_opts[] = {
	/* Default: disable the minimalistic output */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
	{ }
};
static struct tracer_flags blk_tracer_flags = {
	.opts = blk_tracer_opts,
};
/* Global reference count of probes */
static DEFINE_MUTEX(blk_probe_mutex);
static atomic_t blk_probes_ref = ATOMIC_INIT(0);
static int blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);
/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len)
{
	struct blk_io_trace *t;

	t = relay_reserve(bt->rchan, sizeof(*t) + len);
	if (t) {
		const int cpu = smp_processor_id();

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
		t->device = bt->dev;
		t->action = action;
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len;
		memcpy((void *) t + sizeof(*t), data, len);
	}
}
/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
	tsk->btrace_seq = blktrace_seq;
	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}
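
/*
 * blktrace_seq is bumped each time a trace is (re)started, so the
 * tsk->btrace_seq comparison done by the caller (__blk_add_trace) makes
 * each task emit at most one BLK_TN_PROCESS notify per trace session.
 */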
static void trace_note_time(struct blk_trace *bt)
{
	struct timespec now;
	unsigned long flags;
	u32 words[2];

	getnstimeofday(&now);
	words[0] = now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
	local_irq_restore(flags);
}
void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (blk_tr) {
		va_start(args, fmt);
		ftrace_vprintk(fmt, args);
		va_end(args);
		return;
	}

	if (!bt->msg_data)
		return;

	va_start(args, fmt);
	local_irq_save(flags);
	buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);
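
/*
 * Drivers normally reach this through the blk_add_trace_msg() wrapper in
 * <linux/blktrace_api.h>; an illustrative call (format string and
 * arguments invented for the example) would be:
 *
 *	blk_add_trace_msg(q, "%s: queue depth now %d", __func__, depth);
 *
 * Each message is capped at BLK_TN_MAX_MSG bytes.
 */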
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector < bt->start_lba || sector > bt->end_lba)
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}
/*
 * Data direction bit lookup
 */
static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ),
					 BLK_TC_ACT(BLK_TC_WRITE) };

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
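
/*
 * Worked example (bit positions assumed purely for illustration): if
 * BIO_RW_BARRIER were bit 2 of @rw and BLK_TC_BARRIER were bit 2 of the
 * category mask, MASK_TC_BIT(rw, BARRIER) isolates bit 2 of @rw and
 * shifts it up by BLK_TC_SHIFT into BLK_TC_BARRIER's slot in the upper
 * "category" half of the action word. Since ilog2() of a constant folds
 * at compile time, each use costs one AND plus one shift.
 */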
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    int rw, u32 what, int error, int pdu_len,
			    void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;

	if (unlikely(bt->trace_state != Blktrace_running ||
		     !blk_tracer_enabled))
		return;

	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, BARRIER);
	what |= MASK_TC_BIT(rw, SYNCIO);
	what |= MASK_TC_BIT(rw, AHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);

	pid = tsk->pid;
	if (unlikely(act_log_check(bt, what, sector, pid)))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tr) {
		tracing_record_cmdline(current);

		pc = preempt_count();
		event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
						  sizeof(*t) + pdu_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tr) {
			trace_buffer_unlock_commit(blk_tr, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}
static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);
static void blk_trace_cleanup(struct blk_trace *bt)
{
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	relay_close(bt->rchan);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);

	mutex_lock(&blk_probe_mutex);
	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}
int blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state == Blktrace_setup ||
	    bt->trace_state == Blktrace_stopped)
		blk_trace_cleanup(bt);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);
static int blk_dropped_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}
static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}
static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_dropped_open,
	.read =		blk_dropped_read,
};
static int blk_msg_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}
static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count > BLK_TN_MAX_MSG)
		return -EINVAL;

	/* one extra byte for the terminating '\0' added below */
	msg = kmalloc(count + 1, GFP_KERNEL);
	if (msg == NULL)
		return -ENOMEM;

	if (copy_from_user(msg, buffer, count)) {
		kfree(msg);
		return -EFAULT;
	}

	msg[count] = '\0';
	bt = filp->private_data;
	__trace_note_message(bt, "%s", msg);
	kfree(msg);

	return count;
}
static const struct file_operations blk_msg_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_msg_open,
	.write =	blk_msg_write,
};
/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}
static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	debugfs_remove(dentry);

	/*
	 * This will fail for all but the last file, but that is OK: what we
	 * care about is the top-level buts->name directory going away when
	 * the last trace file is gone. Then we don't have to rmdir() it
	 * manually on trace stop, which nicely solves the problem of
	 * force-killed running traces.
	 */

	debugfs_remove(parent);
	return 0;
}
static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   int mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
					&relay_file_operations);
}
static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};
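
/*
 * Division of labour between the callbacks above: subbuf_start counts
 * events dropped on a full sub-buffer, create_buf_file exposes each
 * per-cpu relay buffer in debugfs via the stock relay_file_operations,
 * and remove_buf_file tears the files (and finally the per-device
 * directory) back down.
 */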
/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		       struct blk_user_trace_setup *buts)
{
	struct blk_trace *old_bt, *bt = NULL;
	struct dentry *dir = NULL;
	int ret, i;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	for (i = 0; i < strlen(buts->name); i++)
		if (buts->name[i] == '/')
			buts->name[i] = '_';

	ret = -ENOMEM;
	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		goto err;

	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG);
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;

	if (!blk_tree_root) {
		blk_tree_root = debugfs_create_dir("block", NULL);
		if (!blk_tree_root) {
			ret = -ENOMEM;
			goto err;
		}
	}

	dir = debugfs_create_dir(buts->name, blk_tree_root);
	if (!dir)
		goto err;

	bt->dir = dir;
	bt->dev = dev;
	atomic_set(&bt->dropped, 0);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
	if (!bt->msg_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts->buf_size,
			       buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	bt->start_lba = buts->start_lba;
	bt->end_lba = buts->end_lba;
	if (!bt->end_lba)
		bt->end_lba = -1ULL;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	mutex_lock(&blk_probe_mutex);
	if (atomic_add_return(1, &blk_probes_ref) == 1) {
		ret = blk_register_tracepoints();
		if (ret)
			goto probe_err;
	}
	mutex_unlock(&blk_probe_mutex);

	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt) {
		(void) xchg(&q->blk_trace, old_bt);
		goto err;
	}

	return 0;
probe_err:
	atomic_dec(&blk_probes_ref);
	mutex_unlock(&blk_probe_mutex);
err:
	if (bt) {
		if (bt->msg_file)
			debugfs_remove(bt->msg_file);
		if (bt->dropped_file)
			debugfs_remove(bt->dropped_file);
		free_percpu(bt->sequence);
		free_percpu(bt->msg_data);
		if (bt->rchan)
			relay_close(bt->rchan);
		kfree(bt);
	}
	return ret;
}
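
/*
 * Typical use (values illustrative only): the blktrace utility fills a
 * struct blk_user_trace_setup with e.g. buf_size = 512KiB and buf_nr = 4,
 * leaves act_mask, pid, start_lba and end_lba at zero to trace everything,
 * and passes it down via BLKTRACESETUP; the zero fields are widened to
 * their "match all" defaults above.
 */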
int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts)))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;
	struct blk_trace *bt = q->blk_trace;

	if (bt == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running.
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);
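
/*
 * State machine recap: do_blk_trace_setup() leaves a trace in
 * Blktrace_setup; starting moves setup/stopped -> running (bumping
 * blktrace_seq so per-task notifies are re-sent); stopping moves
 * running -> stopped and flushes relay so userspace sees the buffer
 * tails.
 */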
/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&bdev->bd_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
		break;
	case BLKTRACESTART:
		start = 1;
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&bdev->bd_mutex);
	return ret;
}
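
/*
 * From userspace these are plain ioctl()s on the opened block device;
 * a minimal sketch (error handling omitted):
 *
 *	struct blk_user_trace_setup buts = { .buf_size = ..., .buf_nr = ... };
 *	ioctl(fd, BLKTRACESETUP, &buts);
 *	ioctl(fd, BLKTRACESTART);
 *	...
 *	ioctl(fd, BLKTRACESTOP);
 *	ioctl(fd, BLKTRACETEARDOWN);
 */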
/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (q->blk_trace) {
		blk_trace_startstop(q, 0);
		blk_trace_remove(q);
	}
}
/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:		queue the io is for
 * @rq:		the source request
 * @what:	the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
			     u32 what)
{
	struct blk_trace *bt = q->blk_trace;
	int rw = rq->cmd_flags & 0x03;

	if (likely(!bt))
		return;

	if (blk_discard_rq(rq))
		rw |= (1 << BIO_RW_DISCARD);

	if (blk_pc_request(rq)) {
		what |= BLK_TC_ACT(BLK_TC_PC);
		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
				sizeof(rq->cmd), rq->cmd);
	} else {
		what |= BLK_TC_ACT(BLK_TC_FS);
		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
				rw, what, rq->errors, 0, NULL);
	}
}
static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ABORT);
}

static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
}

static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
}

static void blk_add_trace_rq_requeue(struct request_queue *q,
				     struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
}

static void blk_add_trace_rq_complete(struct request_queue *q,
				      struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}
/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
			!bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}
static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
}

static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
}

static void blk_add_trace_bio_backmerge(struct request_queue *q,
					struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
}

static void blk_add_trace_bio_frontmerge(struct request_queue *q,
					 struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
}

static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
}
static void blk_add_trace_getrq(struct request_queue *q,
				struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
	}
}

static void blk_add_trace_sleeprq(struct request_queue *q,
				  struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
					0, 0, NULL);
	}
}
static void blk_add_trace_plug(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt)
		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}
static void blk_add_trace_unplug_io(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
				sizeof(rpdu), &rpdu);
	}
}
static void blk_add_trace_unplug_timer(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
				sizeof(rpdu), &rpdu);
	}
}
static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
				unsigned int pdu)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
				BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
				sizeof(rpdu), &rpdu);
	}
}
/**
 * blk_add_trace_remap - Add a trace for a remap operation
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 * @to:		target sector
 *
 * Description:
 *     Device mapper or raid targets sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
				dev_t dev, sector_t from, sector_t to)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device = cpu_to_be32(dev);
	r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
	r.sector = cpu_to_be64(to);

	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}
/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
			 struct request *rq,
			 void *data, size_t len)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (blk_pc_request(rq))
		__blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
				rq->errors, len, data);
	else
		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
				0, BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
static int blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_abort(blk_add_trace_rq_abort);
	WARN_ON(ret);
	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq);
	WARN_ON(ret);
	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug);
	WARN_ON(ret);
	ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer);
	WARN_ON(ret);
	ret = register_trace_block_unplug_io(blk_add_trace_unplug_io);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split);
	WARN_ON(ret);
	ret = register_trace_block_remap(blk_add_trace_remap);
	WARN_ON(ret);

	return 0;
}
static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_remap(blk_add_trace_remap);
	unregister_trace_block_split(blk_add_trace_split);
	unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
	unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer);
	unregister_trace_block_plug(blk_add_trace_plug);
	unregister_trace_block_sleeprq(blk_add_trace_sleeprq);
	unregister_trace_block_getrq(blk_add_trace_getrq);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert);
	unregister_trace_block_rq_abort(blk_add_trace_rq_abort);

	tracepoint_synchronize_unregister();
}
934 static void fill_rwbs(char *rwbs
, const struct blk_io_trace
*t
)
938 if (t
->action
& BLK_TC_DISCARD
)
940 else if (t
->action
& BLK_TC_WRITE
)
947 if (t
->action
& BLK_TC_AHEAD
)
949 if (t
->action
& BLK_TC_BARRIER
)
951 if (t
->action
& BLK_TC_SYNC
)
953 if (t
->action
& BLK_TC_META
)
static inline const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}
static inline const void *pdu_start(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent) + 1;
}
static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}
static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}
static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}
static __u64 get_pdu_int(const struct trace_entry *ent)
{
	const __u64 *val = pdu_start(ent);
	return be64_to_cpu(*val);
}
static void get_pdu_remap(const struct trace_entry *ent,
			  struct blk_io_trace_remap *r)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent);
	__u64 sector = __r->sector;

	r->device = be32_to_cpu(__r->device);
	r->device_from = be32_to_cpu(__r->device_from);
	r->sector = be64_to_cpu(sector);
}
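
/*
 * The remap PDU is stored big-endian in the trace stream (see the
 * cpu_to_be*() calls in blk_add_trace_remap()), so it is byte-swapped
 * back here before being printed on the host.
 */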
static int blk_log_action_iter(struct trace_iterator *iter, const char *act)
{
	char rwbs[6];
	unsigned long long ts  = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(ts, USEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct trace_entry *ent = iter->ent;
	const struct blk_io_trace *t = (const struct blk_io_trace *)ent;

	fill_rwbs(rwbs, t);

	return trace_seq_printf(&iter->seq,
				"%3d,%-3d %2d %5d.%06lu %5u %2s %3s ",
				MAJOR(t->device), MINOR(t->device), iter->cpu,
				secs, usec_rem, ent->pid, act, rwbs);
}
static int blk_log_action_seq(struct trace_seq *s, const struct blk_io_trace *t,
			      const char *act)
{
	char rwbs[6];

	fill_rwbs(rwbs, t);
	return trace_seq_printf(s, "%3d,%-3d %2s %3s ",
				MAJOR(t->device), MINOR(t->device), act, rwbs);
}
static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
{
	const char *cmd = trace_find_cmdline(ent->pid);

	if (t_sec(ent))
		return trace_seq_printf(s, "%llu + %u [%s]\n",
					t_sector(ent), t_sec(ent), cmd);
	return trace_seq_printf(s, "[%s]\n", cmd);
}
static int blk_log_with_error(struct trace_seq *s,
			      const struct trace_entry *ent)
{
	if (t_sec(ent))
		return trace_seq_printf(s, "%llu + %u [%d]\n", t_sector(ent),
					t_sec(ent), t_error(ent));
	return trace_seq_printf(s, "%llu [%d]\n", t_sector(ent), t_error(ent));
}
static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
{
	struct blk_io_trace_remap r = { .device = 0, };

	get_pdu_remap(ent, &r);
	return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
				t_sector(ent),
				t_sec(ent), MAJOR(r.device), MINOR(r.device),
				(unsigned long long)r.sector);
}
static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
{
	return trace_seq_printf(s, "[%s]\n", trace_find_cmdline(ent->pid));
}
static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
{
	return trace_seq_printf(s, "[%s] %llu\n", trace_find_cmdline(ent->pid),
				get_pdu_int(ent));
}
static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
{
	return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
				get_pdu_int(ent), trace_find_cmdline(ent->pid));
}
/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}
static void blk_tracer_start(struct trace_array *tr)
{
	mutex_lock(&blk_probe_mutex);
	if (atomic_add_return(1, &blk_probes_ref) == 1)
		if (blk_register_tracepoints())
			atomic_dec(&blk_probes_ref);
	mutex_unlock(&blk_probe_mutex);
	trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
}
static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	mutex_lock(&blk_probe_mutex);
	blk_tracer_enabled++;
	mutex_unlock(&blk_probe_mutex);
	return 0;
}
static void blk_tracer_stop(struct trace_array *tr)
{
	trace_flags |= TRACE_ITER_CONTEXT_INFO;
	mutex_lock(&blk_probe_mutex);
	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}
static void blk_tracer_reset(struct trace_array *tr)
{
	if (!atomic_read(&blk_probes_ref))
		return;

	mutex_lock(&blk_probe_mutex);
	blk_tracer_enabled--;
	WARN_ON(blk_tracer_enabled < 0);
	mutex_unlock(&blk_probe_mutex);

	blk_tracer_stop(tr);
}
static struct {
	const char *act[2];
	int	   (*print)(struct trace_seq *s, const struct trace_entry *ent);
} what2act[] __read_mostly = {
	[__BLK_TA_QUEUE]	= {{ "Q",  "queue" },	   blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{ "M",  "backmerge" },  blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{ "F",  "frontmerge" }, blk_log_generic },
	[__BLK_TA_GETRQ]	= {{ "G",  "getrq" },	   blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{ "S",  "sleeprq" },	   blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{ "R",  "requeue" },	   blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{ "D",  "issue" },	   blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{ "C",  "complete" },   blk_log_with_error },
	[__BLK_TA_PLUG]		= {{ "P",  "plug" },	   blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{ "U",  "unplug_io" },  blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{ "I",  "insert" },	   blk_log_generic },
	[__BLK_TA_SPLIT]	= {{ "X",  "split" },	   blk_log_split },
	[__BLK_TA_BOUNCE]	= {{ "B",  "bounce" },	   blk_log_generic },
	[__BLK_TA_REMAP]	= {{ "A",  "remap" },	   blk_log_remap },
};
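
/*
 * The short action strings above ("Q", "M", "C", ...) are the same
 * per-event codes that blkparse prints, so the TRACE_BLK_OPT_CLASSIC
 * output reads like a classic blkparse dump; the long names are used
 * when the "verbose" trace option is set.
 */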
static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags)
{
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const u16 what = t->action & ((1 << BLK_TC_SHIFT) - 1);
	int ret;

	if (!trace_print_context(iter))
		return TRACE_TYPE_PARTIAL_LINE;

	if (unlikely(what == 0 || what > ARRAY_SIZE(what2act)))
		ret = trace_seq_printf(s, "Bad pc action %x\n", what);
	else {
		const bool long_act = !!(trace_flags & TRACE_ITER_VERBOSE);
		ret = blk_log_action_seq(s, t, what2act[what].act[long_act]);
		if (ret)
			ret = what2act[what].print(s, iter->ent);
	}

	return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}
static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = ns2usecs(iter->ts),
	};

	if (!trace_seq_putmem(s, &old, offset))
		return 0;
	return trace_seq_putmem(s, &t->sector,
				sizeof(old) - offset + t->pdu_len);
}
static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags)
{
	return blk_trace_synthesize_old_trace(iter) ?
			TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	const struct blk_io_trace *t;
	u16 what;
	int ret;

	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	t = (const struct blk_io_trace *)iter->ent;
	what = t->action & ((1 << BLK_TC_SHIFT) - 1);

	if (unlikely(what == 0 || what > ARRAY_SIZE(what2act)))
		ret = trace_seq_printf(&iter->seq, "Bad pc action %x\n", what);
	else {
		const bool long_act = !!(trace_flags & TRACE_ITER_VERBOSE);
		ret = blk_log_action_iter(iter, what2act[what].act[long_act]);
		if (ret)
			ret = what2act[what].print(&iter->seq, iter->ent);
	}

	return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}
static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
};
static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};
static int __init init_blk_tracer(void)
{
	if (!register_ftrace_event(&trace_blk_event)) {
		pr_warning("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warning("Warning: could not register the block tracer\n");
		unregister_ftrace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);
static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	kfree(bt);
	return 0;
}
/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q, dev_t dev)
{
	struct blk_trace *old_bt, *bt = NULL;
	int ret;

	ret = -ENOMEM;
	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		goto err;

	bt->dev = dev;
	bt->act_mask = (u16)-1;
	bt->end_lba = -1ULL;
	bt->trace_state = Blktrace_running;

	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt != NULL) {
		(void)xchg(&q->blk_trace, old_bt);
		kfree(bt);
		ret = -EBUSY;
	}
	return 0;
err:
	return ret;
}
/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_enable_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	lock_kernel();
	bdev = bdget(part_devt(p));
	if (bdev != NULL) {
		struct request_queue *q = bdev_get_queue(bdev);

		if (q != NULL) {
			mutex_lock(&bdev->bd_mutex);
			ret = sprintf(buf, "%u\n", !!q->blk_trace);
			mutex_unlock(&bdev->bd_mutex);
		}

		bdput(bdev);
	}

	unlock_kernel();
	return ret;
}
static ssize_t sysfs_blk_trace_enable_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	int value;
	ssize_t ret = -ENXIO;

	if (count == 0 || sscanf(buf, "%d", &value) != 1)
		goto out;

	lock_kernel();
	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = bdev_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);
	if (value)
		ret = blk_trace_setup_queue(q, bdev->bd_dev);
	else
		ret = blk_trace_remove_queue(q);
	mutex_unlock(&bdev->bd_mutex);

	if (ret == 0)
		ret = count;
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
out:
	return ret;
}
static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
		   sysfs_blk_trace_enable_show, sysfs_blk_trace_enable_store);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);
static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};
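
/*
 * With the group named "trace", these attributes show up as
 * /sys/block/<disk>/<part>/trace/{enable,act_mask,pid,start_lba,end_lba};
 * e.g. "echo 1 > /sys/block/sda/sda1/trace/enable" (path illustrative)
 * arms a queue-level trace that is consumed through the "blk" ftrace
 * plugin rather than through relay/debugfs.
 */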
static int blk_str2act_mask(const char *str)
{
	int mask = 0;
	char *copy = kstrdup(str, GFP_KERNEL), *s;

	if (copy == NULL)
		return -ENOMEM;

	s = strstrip(copy);

	while (1) {
		char *sep = strchr(s, ',');

		if (sep != NULL)
			*sep = '\0';

		if (strcasecmp(s, "barrier") == 0)
			mask |= BLK_TC_BARRIER;
		else if (strcasecmp(s, "complete") == 0)
			mask |= BLK_TC_COMPLETE;
		else if (strcasecmp(s, "fs") == 0)
			mask |= BLK_TC_FS;
		else if (strcasecmp(s, "issue") == 0)
			mask |= BLK_TC_ISSUE;
		else if (strcasecmp(s, "pc") == 0)
			mask |= BLK_TC_PC;
		else if (strcasecmp(s, "queue") == 0)
			mask |= BLK_TC_QUEUE;
		else if (strcasecmp(s, "read") == 0)
			mask |= BLK_TC_READ;
		else if (strcasecmp(s, "requeue") == 0)
			mask |= BLK_TC_REQUEUE;
		else if (strcasecmp(s, "sync") == 0)
			mask |= BLK_TC_SYNC;
		else if (strcasecmp(s, "write") == 0)
			mask |= BLK_TC_WRITE;
		else {
			mask = -EINVAL;
			break;
		}

		if (sep == NULL)
			break;
		s = sep + 1;
	}
	kfree(copy);

	return mask;
}
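
/*
 * Example: writing "read,write,barrier" to the act_mask attribute yields
 * BLK_TC_READ | BLK_TC_WRITE | BLK_TC_BARRIER; any unrecognised token
 * makes the whole write fail with -EINVAL.
 */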
static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	lock_kernel();
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = bdev_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);
	if (q->blk_trace == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = sprintf(buf, "%#x\n", q->blk_trace->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
	mutex_unlock(&bdev->bd_mutex);

out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
	return ret;
}
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	u64 value;
	ssize_t ret = -ENXIO;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (sscanf(buf, "%llx", &value) != 1) {
			/* Assume it is a list of trace category names */
			int mask = blk_str2act_mask(buf);

			if (mask < 0)
				goto out;
			value = mask;
		}
	} else if (sscanf(buf, "%llu", &value) != 1)
		goto out;

	lock_kernel();
	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = bdev_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);
	ret = 0;
	if (q->blk_trace == NULL)
		ret = blk_trace_setup_queue(q, bdev->bd_dev);

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			q->blk_trace->act_mask = value;
		else if (attr == &dev_attr_pid)
			q->blk_trace->pid = value;
		else if (attr == &dev_attr_start_lba)
			q->blk_trace->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			q->blk_trace->end_lba = value;
		ret = count;
	}
	mutex_unlock(&bdev->bd_mutex);

out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
out:
	return ret;
}