/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <asm/uaccess.h>

static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
static unsigned int blktrace_seq __read_mostly = 1;

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
                       const void *data, size_t len)
{
        struct blk_io_trace *t;

        t = relay_reserve(bt->rchan, sizeof(*t) + len);
        if (t) {
                const int cpu = smp_processor_id();

                t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
                t->time = cpu_clock(cpu) - per_cpu(blk_trace_cpu_offset, cpu);
                t->device = bt->dev;
                t->action = action;
                t->pid = pid;
                t->cpu = cpu;
                t->pdu_len = len;
                memcpy((void *) t + sizeof(*t), data, len);
        }
}

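/*
 * A notify record in the relay buffer is the fixed struct blk_io_trace
 * header immediately followed by pdu_len bytes of payload: the process
 * name for BLK_TN_PROCESS (see trace_note_tsk() below), and two u32
 * words of seconds/nanoseconds for BLK_TN_TIMESTAMP.
 */
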
/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started.
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
        tsk->btrace_seq = blktrace_seq;
        trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}

static void trace_note_time(struct blk_trace *bt)
{
        struct timespec now;
        unsigned long flags;
        u32 words[2];

        getnstimeofday(&now);
        words[0] = now.tv_sec;
        words[1] = now.tv_nsec;

        local_irq_save(flags);
        trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
        local_irq_restore(flags);
}

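/*
 * Decide whether an event should be dropped: returns 1 if none of the
 * event's action bits are enabled in act_mask, if its sector lies
 * outside the [start_lba, end_lba] window, or if it belongs to a pid
 * other than the one being traced.
 */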
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
                         pid_t pid)
{
        if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
                return 1;
        if (sector < bt->start_lba || sector > bt->end_lba)
                return 1;
        if (bt->pid && pid != bt->pid)
                return 1;

        return 0;
}

/*
 * Data direction bit lookup
 */
static u32 ddir_act[2] __read_mostly = {
        BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE)
};

/*
 * Bio action bits of interest
 */
static u32 bio_act[9] __read_mostly = {
        0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_ACT(BLK_TC_SYNC), 0,
        BLK_TC_ACT(BLK_TC_AHEAD), 0, 0, 0, BLK_TC_ACT(BLK_TC_META)
};

/*
 * More flags could be added as needed; pick each macro's shift so that
 * the flag lands on its own distinct index into bio_act[] above.
 */
#define trace_barrier_bit(rw)   \
        (((rw) & (1 << BIO_RW_BARRIER)) >> (BIO_RW_BARRIER - 0))
#define trace_sync_bit(rw)      \
        (((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
#define trace_ahead_bit(rw)     \
        (((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD))
#define trace_meta_bit(rw)      \
        (((rw) & (1 << BIO_RW_META)) >> (BIO_RW_META - 3))

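/*
 * Each macro above isolates one BIO_RW_* flag and shifts it to a fixed
 * small index: barrier -> 1, sync -> 2, ahead -> 4, meta -> 8.  E.g.
 * (1 << BIO_RW_SYNC) >> (BIO_RW_SYNC - 1) == 2, so a sync bio selects
 * bio_act[2] == BLK_TC_ACT(BLK_TC_SYNC), while a plain bio selects
 * bio_act[0] == 0.
 */
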
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
                     int rw, u32 what, int error, int pdu_len, void *pdu_data)
{
        struct task_struct *tsk = current;
        struct blk_io_trace *t;
        unsigned long flags;
        unsigned long *sequence;
        pid_t pid;
        int cpu;

        if (unlikely(bt->trace_state != Blktrace_running))
                return;

        what |= ddir_act[rw & WRITE];
        what |= bio_act[trace_barrier_bit(rw)];
        what |= bio_act[trace_sync_bit(rw)];
        what |= bio_act[trace_ahead_bit(rw)];
        what |= bio_act[trace_meta_bit(rw)];

        pid = tsk->pid;
        if (unlikely(act_log_check(bt, what, sector, pid)))
                return;

        /*
         * A word about the locking here - we disable interrupts to reserve
         * some space in the relay per-cpu buffer, to prevent an irq
         * from coming in and stepping on our toes. Once reserved, it's
         * enough to get preemption disabled to prevent read of this data
         * before we are through filling it. get_cpu()/put_cpu() does this
         * for us.
         */
        local_irq_save(flags);

        if (unlikely(tsk->btrace_seq != blktrace_seq))
                trace_note_tsk(bt, tsk);

        t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
        if (t) {
                cpu = smp_processor_id();
                sequence = per_cpu_ptr(bt->sequence, cpu);

                t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
                t->sequence = ++(*sequence);
                t->time = cpu_clock(cpu) - per_cpu(blk_trace_cpu_offset, cpu);
                t->sector = sector;
                t->bytes = bytes;
                t->action = what;
                t->pid = pid;
                t->device = bt->dev;
                t->cpu = cpu;
                t->error = error;
                t->pdu_len = pdu_len;

                if (pdu_len)
                        memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
        }

        local_irq_restore(flags);
}

EXPORT_SYMBOL_GPL(__blk_add_trace);

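/*
 * For orientation: the callers are thin inline wrappers in
 * <linux/blktrace_api.h>.  A sketch of the bio variant from this era
 * (the authoritative signature lives in that header, not in this file):
 *
 *      static inline void blk_add_trace_bio(struct request_queue *q,
 *                                           struct bio *bio, u32 what)
 *      {
 *              struct blk_trace *bt = q->blk_trace;
 *
 *              if (likely(!bt))
 *                      return;
 *
 *              __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
 *                              what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
 *      }
 */
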
static struct dentry *blk_tree_root;
static struct mutex blk_tree_mutex;
static unsigned int root_users;

static inline void blk_remove_root(void)
{
        if (blk_tree_root) {
                debugfs_remove(blk_tree_root);
                blk_tree_root = NULL;
        }
}

static void blk_remove_tree(struct dentry *dir)
{
        mutex_lock(&blk_tree_mutex);
        debugfs_remove(dir);
        if (--root_users == 0)
                blk_remove_root();
        mutex_unlock(&blk_tree_mutex);
}

static struct dentry *blk_create_tree(const char *blk_name)
{
        struct dentry *dir = NULL;
        int created = 0;

        mutex_lock(&blk_tree_mutex);

        if (!blk_tree_root) {
                blk_tree_root = debugfs_create_dir("block", NULL);
                if (!blk_tree_root)
                        goto err;
                created = 1;
        }

        dir = debugfs_create_dir(blk_name, blk_tree_root);
        if (dir)
                root_users++;
        else {
                /* Delete root only if we created it */
                if (created)
                        blk_remove_root();
        }

err:
        mutex_unlock(&blk_tree_mutex);
        return dir;
}

static void blk_trace_cleanup(struct blk_trace *bt)
{
        relay_close(bt->rchan);
        debugfs_remove(bt->dropped_file);
        blk_remove_tree(bt->dir);
        free_percpu(bt->sequence);
        kfree(bt);
}

static int blk_trace_remove(struct request_queue *q)
{
        struct blk_trace *bt;

        bt = xchg(&q->blk_trace, NULL);
        if (!bt)
                return -EINVAL;

        if (bt->trace_state == Blktrace_setup ||
            bt->trace_state == Blktrace_stopped)
                blk_trace_cleanup(bt);

        return 0;
}

static int blk_dropped_open(struct inode *inode, struct file *filp)
{
        filp->private_data = inode->i_private;

        return 0;
}

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
                                size_t count, loff_t *ppos)
{
        struct blk_trace *bt = filp->private_data;
        char buf[16];

        snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

        return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
        .owner =        THIS_MODULE,
        .open =         blk_dropped_open,
        .read =         blk_dropped_read,
};

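/*
 * With debugfs mounted at the conventional /sys/kernel/debug, the count
 * is readable as e.g. /sys/kernel/debug/block/<name>/dropped; a non-zero
 * value tells the user space reader how often events were lost to full
 * sub-buffers.
 */
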
/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
                                     void *prev_subbuf, size_t prev_padding)
{
        struct blk_trace *bt;

        if (!relay_buf_full(buf))
                return 1;

        bt = buf->chan->private_data;
        atomic_inc(&bt->dropped);
        return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
        debugfs_remove(dentry);
        return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
                                                   struct dentry *parent,
                                                   int mode,
                                                   struct rchan_buf *buf,
                                                   int *is_global)
{
        return debugfs_create_file(filename, mode, parent, buf,
                                   &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
        .subbuf_start           = blk_subbuf_start_callback,
        .create_buf_file        = blk_create_buf_file_callback,
        .remove_buf_file        = blk_remove_buf_file_callback,
};

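/*
 * relay_open() below invokes blk_create_buf_file_callback() once per
 * CPU, so each traced device's debugfs directory gains one file per CPU
 * (trace0, trace1, ...) that user space reads directly through
 * relay_file_operations.
 */
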
/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, struct block_device *bdev,
                       struct blk_user_trace_setup *buts)
{
        struct blk_trace *old_bt, *bt = NULL;
        struct dentry *dir = NULL;
        char b[BDEVNAME_SIZE];
        int ret, i;

        if (!buts->buf_size || !buts->buf_nr)
                return -EINVAL;

        strcpy(buts->name, bdevname(bdev, b));

        /*
         * some device names contain slashes - convert them to underscores
         * so the name is usable as a debugfs directory name
         */
        for (i = 0; i < strlen(buts->name); i++)
                if (buts->name[i] == '/')
                        buts->name[i] = '_';

        ret = -ENOMEM;
        bt = kzalloc(sizeof(*bt), GFP_KERNEL);
        if (!bt)
                goto err;

        bt->sequence = alloc_percpu(unsigned long);
        if (!bt->sequence)
                goto err;

        ret = -ENOENT;
        dir = blk_create_tree(buts->name);
        if (!dir)
                goto err;

        bt->dir = dir;
        bt->dev = bdev->bd_dev;
        atomic_set(&bt->dropped, 0);

        ret = -EIO;
        bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
                                               &blk_dropped_fops);
        if (!bt->dropped_file)
                goto err;

        bt->rchan = relay_open("trace", dir, buts->buf_size,
                               buts->buf_nr, &blk_relay_callbacks, bt);
        if (!bt->rchan)
                goto err;

        bt->act_mask = buts->act_mask;
        if (!bt->act_mask)
                bt->act_mask = (u16) -1;

        bt->start_lba = buts->start_lba;
        bt->end_lba = buts->end_lba;
        if (!bt->end_lba)
                bt->end_lba = -1ULL;

        bt->pid = buts->pid;
        bt->trace_state = Blktrace_setup;

        ret = -EBUSY;
        old_bt = xchg(&q->blk_trace, bt);
        if (old_bt) {
                (void) xchg(&q->blk_trace, old_bt);
                goto err;
        }

        return 0;
err:
        if (dir)
                blk_remove_tree(dir);
        if (bt) {
                if (bt->dropped_file)
                        debugfs_remove(bt->dropped_file);
                free_percpu(bt->sequence);
                if (bt->rchan)
                        relay_close(bt->rchan);
                kfree(bt);
        }
        return ret;
}

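/*
 * After a successful setup, the tree looks like this, assuming debugfs
 * is mounted at the conventional /sys/kernel/debug:
 *
 *      /sys/kernel/debug/block/<name>/dropped
 *      /sys/kernel/debug/block/<name>/trace0 ... trace<nr_cpus - 1>
 *
 * where <name> is the device name with slashes rewritten to underscores.
 * blk_trace_setup() below copies the modified buts back to user space,
 * so the caller learns that directory name.
 */
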
static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
                           char __user *arg)
{
        struct blk_user_trace_setup buts;
        int ret;

        ret = copy_from_user(&buts, arg, sizeof(buts));
        if (ret)
                return -EFAULT;

        ret = do_blk_trace_setup(q, bdev, &buts);
        if (ret)
                return ret;

        if (copy_to_user(arg, &buts, sizeof(buts)))
                return -EFAULT;

        return 0;
}

static int blk_trace_startstop(struct request_queue *q, int start)
{
        struct blk_trace *bt;
        int ret;

        bt = q->blk_trace;
        if (!bt)
                return -EINVAL;

        /*
         * For starting a trace, we can transition from a setup or stopped
         * trace. For stopping a trace, the state must be running.
         */
        ret = -EINVAL;
        if (start) {
                if (bt->trace_state == Blktrace_setup ||
                    bt->trace_state == Blktrace_stopped) {
                        blktrace_seq++;
                        smp_mb();
                        bt->trace_state = Blktrace_running;

                        trace_note_time(bt);
                        ret = 0;
                }
        } else {
                if (bt->trace_state == Blktrace_running) {
                        bt->trace_state = Blktrace_stopped;
                        relay_flush(bt->rchan);
                        ret = 0;
                }
        }

        return ret;
}

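/*
 * Note that stopping flushes the relay channel so partially filled
 * sub-buffers become readable, while (re)starting bumps blktrace_seq,
 * which makes trace_note_tsk() emit a fresh BLK_TN_PROCESS note for
 * each task that subsequently submits I/O.
 */
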
/**
 * blk_trace_ioctl - handle the ioctls associated with tracing
 * @bdev:       the block device
 * @cmd:        the ioctl cmd
 * @arg:        the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
        struct request_queue *q;
        int ret, start = 0;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        mutex_lock(&bdev->bd_mutex);

        switch (cmd) {
        case BLKTRACESETUP:
                ret = blk_trace_setup(q, bdev, arg);
                break;
        case BLKTRACESTART:
                start = 1;
                /* fall through */
        case BLKTRACESTOP:
                ret = blk_trace_startstop(q, start);
                break;
        case BLKTRACETEARDOWN:
                ret = blk_trace_remove(q);
                break;
        default:
                ret = -ENOTTY;
                break;
        }

        mutex_unlock(&bdev->bd_mutex);
        return ret;
}

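/*
 * A minimal user-space sketch of driving these ioctls (hypothetical
 * example: error handling omitted and "/dev/sda" is a placeholder):
 *
 *      struct blk_user_trace_setup buts;
 *      int fd = open("/dev/sda", O_RDONLY);
 *
 *      memset(&buts, 0, sizeof(buts));
 *      buts.buf_size = 512 * 1024;     (bytes per relay sub-buffer)
 *      buts.buf_nr = 4;                (sub-buffers per CPU)
 *
 *      ioctl(fd, BLKTRACESETUP, &buts);  (buts.name now holds the dir name)
 *      ioctl(fd, BLKTRACESTART);
 *      ... read the per-cpu trace files under debugfs ...
 *      ioctl(fd, BLKTRACESTOP);
 *      ioctl(fd, BLKTRACETEARDOWN);
 */
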
/**
 * blk_trace_shutdown - stop and cleanup trace structures
 * @q:          the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
        if (q->blk_trace) {
                blk_trace_startstop(q, 0);
                blk_trace_remove(q);
        }
}

/*
 * Average offset over two calls to cpu_clock() with a gettimeofday()
 * in the middle
 */
static void blk_check_time(unsigned long long *t, int this_cpu)
{
        unsigned long long a, b;
        struct timeval tv;

        a = cpu_clock(this_cpu);
        do_gettimeofday(&tv);
        b = cpu_clock(this_cpu);

        *t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000;
        *t -= (a + b) / 2;
}

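/*
 * Worked example: if the two cpu_clock() readings around gettimeofday()
 * are a = 1000 and b = 3000 while the wall clock reads W nanoseconds,
 * the stored offset is W - 2000.  Using the midpoint (a + b) / 2 splits
 * the cost of the gettimeofday() call itself evenly between the two
 * samples.
 */
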
/*
 * calibrate our inter-CPU timings
 */
static void blk_trace_check_cpu_time(void *data)
{
        unsigned long long *t;
        int this_cpu = get_cpu();

        t = &per_cpu(blk_trace_cpu_offset, this_cpu);

        /*
         * Just call it twice, hopefully the second call will be cache hot
         * and a little more precise
         */
        blk_check_time(t, this_cpu);
        blk_check_time(t, this_cpu);

        put_cpu();
}

static void blk_trace_set_ht_offsets(void)
{
#if defined(CONFIG_SCHED_SMT)
        int cpu, i;

        /*
         * now make sure HT siblings have the same time offset
         */
        preempt_disable();
        for_each_online_cpu(cpu) {
                unsigned long long *cpu_off, *sibling_off;

                for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu)) {
                        if (i == cpu)
                                continue;

                        cpu_off = &per_cpu(blk_trace_cpu_offset, cpu);
                        sibling_off = &per_cpu(blk_trace_cpu_offset, i);
                        *sibling_off = *cpu_off;
                }
        }
        preempt_enable();
#endif
}

static __init int blk_trace_init(void)
{
        mutex_init(&blk_tree_mutex);
        on_each_cpu(blk_trace_check_cpu_time, NULL, 1, 1);
        blk_trace_set_ht_offsets();

        return 0;
}

module_init(blk_trace_init);