#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

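/*
 * Per-request events: log the bio's device, sector and size, plus the
 * bcache device the request came in on (orig_major/orig_minor) and the
 * original sector with the fixed 16-sector data offset subtracted.
 */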
DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned int,	orig_major		)
		__field(unsigned int,	orig_minor		)
		__field(sector_t,	sector			)
		__field(sector_t,	orig_sector		)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->orig_major	= d->disk->major;
		__entry->orig_minor	= d->disk->first_minor;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
			      bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);

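/*
 * Key events: log a bkey as inode:offset, its length in sectors and
 * whether it is dirty.
 */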
DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u32,	size				)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(bool,	dirty				)
	),

	TP_fast_assign(
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
	),

	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
		  __entry->offset, __entry->size, __entry->dirty)
);

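/* Btree node events: identified only by the bucket the node lives in. */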
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,	bucket				)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
	),

	TP_printk("bucket %zu", __entry->bucket)
);

/* request.c */

DEFINE_EVENT(bcache_request, bcache_request_start,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

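/* Plain bio events: device, start sector, length and rwbs flags. */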
DECLARE_EVENT_CLASS(bcache_bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
			      bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

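/*
 * Read: a bcache_bio plus whether the read was served from the cache
 * (cache_hit) and whether caching was bypassed for it.
 */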
TRACE_EVENT(bcache_read,
	TP_PROTO(struct bio *bio, bool hit, bool bypass),
	TP_ARGS(bio, hit, bypass),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		cache_hit		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
			      bio->bi_iter.bi_size);
		__entry->cache_hit	= hit;
		__entry->bypass		= bypass;
	),

	TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);

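/*
 * Write: records the cache set uuid and target inode along with whether
 * the write went through writeback and whether it bypassed the cache.
 */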
TRACE_EVENT(bcache_write,
	TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
		 bool writeback, bool bypass),
	TP_ARGS(c, inode, bio, writeback, bypass),

	TP_STRUCT__entry(
		__array(char,		uuid,	16		)
		__field(u64,		inode			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		writeback		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
		__entry->inode		= inode;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
			      bio->bi_iter.bi_size);
		__entry->writeback	= writeback;
		__entry->bypass		= bypass;
	),

	TP_printk("%pU inode %llu %s %llu + %u writeback %u bypass %u",
		  __entry->uuid, __entry->inode,
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);

DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/* Journal */

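/* Cache-set events: identified only by the set uuid. */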
DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char,		uuid,	16		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bcache_bio, bcache_journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Btree */

DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

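/*
 * Btree node write: also records the block offset already written and the
 * key count of the last bset; both sit in the raw event record even though
 * the default printk shows only the bucket.
 */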
TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned,	block			)
		__field(unsigned,	keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->block	= b->written;
		__entry->keys	= b->keys.set[b->keys.nsets].data->keys;
	),

	TP_printk("bucket %zu", __entry->bucket)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

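/* Garbage collection: number of btree nodes coalesced into one. */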
TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(unsigned nodes),
	TP_ARGS(nodes),

	TP_STRUCT__entry(
		__field(unsigned,	nodes			)
	),

	TP_fast_assign(
		__entry->nodes	= nodes;
	),

	TP_printk("coalesced %u nodes", __entry->nodes)
);

DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

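/*
 * Key insertion into a btree node: records the node's bucket and level,
 * the key itself, and the caller-supplied op and status codes.
 */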
TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
	TP_ARGS(b, k, op, status),

	TP_STRUCT__entry(
		__field(u64,	btree_node			)
		__field(u32,	btree_level			)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(u32,	size				)
		__field(u8,	dirty				)
		__field(u8,	op				)
		__field(u8,	status				)
	),

	TP_fast_assign(
		__entry->btree_node	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->btree_level	= b->level;
		__entry->inode		= KEY_INODE(k);
		__entry->offset		= KEY_OFFSET(k);
		__entry->size		= KEY_SIZE(k);
		__entry->dirty		= KEY_DIRTY(k);
		__entry->op		= op;
		__entry->status		= status;
	),

	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
		  __entry->status, __entry->op,
		  __entry->btree_node, __entry->btree_level,
		  __entry->inode, __entry->offset,
		  __entry->size, __entry->dirty)
);

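/* Btree node split/compact: the node's bucket and the number of keys it held. */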
DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned,	keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->keys	= keys;
	),

	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

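/*
 * Key scan: number of keys found in the inode:offset range
 * [start_inode:start_offset, end_inode:end_offset].
 */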
TRACE_EVENT(bcache_keyscan,
	TP_PROTO(unsigned nr_found,
		 unsigned start_inode, uint64_t start_offset,
		 unsigned end_inode, uint64_t end_offset),
	TP_ARGS(nr_found,
		start_inode, start_offset,
		end_inode, end_offset),

	TP_STRUCT__entry(
		__field(__u32,	nr_found			)
		__field(__u32,	start_inode			)
		__field(__u64,	start_offset			)
		__field(__u32,	end_inode			)
		__field(__u64,	end_offset			)
	),

	TP_fast_assign(
		__entry->nr_found	= nr_found;
		__entry->start_inode	= start_inode;
		__entry->start_offset	= start_offset;
		__entry->end_inode	= end_inode;
		__entry->end_offset	= end_offset;
	),

	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
		  __entry->start_inode, __entry->start_offset,
		  __entry->end_inode, __entry->end_offset)
);

/* Allocator */

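/*
 * Bucket invalidation: the cache device, the bucket's start sector and how
 * many of its sectors were in use (GC_SECTORS_USED).
 */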
TRACE_EVENT(bcache_invalidate,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(unsigned,	sectors			)
		__field(dev_t,		dev			)
		__field(__u64,		offset			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->offset		= bucket << ca->set->bucket_bits;
		__entry->sectors	= GC_SECTORS_USED(&ca->buckets[bucket]);
	),

	TP_printk("invalidated %u sectors at %d,%d sector=%llu",
		  __entry->sectors, MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

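/*
 * Bucket allocation: bcache_alloc logs the cache device and the allocated
 * bucket's start sector; bcache_alloc_fail logs the free and free_inc fifo
 * fill levels and the prio_blocked counter at the time of the failure.
 */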
TRACE_EVENT(bcache_alloc,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(__u64,		offset			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->offset		= bucket << ca->set->bucket_bits;
	),

	TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc_fail,
	TP_PROTO(struct cache *ca, unsigned reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned,	free			)
		__field(unsigned,	free_inc		)
		__field(unsigned,	blocked			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->free		= fifo_used(&ca->free[reserve]);
		__entry->free_inc	= fifo_used(&ca->free_inc);
		__entry->blocked	= atomic_read(&ca->set->prio_blocked);
	),

	TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
		  __entry->free_inc, __entry->blocked)
);

/* Background writeback */

DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>