/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM writeback

#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WRITEBACK_H

#include <linux/tracepoint.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_REFERENCED,		"I_REFERENCED"}		\
	)

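/*
 * Illustrative note (not from the original header): show_inode_state()
 * renders an i_state bitmask as the names of the set bits joined by "|",
 * because __print_flags() uses "|" as the separator above. For example, an
 * inode with I_DIRTY_SYNC and I_DIRTY_PAGES set would be printed as
 * "I_DIRTY_SYNC|I_DIRTY_PAGES" in the trace output.
 */
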
/* enums need to be exported to user space */
#undef EM
#undef EMe
#define EM(a,b)		TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

#define WB_WORK_REASON							\
	EM( WB_REASON_BACKGROUND,		"background")		\
	EM( WB_REASON_VMSCAN,			"vmscan")		\
	EM( WB_REASON_SYNC,			"sync")			\
	EM( WB_REASON_PERIODIC,			"periodic")		\
	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
	EM( WB_REASON_FORKER_THREAD,		"forker_thread")	\
	EMe(WB_REASON_FOREIGN_FLUSH,		"foreign_flush")

WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)		{ a, b },
#define EMe(a,b)	{ a, b }

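/*
 * Illustrative expansion of the two-pass EM()/EMe() pattern (comment only):
 * with the first pair of definitions, WB_WORK_REASON expands to a series of
 * TRACE_DEFINE_ENUM(WB_REASON_*); statements that export the enum values to
 * user space; after the redefinition above it expands to an initializer list
 * such as
 *
 *	{ WB_REASON_BACKGROUND, "background" }, ... { WB_REASON_FOREIGN_FLUSH, "foreign_flush" }
 *
 * which is exactly the lookup table that
 * __print_symbolic(__entry->reason, WB_WORK_REASON) consumes in the
 * TP_printk() format strings below.
 */
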
struct wb_writeback_work;

DECLARE_EVENT_CLASS(writeback_page_template,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(pgoff_t, index)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
					 NULL), 32);
		__entry->ino = mapping ? mapping->host->i_ino : 0;
		__entry->index = page->index;
	),

	TP_printk("bdi %s: ino=%lu index=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->index
	)
);

DEFINE_EVENT(writeback_page_template, writeback_dirty_page,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping)
);

DEFINE_EVENT(writeback_page_template, wait_on_page_writeback,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping)
);

DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		struct backing_dev_info *bdi = inode_to_bdi(inode);

		/* may be called for files on pseudo FSes w/ unregistered bdi */
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->flags		= flags;
	),

	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
		__entry->name,
		(unsigned long)__entry->ino,
		show_inode_state(__entry->state),
		show_inode_state(__entry->flags)
	)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return cgroup_ino(wb->memcg_css->cgroup);
}

static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return __trace_wb_assign_cgroup(wbc->wb);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return 1;
}

static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return 1;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
#endif	/* CREATE_TRACE_POINTS */

#ifdef CONFIG_CGROUP_WRITEBACK

TRACE_EVENT(inode_foreign_history,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc,
		 unsigned int history),

	TP_ARGS(inode, wbc, history),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		ino)
		__field(ino_t,		cgroup_ino)
		__field(unsigned int,	history)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
		__entry->history	= history;
	),

	TP_printk("bdi %s: ino=%lu cgroup_ino=%lu history=0x%x",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->cgroup_ino,
		__entry->history
	)
);

TRACE_EVENT(inode_switch_wbs,

	TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb,
		 struct bdi_writeback *new_wb),

	TP_ARGS(inode, old_wb, new_wb),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		ino)
		__field(ino_t,		old_cgroup_ino)
		__field(ino_t,		new_cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
		__entry->ino		= inode->i_ino;
		__entry->old_cgroup_ino	= __trace_wb_assign_cgroup(old_wb);
		__entry->new_cgroup_ino	= __trace_wb_assign_cgroup(new_wb);
	),

	TP_printk("bdi %s: ino=%lu old_cgroup_ino=%lu new_cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->old_cgroup_ino,
		(unsigned long)__entry->new_cgroup_ino
	)
);

TRACE_EVENT(track_foreign_dirty,

	TP_PROTO(struct page *page, struct bdi_writeback *wb),

	TP_ARGS(page, wb),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(u64,		bdi_id)
		__field(ino_t,		ino)
		__field(unsigned int,	memcg_id)
		__field(ino_t,		cgroup_ino)
		__field(ino_t,		page_cgroup_ino)
	),

	TP_fast_assign(
		struct address_space *mapping = page_mapping(page);
		struct inode *inode = mapping ? mapping->host : NULL;

		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->bdi_id		= wb->bdi->id;
		__entry->ino		= inode ? inode->i_ino : 0;
		__entry->memcg_id	= wb->memcg_css->id;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
		__entry->page_cgroup_ino = cgroup_ino(page_memcg(page)->css.cgroup);
	),

	TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
		__entry->name,
		__entry->bdi_id,
		(unsigned long)__entry->ino,
		__entry->memcg_id,
		(unsigned long)__entry->cgroup_ino,
		(unsigned long)__entry->page_cgroup_ino
	)
);

TRACE_EVENT(flush_foreign,

	TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id,
		 unsigned int frn_memcg_id),

	TP_ARGS(wb, frn_bdi_id, frn_memcg_id),

	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(ino_t,		cgroup_ino)
		__field(unsigned int,	frn_bdi_id)
		__field(unsigned int,	frn_memcg_id)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
		__entry->frn_bdi_id	= frn_bdi_id;
		__entry->frn_memcg_id	= frn_memcg_id;
	),

	TP_printk("bdi %s: cgroup_ino=%lu frn_bdi_id=%u frn_memcg_id=%u",
		__entry->name,
		(unsigned long)__entry->cgroup_ino,
		__entry->frn_bdi_id,
		__entry->frn_memcg_id
	)
);
#endif

DECLARE_EVENT_CLASS(writeback_write_inode_template,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(int, sync_mode)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->sync_mode,
		(unsigned long)__entry->cgroup_ino
	)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DECLARE_EVENT_CLASS(writeback_work_class,
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
	TP_ARGS(wb, work),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_pages)
		__field(dev_t, sb_dev)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, range_cyclic)
		__field(int, for_background)
		__field(int, reason)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->nr_pages = work->nr_pages;
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
		__entry->for_kupdate = work->for_kupdate;
		__entry->range_cyclic = work->range_cyclic;
		__entry->for_background = work->for_background;
		__entry->reason = work->reason;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
		  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%lu",
		  __entry->name,
		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
		  __entry->nr_pages,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->range_cyclic,
		  __entry->for_background,
		  __print_symbolic(__entry->reason, WB_WORK_REASON),
		  (unsigned long)__entry->cgroup_ino
	)
);

#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
	TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);

TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long,		pages)
	),
	TP_fast_assign(
		__entry->pages		= pages_written;
	),
	TP_printk("%ld", __entry->pages)
);

DECLARE_EVENT_CLASS(writeback_class,
	TP_PROTO(struct bdi_writeback *wb),
	TP_ARGS(wb),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->cgroup_ino
	)
);

#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct bdi_writeback *wb), \
	TP_ARGS(wb))

DEFINE_WRITEBACK_EVENT(writeback_wake_background);

TRACE_EVENT(writeback_bdi_register,
	TP_PROTO(struct backing_dev_info *bdi),
	TP_ARGS(bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
	),
	TP_printk("%s",
		__entry->name
	)
);

DECLARE_EVENT_CLASS(wbc_class,
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
	TP_ARGS(wbc, bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_to_write)
		__field(long, pages_skipped)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, for_background)
		__field(int, for_reclaim)
		__field(int, range_cyclic)
		__field(long, range_start)
		__field(long, range_end)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->nr_to_write	= wbc->nr_to_write;
		__entry->pages_skipped	= wbc->pages_skipped;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->for_kupdate	= wbc->for_kupdate;
		__entry->for_background	= wbc->for_background;
		__entry->for_reclaim	= wbc->for_reclaim;
		__entry->range_cyclic	= wbc->range_cyclic;
		__entry->range_start	= (long)wbc->range_start;
		__entry->range_end	= (long)wbc->range_end;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
		"bgrd=%d reclm=%d cyclic=%d "
		"start=0x%lx end=0x%lx cgroup_ino=%lu",
		__entry->name,
		__entry->nr_to_write,
		__entry->pages_skipped,
		__entry->sync_mode,
		__entry->for_kupdate,
		__entry->for_background,
		__entry->for_reclaim,
		__entry->range_cyclic,
		__entry->range_start,
		__entry->range_end,
		(unsigned long)__entry->cgroup_ino
	)
);

#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);

TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 struct wb_writeback_work *work,
		 unsigned long dirtied_before,
		 int moved),
	TP_ARGS(wb, work, dirtied_before, moved),
	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(unsigned long,	older)
		__field(long,		age)
		__field(int,		moved)
		__field(int,		reason)
		__field(ino_t,		cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->older	= dirtied_before;
		__entry->age	= (jiffies - dirtied_before) * 1000 / HZ;
		__entry->moved	= moved;
		__entry->reason	= work->reason;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu",
		__entry->name,
		__entry->older,	/* dirtied_before in jiffies */
		__entry->age,	/* dirtied_before in relative milliseconds */
		__entry->moved,
		__print_symbolic(__entry->reason, WB_WORK_REASON),
		(unsigned long)__entry->cgroup_ino
	)
);

TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long,	nr_dirty)
		__field(unsigned long,	nr_writeback)
		__field(unsigned long,	background_thresh)
		__field(unsigned long,	dirty_thresh)
		__field(unsigned long,	dirty_limit)
		__field(unsigned long,	nr_dirtied)
		__field(unsigned long,	nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty	= global_node_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback	= global_node_page_state(NR_WRITEBACK);
		__entry->nr_dirtied	= global_node_page_state(NR_DIRTIED);
		__entry->nr_written	= global_node_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh	= dirty_thresh;
		__entry->dirty_limit	= global_wb_domain.dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);

#define KBps(x)			((x) << (PAGE_SHIFT - 10))

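/*
 * Illustrative arithmetic (not from the original header): KBps() converts a
 * count of pages per second into KB/s. Assuming 4 KiB pages
 * (PAGE_SHIFT == 12), the shift is by 2, i.e. KBps(x) == x * 4, so
 * 256 pages/s is reported as 1024 KB/s.
 */
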
TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(wb, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char,		bdi, 32)
		__field(unsigned long,	write_bw)
		__field(unsigned long,	avg_write_bw)
		__field(unsigned long,	dirty_rate)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned long,	balanced_dirty_ratelimit)
		__field(ino_t,		cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
		__entry->write_bw	= KBps(wb->write_bandwidth);
		__entry->avg_write_bw	= KBps(wb->avg_write_bandwidth);
		__entry->dirty_rate	= KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(wb->balanced_dirty_ratelimit);
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit, /* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
		  (unsigned long)__entry->cgroup_ino
	)
);

TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(	 char,	bdi, 32)
		__field(unsigned long,	limit)
		__field(unsigned long,	setpoint)
		__field(unsigned long,	dirty)
		__field(unsigned long,	bdi_setpoint)
		__field(unsigned long,	bdi_dirty)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned int,	dirtied)
		__field(unsigned int,	dirtied_pause)
		__field(unsigned long,	paused)
		__field(	 long,	pause)
		__field(unsigned long,	period)
		__field(	 long,	think)
		__field(ino_t,		cgroup_ino)
	),

	TP_fast_assign(
		unsigned long freerun = (thresh + bg_thresh) / 2;
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);

		__entry->limit		= global_wb_domain.dirty_limit;
		__entry->setpoint	= (global_wb_domain.dirty_limit +
						freerun) / 2;
		__entry->dirty		= dirty;
		__entry->bdi_setpoint	= __entry->setpoint *
						bdi_thresh / (thresh + 1);
		__entry->bdi_dirty	= bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->dirtied	= dirtied;
		__entry->dirtied_pause	= current->nr_dirtied_pause;
		__entry->think		= current->dirty_paused_when == 0 ? 0 :
			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
		__entry->period		= period * 1000 / HZ;
		__entry->pause		= pause * 1000 / HZ;
		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),

	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think,	/* ms */
		  (unsigned long)__entry->cgroup_ino
	)
);

TRACE_EVENT(writeback_sb_inodes_requeue,

	TP_PROTO(struct inode *inode),
	TP_ARGS(inode),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(inode_to_wb(inode));
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  (unsigned long)__entry->cgroup_ino
	)
);

DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(unsigned int,	usec_timeout)
		__field(unsigned int,	usec_delayed)
	),

	TP_fast_assign(
		__entry->usec_timeout	= usec_timeout;
		__entry->usec_delayed	= usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
			__entry->usec_timeout,
			__entry->usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

DECLARE_EVENT_CLASS(writeback_single_inode_template,

	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write
	),

	TP_ARGS(inode, wbc, nr_to_write),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned long, writeback_index)
		__field(long, nr_to_write)
		__field(unsigned long, wrote)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->writeback_index = inode->i_mapping->writeback_index;
		__entry->nr_to_write	= nr_to_write;
		__entry->wrote		= nr_to_write - wbc->nr_to_write;
		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
		  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->writeback_index,
		  __entry->nr_to_write,
		  __entry->wrote,
		  (unsigned long)__entry->cgroup_ino
	)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DECLARE_EVENT_CLASS(writeback_inode_template,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	ino_t,	ino			)
		__field(unsigned long,	state			)
		__field(	__u16,	mode			)
		__field(unsigned long,	dirtied_when		)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->state	= inode->i_state;
		__entry->mode	= inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long)__entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/*
 * Inode writeback list tracking.
 */

DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

#endif /* _TRACE_WRITEBACK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
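
/*
 * Illustrative usage sketch (not part of the original header): exactly one
 * translation unit -- in mainline it is fs/fs-writeback.c -- instantiates
 * these tracepoints by defining CREATE_TRACE_POINTS before including this
 * header:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include <trace/events/writeback.h>
 *
 * Every other file includes the header without that define and only gets
 * the trace_*() call wrappers; trace/define_trace.h above generates the
 * event definitions only when CREATE_TRACE_POINTS is set.
 */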