/*
 * include/trace/events/writeback.h -- tracepoint definitions for the
 * writeback subsystem.
 *
 * Patch context: "writeback: Add a 'reason' to wb_writeback_work"
 * (mirror of the Ubuntu hirsute kernel tree).
 */
1 #undef TRACE_SYSTEM
2 #define TRACE_SYSTEM writeback
3
4 #if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define _TRACE_WRITEBACK_H
6
7 #include <linux/backing-dev.h>
8 #include <linux/device.h>
9 #include <linux/writeback.h>
10
/*
 * Decode an inode->i_state bitmask into a human-readable "A|B|C" string
 * for trace output. Flags are printed in the order listed below.
 */
#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_REFERENCED,		"I_REFERENCED"}		\
	)
23
24 struct wb_writeback_work;
25
26 DECLARE_EVENT_CLASS(writeback_work_class,
27 TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
28 TP_ARGS(bdi, work),
29 TP_STRUCT__entry(
30 __array(char, name, 32)
31 __field(long, nr_pages)
32 __field(dev_t, sb_dev)
33 __field(int, sync_mode)
34 __field(int, for_kupdate)
35 __field(int, range_cyclic)
36 __field(int, for_background)
37 __field(int, reason)
38 ),
39 TP_fast_assign(
40 strncpy(__entry->name, dev_name(bdi->dev), 32);
41 __entry->nr_pages = work->nr_pages;
42 __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
43 __entry->sync_mode = work->sync_mode;
44 __entry->for_kupdate = work->for_kupdate;
45 __entry->range_cyclic = work->range_cyclic;
46 __entry->for_background = work->for_background;
47 __entry->reason = work->reason;
48 ),
49 TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
50 "kupdate=%d range_cyclic=%d background=%d reason=%s",
51 __entry->name,
52 MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
53 __entry->nr_pages,
54 __entry->sync_mode,
55 __entry->for_kupdate,
56 __entry->range_cyclic,
57 __entry->for_background,
58 wb_reason_name[__entry->reason]
59 )
60 );
61 #define DEFINE_WRITEBACK_WORK_EVENT(name) \
62 DEFINE_EVENT(writeback_work_class, name, \
63 TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
64 TP_ARGS(bdi, work))
65 DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread);
66 DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
67 DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
68 DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
69 DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
70 DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
71
72 TRACE_EVENT(writeback_pages_written,
73 TP_PROTO(long pages_written),
74 TP_ARGS(pages_written),
75 TP_STRUCT__entry(
76 __field(long, pages)
77 ),
78 TP_fast_assign(
79 __entry->pages = pages_written;
80 ),
81 TP_printk("%ld", __entry->pages)
82 );
83
84 DECLARE_EVENT_CLASS(writeback_class,
85 TP_PROTO(struct backing_dev_info *bdi),
86 TP_ARGS(bdi),
87 TP_STRUCT__entry(
88 __array(char, name, 32)
89 ),
90 TP_fast_assign(
91 strncpy(__entry->name, dev_name(bdi->dev), 32);
92 ),
93 TP_printk("bdi %s",
94 __entry->name
95 )
96 );
97 #define DEFINE_WRITEBACK_EVENT(name) \
98 DEFINE_EVENT(writeback_class, name, \
99 TP_PROTO(struct backing_dev_info *bdi), \
100 TP_ARGS(bdi))
101
102 DEFINE_WRITEBACK_EVENT(writeback_nowork);
103 DEFINE_WRITEBACK_EVENT(writeback_wake_background);
104 DEFINE_WRITEBACK_EVENT(writeback_wake_thread);
105 DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread);
106 DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
107 DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
108 DEFINE_WRITEBACK_EVENT(writeback_thread_start);
109 DEFINE_WRITEBACK_EVENT(writeback_thread_stop);
110
111 DECLARE_EVENT_CLASS(wbc_class,
112 TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
113 TP_ARGS(wbc, bdi),
114 TP_STRUCT__entry(
115 __array(char, name, 32)
116 __field(long, nr_to_write)
117 __field(long, pages_skipped)
118 __field(int, sync_mode)
119 __field(int, for_kupdate)
120 __field(int, for_background)
121 __field(int, for_reclaim)
122 __field(int, range_cyclic)
123 __field(long, range_start)
124 __field(long, range_end)
125 ),
126
127 TP_fast_assign(
128 strncpy(__entry->name, dev_name(bdi->dev), 32);
129 __entry->nr_to_write = wbc->nr_to_write;
130 __entry->pages_skipped = wbc->pages_skipped;
131 __entry->sync_mode = wbc->sync_mode;
132 __entry->for_kupdate = wbc->for_kupdate;
133 __entry->for_background = wbc->for_background;
134 __entry->for_reclaim = wbc->for_reclaim;
135 __entry->range_cyclic = wbc->range_cyclic;
136 __entry->range_start = (long)wbc->range_start;
137 __entry->range_end = (long)wbc->range_end;
138 ),
139
140 TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
141 "bgrd=%d reclm=%d cyclic=%d "
142 "start=0x%lx end=0x%lx",
143 __entry->name,
144 __entry->nr_to_write,
145 __entry->pages_skipped,
146 __entry->sync_mode,
147 __entry->for_kupdate,
148 __entry->for_background,
149 __entry->for_reclaim,
150 __entry->range_cyclic,
151 __entry->range_start,
152 __entry->range_end)
153 )
154
155 #define DEFINE_WBC_EVENT(name) \
156 DEFINE_EVENT(wbc_class, name, \
157 TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
158 TP_ARGS(wbc, bdi))
159 DEFINE_WBC_EVENT(wbc_writepage);
160
161 TRACE_EVENT(writeback_queue_io,
162 TP_PROTO(struct bdi_writeback *wb,
163 struct wb_writeback_work *work,
164 int moved),
165 TP_ARGS(wb, work, moved),
166 TP_STRUCT__entry(
167 __array(char, name, 32)
168 __field(unsigned long, older)
169 __field(long, age)
170 __field(int, moved)
171 __field(int, reason)
172 ),
173 TP_fast_assign(
174 unsigned long *older_than_this = work->older_than_this;
175 strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
176 __entry->older = older_than_this ? *older_than_this : 0;
177 __entry->age = older_than_this ?
178 (jiffies - *older_than_this) * 1000 / HZ : -1;
179 __entry->moved = moved;
180 __entry->reason = work->reason;
181 ),
182 TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s",
183 __entry->name,
184 __entry->older, /* older_than_this in jiffies */
185 __entry->age, /* older_than_this in relative milliseconds */
186 __entry->moved,
187 wb_reason_name[__entry->reason])
188 );
189
190 TRACE_EVENT(global_dirty_state,
191
192 TP_PROTO(unsigned long background_thresh,
193 unsigned long dirty_thresh
194 ),
195
196 TP_ARGS(background_thresh,
197 dirty_thresh
198 ),
199
200 TP_STRUCT__entry(
201 __field(unsigned long, nr_dirty)
202 __field(unsigned long, nr_writeback)
203 __field(unsigned long, nr_unstable)
204 __field(unsigned long, background_thresh)
205 __field(unsigned long, dirty_thresh)
206 __field(unsigned long, dirty_limit)
207 __field(unsigned long, nr_dirtied)
208 __field(unsigned long, nr_written)
209 ),
210
211 TP_fast_assign(
212 __entry->nr_dirty = global_page_state(NR_FILE_DIRTY);
213 __entry->nr_writeback = global_page_state(NR_WRITEBACK);
214 __entry->nr_unstable = global_page_state(NR_UNSTABLE_NFS);
215 __entry->nr_dirtied = global_page_state(NR_DIRTIED);
216 __entry->nr_written = global_page_state(NR_WRITTEN);
217 __entry->background_thresh = background_thresh;
218 __entry->dirty_thresh = dirty_thresh;
219 __entry->dirty_limit = global_dirty_limit;
220 ),
221
222 TP_printk("dirty=%lu writeback=%lu unstable=%lu "
223 "bg_thresh=%lu thresh=%lu limit=%lu "
224 "dirtied=%lu written=%lu",
225 __entry->nr_dirty,
226 __entry->nr_writeback,
227 __entry->nr_unstable,
228 __entry->background_thresh,
229 __entry->dirty_thresh,
230 __entry->dirty_limit,
231 __entry->nr_dirtied,
232 __entry->nr_written
233 )
234 );
235
236 #define KBps(x) ((x) << (PAGE_SHIFT - 10))
237
238 TRACE_EVENT(bdi_dirty_ratelimit,
239
240 TP_PROTO(struct backing_dev_info *bdi,
241 unsigned long dirty_rate,
242 unsigned long task_ratelimit),
243
244 TP_ARGS(bdi, dirty_rate, task_ratelimit),
245
246 TP_STRUCT__entry(
247 __array(char, bdi, 32)
248 __field(unsigned long, write_bw)
249 __field(unsigned long, avg_write_bw)
250 __field(unsigned long, dirty_rate)
251 __field(unsigned long, dirty_ratelimit)
252 __field(unsigned long, task_ratelimit)
253 __field(unsigned long, balanced_dirty_ratelimit)
254 ),
255
256 TP_fast_assign(
257 strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
258 __entry->write_bw = KBps(bdi->write_bandwidth);
259 __entry->avg_write_bw = KBps(bdi->avg_write_bandwidth);
260 __entry->dirty_rate = KBps(dirty_rate);
261 __entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit);
262 __entry->task_ratelimit = KBps(task_ratelimit);
263 __entry->balanced_dirty_ratelimit =
264 KBps(bdi->balanced_dirty_ratelimit);
265 ),
266
267 TP_printk("bdi %s: "
268 "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
269 "dirty_ratelimit=%lu task_ratelimit=%lu "
270 "balanced_dirty_ratelimit=%lu",
271 __entry->bdi,
272 __entry->write_bw, /* write bandwidth */
273 __entry->avg_write_bw, /* avg write bandwidth */
274 __entry->dirty_rate, /* bdi dirty rate */
275 __entry->dirty_ratelimit, /* base ratelimit */
276 __entry->task_ratelimit, /* ratelimit with position control */
277 __entry->balanced_dirty_ratelimit /* the balanced ratelimit */
278 )
279 );
280
281 TRACE_EVENT(balance_dirty_pages,
282
283 TP_PROTO(struct backing_dev_info *bdi,
284 unsigned long thresh,
285 unsigned long bg_thresh,
286 unsigned long dirty,
287 unsigned long bdi_thresh,
288 unsigned long bdi_dirty,
289 unsigned long dirty_ratelimit,
290 unsigned long task_ratelimit,
291 unsigned long dirtied,
292 long pause,
293 unsigned long start_time),
294
295 TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
296 dirty_ratelimit, task_ratelimit,
297 dirtied, pause, start_time),
298
299 TP_STRUCT__entry(
300 __array( char, bdi, 32)
301 __field(unsigned long, limit)
302 __field(unsigned long, setpoint)
303 __field(unsigned long, dirty)
304 __field(unsigned long, bdi_setpoint)
305 __field(unsigned long, bdi_dirty)
306 __field(unsigned long, dirty_ratelimit)
307 __field(unsigned long, task_ratelimit)
308 __field(unsigned int, dirtied)
309 __field(unsigned int, dirtied_pause)
310 __field(unsigned long, paused)
311 __field( long, pause)
312 ),
313
314 TP_fast_assign(
315 unsigned long freerun = (thresh + bg_thresh) / 2;
316 strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
317
318 __entry->limit = global_dirty_limit;
319 __entry->setpoint = (global_dirty_limit + freerun) / 2;
320 __entry->dirty = dirty;
321 __entry->bdi_setpoint = __entry->setpoint *
322 bdi_thresh / (thresh + 1);
323 __entry->bdi_dirty = bdi_dirty;
324 __entry->dirty_ratelimit = KBps(dirty_ratelimit);
325 __entry->task_ratelimit = KBps(task_ratelimit);
326 __entry->dirtied = dirtied;
327 __entry->dirtied_pause = current->nr_dirtied_pause;
328 __entry->pause = pause * 1000 / HZ;
329 __entry->paused = (jiffies - start_time) * 1000 / HZ;
330 ),
331
332
333 TP_printk("bdi %s: "
334 "limit=%lu setpoint=%lu dirty=%lu "
335 "bdi_setpoint=%lu bdi_dirty=%lu "
336 "dirty_ratelimit=%lu task_ratelimit=%lu "
337 "dirtied=%u dirtied_pause=%u "
338 "paused=%lu pause=%ld",
339 __entry->bdi,
340 __entry->limit,
341 __entry->setpoint,
342 __entry->dirty,
343 __entry->bdi_setpoint,
344 __entry->bdi_dirty,
345 __entry->dirty_ratelimit,
346 __entry->task_ratelimit,
347 __entry->dirtied,
348 __entry->dirtied_pause,
349 __entry->paused, /* ms */
350 __entry->pause /* ms */
351 )
352 );
353
354 DECLARE_EVENT_CLASS(writeback_congest_waited_template,
355
356 TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
357
358 TP_ARGS(usec_timeout, usec_delayed),
359
360 TP_STRUCT__entry(
361 __field( unsigned int, usec_timeout )
362 __field( unsigned int, usec_delayed )
363 ),
364
365 TP_fast_assign(
366 __entry->usec_timeout = usec_timeout;
367 __entry->usec_delayed = usec_delayed;
368 ),
369
370 TP_printk("usec_timeout=%u usec_delayed=%u",
371 __entry->usec_timeout,
372 __entry->usec_delayed)
373 );
374
375 DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,
376
377 TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
378
379 TP_ARGS(usec_timeout, usec_delayed)
380 );
381
382 DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,
383
384 TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
385
386 TP_ARGS(usec_timeout, usec_delayed)
387 );
388
389 DECLARE_EVENT_CLASS(writeback_single_inode_template,
390
391 TP_PROTO(struct inode *inode,
392 struct writeback_control *wbc,
393 unsigned long nr_to_write
394 ),
395
396 TP_ARGS(inode, wbc, nr_to_write),
397
398 TP_STRUCT__entry(
399 __array(char, name, 32)
400 __field(unsigned long, ino)
401 __field(unsigned long, state)
402 __field(unsigned long, dirtied_when)
403 __field(unsigned long, writeback_index)
404 __field(long, nr_to_write)
405 __field(unsigned long, wrote)
406 ),
407
408 TP_fast_assign(
409 strncpy(__entry->name,
410 dev_name(inode->i_mapping->backing_dev_info->dev), 32);
411 __entry->ino = inode->i_ino;
412 __entry->state = inode->i_state;
413 __entry->dirtied_when = inode->dirtied_when;
414 __entry->writeback_index = inode->i_mapping->writeback_index;
415 __entry->nr_to_write = nr_to_write;
416 __entry->wrote = nr_to_write - wbc->nr_to_write;
417 ),
418
419 TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
420 "index=%lu to_write=%ld wrote=%lu",
421 __entry->name,
422 __entry->ino,
423 show_inode_state(__entry->state),
424 __entry->dirtied_when,
425 (jiffies - __entry->dirtied_when) / HZ,
426 __entry->writeback_index,
427 __entry->nr_to_write,
428 __entry->wrote
429 )
430 );
431
432 DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_requeue,
433 TP_PROTO(struct inode *inode,
434 struct writeback_control *wbc,
435 unsigned long nr_to_write),
436 TP_ARGS(inode, wbc, nr_to_write)
437 );
438
439 DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
440 TP_PROTO(struct inode *inode,
441 struct writeback_control *wbc,
442 unsigned long nr_to_write),
443 TP_ARGS(inode, wbc, nr_to_write)
444 );
445
446 #endif /* _TRACE_WRITEBACK_H */
447
448 /* This part must be outside protection */
449 #include <trace/define_trace.h>