/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

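/*
 * The selftests below only ever generate function-call and
 * context-switch entries, so anything else found in the buffer
 * is treated as corruption.
 */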
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
		return 1;
	}
	return 0;
}

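/*
 * Walk one CPU's buffer, page by page, and check that every entry
 * that was actually recorded still looks like a valid trace entry.
 */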
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;
	int i;

	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	if (head_page(data) != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {

		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
				entries[idx].type);
			goto failed;
		}

		idx++;
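		/*
		 * Entries are packed ENTRIES_PER_PAGE to a page; the pages
		 * themselves are linked through page->lru into the
		 * data->trace_pages ring, so step to the next page once
		 * this one is exhausted.
		 */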
		if (idx >= ENTRIES_PER_PAGE) {
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long cnt = 0;
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;

		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}

	if (count)
		*count = cnt;

	return ret;
}

#ifdef CONFIG_FTRACE
/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep 1/10 of a second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* make sure functions have been recorded */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_FTRACE */

#ifdef CONFIG_IRQSOFF_TRACER
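/*
 * Verify the irqsoff tracer: run with interrupts disabled long enough
 * to register a new max latency, then check both the live buffer and
 * the max-latency snapshot (max_tr).
 */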
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
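/*
 * Same structure as the irqsoff test above, but the latency-critical
 * section is entered by disabling preemption instead of interrupts.
 */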
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
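/*
 * Combined test: overlap an irqs-off section with a preempt-off
 * section, enabling them again in the reverse order, so the
 * preemptirqsoff tracer has to cope with critical sections that do
 * not end in the order they began.
 */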
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_SCHED_TRACER
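/*
 * Helper thread for the wakeup tracer selftest: it boosts itself to
 * RT priority, signals the test via the completion, then sleeps until
 * the test wakes it up and finally tells it to stop.
 */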
static int trace_wakeup_test_thread(void *data)
{
	struct completion *x = data;

	/* Make this an RT thread; it doesn't need to be too high */
	rt_mutex_setprio(current, MAX_RT_PRIO - 5);

	/* Let the test know we have the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

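/*
 * Verify the wakeup tracer: wake the sleeping RT thread created above
 * and check that the wakeup latency was recorded in both buffers.
 */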
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */
	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
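/*
 * Verify the context-switch tracer: simply sleep with tracing enabled;
 * the resulting scheduler activity should leave entries in the buffer.
 */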
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
#endif /* CONFIG_DYNAMIC_FTRACE */