/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_CONT:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
		return 1;
	}
	return 0;
}

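/*
 * Walk one CPU's ring buffer and check that every entry has a type the
 * tracer core knows about.  Note that ring_buffer_consume() removes each
 * entry as it is read, so a successful pass leaves that CPU's buffer empty.
 */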
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
			       entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	raw_local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	raw_local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
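
/*
 * Each selftest below follows the same basic pattern: initialize the
 * tracer, generate some activity (sleeping, disabling interrupts or
 * preemption, waking an RT task), call tracing_stop(), validate the
 * buffer(s) with trace_test_buffer(), then reset the tracer and call
 * tracing_start() again.
 */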

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)
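/*
 * Two levels of expansion are needed so that STR() stringifies what its
 * argument expands to; STR(DYN_FTRACE_TEST_NAME) therefore yields the
 * name of the actual test function rather than the literal token
 * "DYN_FTRACE_TEST_NAME".
 */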
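
/*
 * The dynamic ftrace test filters tracing down to a single function:
 * with the filter in place and that function never called, the buffer
 * must stay empty; after calling it exactly once, the buffer must
 * contain exactly one entry.
 */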
/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	trace->init(tr);

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
/* When dynamic ftrace is not configured, the dynamic test simply reports success. */
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */
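
/*
 * The latency tracers exercised below record into two buffers: the live
 * trace in tr, and max_tr, which holds a snapshot of the trace taken at
 * the maximum latency recorded so far.  Both buffers are validated.
 */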
#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
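/*
 * The combined tracer is exercised twice: the preempt- and irqs-off
 * section is run and both buffers are checked, tracing_max_latency is
 * reset and tracing restarted, and then the same section is run and
 * checked a second time.
 */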
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
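/*
 * The wakeup test creates a real-time kthread, wakes it from the test
 * task, and expects the wakeup tracer to record the latency between the
 * wakeup and the moment the RT thread is actually scheduled in.
 */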
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* let the test know we now run at an RT prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that, for some
	 * strange reason, the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen something is horribly wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */