/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>

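/* Entry types that the selftests below consider valid in the trace buffer. */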
static inline int trace_valid_entry(struct trace_entry *entry)
{
        switch (entry->type) {
        case TRACE_FN:
        case TRACE_CTX:
        case TRACE_WAKE:
        case TRACE_STACK:
        case TRACE_PRINT:
        case TRACE_SPECIAL:
        case TRACE_BRANCH:
        case TRACE_GRAPH_ENT:
        case TRACE_GRAPH_RET:
        case TRACE_HW_BRANCHES:
                return 1;
        }
        return 0;
}

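/* Consume every entry on one CPU's ring buffer and check that each is sane. */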
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
        struct ring_buffer_event *event;
        struct trace_entry *entry;
        unsigned int loops = 0;

        while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
                entry = ring_buffer_event_data(event);

                /*
                 * The ring buffer holds at most trace_buf_size entries;
                 * if we loop more than that, something is wrong
                 * with the ring buffer.
                 */
                if (loops++ > trace_buf_size) {
                        printk(KERN_CONT ".. bad ring buffer ");
                        goto failed;
                }
                if (!trace_valid_entry(entry)) {
                        printk(KERN_CONT ".. invalid entry %d ",
                                entry->type);
                        goto failed;
                }
        }
        return 0;

 failed:
        /* disable tracing */
        tracing_disabled = 1;
        printk(KERN_CONT ".. corrupted trace buffer .. ");
        return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;

        /* Don't allow flipping of max traces now */
        local_irq_save(flags);
        __raw_spin_lock(&ftrace_max_lock);

        cnt = ring_buffer_entries(tr->buffer);

        /*
         * trace_test_buffer_cpu() runs a while loop to consume all data.
         * If the calling tracer is broken and is constantly filling
         * the buffer, this will run forever and hard lock the box.
         * We disable the ring buffer while we do this test to prevent
         * a hard lockup.
         */
        tracing_off();
        for_each_possible_cpu(cpu) {
                ret = trace_test_buffer_cpu(tr, cpu);
                if (ret)
                        break;
        }
        tracing_on();
        __raw_spin_unlock(&ftrace_max_lock);
        local_irq_restore(flags);

        if (count)
                *count = cnt;

        return ret;
}

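/* Warn when a tracer's init() callback fails during its selftest. */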
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
                trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                                           struct trace_array *tr,
                                           int (*func)(void))
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        unsigned long count;
        char *func_name;
        int ret;

        /* The ftrace test PASSED */
        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        /* the function is passed in by parameter to keep gcc from optimizing it away */
        func();

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

        /* filter only on our function */
        ftrace_set_filter(func_name, strlen(func_name), 1);

        /* enable tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for a 1/10 of a second */
        msleep(100);

        /* we should have nothing in the buffer */
        ret = trace_test_buffer(tr, &count);
        if (ret)
                goto out;

        if (count) {
                ret = -1;
                printk(KERN_CONT ".. filter did not filter .. ");
                goto out;
        }

        /* call our function again */
        func();

        /* sleep again */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        /* we should only have one item */
        if (!ret && count != 1) {
                printk(KERN_CONT ".. filter failed count=%ld ..", count);
                ret = -1;
                goto out;
        }

 out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* Enable tracing on all functions again */
        ftrace_set_filter(NULL, 0, 1);

        return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        unsigned long count;
        int ret;

        /* make sure msleep has been recorded */
        msleep(1);

        /* start the tracing */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for a 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                                                     DYN_FTRACE_TEST_NAME);

 out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* kill ftrace totally if we failed */
        if (ret)
                ftrace_kill();

        return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST     100000000

static void __ftrace_dump(bool disable_tracing);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
        /* This is harmlessly racy, we want to approximately detect a hang */
        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
                ftrace_graph_stop();
                printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
                if (ftrace_dump_on_oops)
                        __ftrace_dump(false);
                return 0;
        }

        return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer selftest, from which
 * this one has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
                                      struct trace_array *tr)
{
        int ret;
        unsigned long count;

        /*
         * Simulate the init() callback, but attach a watchdog callback
         * to detect and recover from possible hangs.
         */
        tracing_reset_online_cpus(tr);
        ret = register_ftrace_graph(&trace_graph_return,
                                    &trace_graph_entry_watchdog);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }
        tracing_start_cmdline_record();

        /* Sleep for a 1/10 of a second */
        msleep(100);

        /* Have we just recovered from a hang? */
        if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
                tracing_selftest_disabled = true;
                ret = -1;
                goto out;
        }

        tracing_stop();

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);

        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* Don't test dynamic tracing, the function tracer already did */

out:
        /* Stop it if we failed */
        if (ret)
                ftrace_graph_stop();

        return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
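/*
 * Disable interrupts for a short while and check that the irqsoff tracer
 * recorded the latency in both the live and max-latency buffers.
 */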
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable interrupts for a bit */
        local_irq_disable();
        udelay(100);
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max irqs off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
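/*
 * Disable preemption for a short while and check that the preemptoff tracer
 * recorded the latency in both the live and max-latency buffers.
 */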
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptable,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable preemption for a bit */
        preempt_disable();
        udelay(100);
        preempt_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max preempt off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
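/*
 * Disable preemption and interrupts together, releasing them in both orders,
 * and check that each latency shows up in the trace buffers.
 */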
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptable,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out_no_start;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* disable preemption and interrupts for a bit */
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max irqs/preempt off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&max_tr, &count);
        if (ret)
                goto out;

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* do the test by disabling interrupts first this time */
        tracing_max_latency = 0;
        tracing_start();
        trace->start(tr);

        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&max_tr, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

out:
        tracing_start();
out_no_start:
        trace->reset(tr);
        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
        /* What could possibly go wrong? */
        return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
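/* RT kthread used as the wakeup target for the wakeup tracer selftest. */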
static int trace_wakeup_test_thread(void *data)
{
        /* Make this an RT thread, it doesn't need to be too high */
        struct sched_param param = { .sched_priority = 5 };
        struct completion *x = data;

        sched_setscheduler(current, SCHED_FIFO, &param);

        /* Let the test know we have the new prio */
        complete(x);

        /* now go to sleep and let the test wake us up */
        set_current_state(TASK_INTERRUPTIBLE);
        schedule();

        /* we are awake, now wait to disappear */
        while (!kthread_should_stop()) {
                /*
                 * This is an RT task, do short sleeps to let
                 * others run.
                 */
                msleep(100);
        }

        return 0;
}

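/*
 * Create the RT thread, wake it while tracing, and check that the wakeup
 * latency was recorded in both the live and max-latency buffers.
 */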
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        struct task_struct *p;
        struct completion isrt;
        unsigned long count;
        int ret;

        init_completion(&isrt);

        /* create a high prio thread */
        p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
        if (IS_ERR(p)) {
                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
                return -1;
        }

        /* make sure the thread is running at an RT prio */
        wait_for_completion(&isrt);

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* sleep to let the RT thread sleep too */
        msleep(100);

        /*
         * Yes, this is slightly racy. It is possible that, for some
         * strange reason, the RT thread we created did not call
         * schedule for 100ms after doing the completion, and we do
         * a wakeup on a task that already is awake. But that is
         * extremely unlikely, and the worst thing that happens in
         * such a case is that we disable tracing. Honestly, if this
         * race does happen, something is horribly wrong with the system.
         */

        wake_up_process(p);

        /* give a little time to let the thread wake up */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);

        trace->reset(tr);
        tracing_start();

        tracing_max_latency = save_max;

        /* kill the thread */
        kthread_stop(p);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
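/* Let the scheduler run for 100ms and check that context switches were traced. */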
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for a 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
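/* Run the sysprof tracer for 100ms and check that it produced entries. */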
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for a 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
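/* Run the branch tracer for 100ms and check that it produced entries. */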
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for a 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_BRANCH_TRACER */

#ifdef CONFIG_HW_BRANCH_TRACER
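/*
 * The hw-branch tracer only drains the per-cpu hardware buffers when an
 * iterator is opened, so open a dummy one before checking the trace buffer.
 */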
int
trace_selftest_startup_hw_branches(struct tracer *trace,
                                   struct trace_array *tr)
{
        struct trace_iterator *iter;
        struct tracer tracer;
        unsigned long count;
        int ret;

        if (!trace->open) {
                printk(KERN_CONT "missing open function...");
                return -1;
        }

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /*
         * The hw-branch tracer needs to collect the trace from the various
         * cpu trace buffers - before tracing is stopped.
         */
        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        memcpy(&tracer, trace, sizeof(tracer));

        iter->trace = &tracer;
        iter->tr = tr;
        iter->pos = -1;
        mutex_init(&iter->mutex);

        trace->open(iter);

        mutex_destroy(&iter->mutex);
        kfree(iter);

        tracing_stop();

        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT "no entries found..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_HW_BRANCH_TRACER */