/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>
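
/*
 * This file is meant to be #included from trace.c (see the comment
 * above) rather than built as a separate object, which lets the
 * selftests use trace.c-internal state (e.g. tracing_disabled,
 * tracer_enabled, max_tr) directly.
 */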

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}
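
/*
 * Consume every event still queued on @cpu and sanity-check that each
 * one is an entry type the selftests could have produced; anything
 * else is treated as ring buffer corruption.
 */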
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lockup.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
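
/*
 * Every tracer selftest below follows the same basic skeleton:
 *
 *	ret = tracer_init(trace, tr);	     attach the tracer under test
 *	...exercise the kernel...	     msleep(), irqs off, a wakeup, ...
 *	tracing_stop();			     freeze the buffers
 *	ret = trace_test_buffer(tr, &count);
 *	trace->reset(tr);		     detach the tracer
 *	tracing_start();
 *
 * and fails if the buffer is corrupted, or if it is empty when entries
 * were expected.
 */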

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
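
/*
 * Without CONFIG_DYNAMIC_FTRACE the macro above collapses to the GCC
 * statement expression ({ 0; }), which simply evaluates to 0 (success),
 * so the caller in trace_selftest_startup_function() needs no #ifdef
 * of its own.
 */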

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false, DUMP_ALL);
		return 0;
	}

	return trace_graph_entry(trace);
}
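
/*
 * The watchdog above increments graph_hang_thresh on every traced
 * function entry; crossing GRAPH_MAX_FUNC_TEST during the short test
 * window is taken to mean the graph tracer is stuck, so it is shut
 * down before it can hang the box.
 */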

/*
 * Pretty much the same as for the function tracer from which the
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
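/*
 * irqsoff selftest: disable interrupts for ~100us and check that the
 * irqsoff latency tracer captured the window in the max_tr buffer.
 */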
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
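/*
 * preemptirqsoff selftest: run the preempt+irqs-off critical section
 * twice, checking both the live and max buffers each time.
 */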
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif
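
/*
 * The wakeup latency selftest needs a real-time victim:
 * trace_wakeup_test_thread() makes itself SCHED_FIFO and parks in
 * TASK_INTERRUPTIBLE, so the test can measure how quickly an RT task
 * gets to run after wake_up_process().
 */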
#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it know we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that, for some
	 * strange reason, the RT thread we created did not
	 * call schedule for 100ms after doing the completion,
	 * and we do a wakeup on a task that already is awake.
	 * But that is extremely unlikely, and the worst thing that
	 * happens in such a case, is that we disable tracing.
	 * Honestly, if this race does happen something is horribly
	 * wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
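/*
 * sched_switch selftest: the msleep() below guarantees at least one
 * context switch gets recorded while the tracer is active.
 */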
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */