/* Include in trace.c */

#include <uapi/linux/sched/types.h>
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

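/*
 * Only the entry types that the tracers exercised below can emit are
 * considered valid; anything else found while draining the ring buffer
 * is reported as corruption.
 */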
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

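/*
 * Each selftest below follows the same basic pattern: initialize the
 * tracer, exercise it (sleep, disable irqs/preemption, wake a task),
 * stop tracing, then verify the per-CPU buffers with
 * trace_test_buffer(), e.g.:
 *
 *	ret = tracer_init(trace, tr);
 *	msleep(100);
 *	tracing_stop();
 *	ret = trace_test_buffer(&tr->trace_buffer, &count);
 *	trace->reset(tr);
 *	tracing_start();
 */
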
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func = trace_selftest_test_probe1_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func = trace_selftest_test_probe2_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func = trace_selftest_test_probe3_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

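/*
 * These ops are exercised by trace_selftest_ops() below: each probe is
 * given a filter with ftrace_set_filter() and then attached with
 * register_ftrace_function(), e.g.:
 *
 *	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
 *	register_ftrace_function(&test_probe1);
 *	...
 *	unregister_ftrace_function(&test_probe1);
 */
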
static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	/* First time we are running with main function */
	if (cnt > 1) {
		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
		register_ftrace_function(tr->ops);
	}

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out;
	}

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	if (cnt > 1)
		unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

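/*
 * trace_selftest_ops() is invoked twice by the dynamic tracing test
 * below: with cnt == 1 while global tracing is still running, and with
 * cnt == 2 after the global filter has been cleared. Only the cnt > 1
 * pass installs tr->ops via ftrace_init_array_ops() and checks the
 * global counter.
 */
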
/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
						  struct trace_array *tr,
						  int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	ftrace_enabled = 1;
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(tr, 1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(tr, 2);

	return ret;
}
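
/*
 * The function tracer selftest below passes DYN_FTRACE_TEST_NAME as
 * the func argument, so the global filter set above matches exactly
 * one function:
 *
 *	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
 *						     DYN_FTRACE_TEST_NAME);
 */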

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion protection. By
	 * calling this function again, we should recurse back into this
	 * function and count again. But this only happens if the arch
	 * supports all of the ftrace features and nothing else is using
	 * the function tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
	.func = trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
	.func = trace_selftest_test_recursion_safe_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

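/*
 * Of the two probes above, only test_recsafe_probe sets
 * FTRACE_OPS_FL_RECURSION_SAFE, i.e. it promises to handle its own
 * recursion; test_rec_probe relies on the ftrace infrastructure to
 * break the recursion. The counts checked below (1 call vs 2 calls)
 * verify both behaviors.
 */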
static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 1) {
		pr_cont("*callback not called once (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	if (pt_regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func = trace_selftest_test_regs_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};

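/*
 * FTRACE_OPS_FL_SAVE_REGS makes registration fail on an arch without
 * CONFIG_DYNAMIC_FTRACE_WITH_REGS; the test below then retries with
 * FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED, which must succeed but hands
 * the callback a NULL pt_regs:
 *
 *	test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
 *	ret = register_ftrace_function(&test_regs_probe);
 */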
static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	ftrace_enabled = 1;
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace);
}

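/*
 * register_ftrace_graph() takes a return handler and an entry handler;
 * the selftest below wraps the real entry handler with the watchdog
 * above so a hung graph tracer is detected instead of locking the box:
 *
 *	ret = register_ftrace_graph(&trace_graph_return,
 *				    &trace_graph_entry_watchdog);
 */
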
/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

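/*
 * The latency tracer tests below all follow the same pattern: save and
 * zero tr->max_latency, generate a latency with irqs and/or preemption
 * disabled around a udelay(), then check both the live buffer and the
 * max (snapshot) buffer, e.g.:
 *
 *	save_max = tr->max_latency;
 *	tr->max_latency = 0;
 *	local_irq_disable();
 *	udelay(100);
 *	local_irq_enable();
 *	...
 *	ret = trace_test_buffer(&tr->max_buffer, &count);
 *	tr->max_latency = save_max;
 */
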
#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tr->max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

struct wakeup_test_data {
	struct completion is_ready;
	int go;
};

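/*
 * The wakeup test hands this to a SCHED_DEADLINE kthread and reuses the
 * completion twice: once for the thread to signal that it has set its
 * scheduling policy, and once to signal that it has woken up after
 * data.go is set.
 */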
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
	struct wakeup_test_data *x = data;

	sched_setattr(current, &attr);

	/* Let the test know we have the new scheduling policy */
	complete(&x->is_ready);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!x->go) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	complete(&x->is_ready);

	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	struct task_struct *p;
	struct wakeup_test_data data;
	unsigned long count;
	int ret;

	memset(&data, 0, sizeof(data));

	init_completion(&data.is_ready);

	/* create a -deadline thread */
	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at -deadline policy */
	wait_for_completion(&data.is_ready);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the -deadline thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&data.is_ready);

	data.go = 1;
	/* memory barrier is in the wake_up_process() */

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&data.is_ready);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	trace->reset(tr);
	tracing_start();

	tr->max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */