/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

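/*
 * Each trace_selftest_startup_*() function below is called from trace.c
 * when the corresponding tracer registers itself (the startup self-tests,
 * normally gated by CONFIG_FTRACE_STARTUP_TEST). The general pattern is:
 * initialize the tracer, generate some activity, stop tracing, and then
 * walk the ring buffer to verify that sane entries were recorded.
 */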
static inline int trace_valid_entry(struct trace_entry *entry)
{
        switch (entry->type) {
        case TRACE_FN:
        case TRACE_CTX:
        case TRACE_WAKE:
        case TRACE_STACK:
        case TRACE_PRINT:
        case TRACE_BRANCH:
        case TRACE_GRAPH_ENT:
        case TRACE_GRAPH_RET:
                return 1;
        }
        return 0;
}

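/*
 * Consume every event queued on one CPU of the given buffer and make
 * sure each one carries a known entry type. Any unknown entry means
 * the ring buffer has been corrupted, so tracing is shut down.
 */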
static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
{
        struct ring_buffer_event *event;
        struct trace_entry *entry;
        unsigned int loops = 0;

        while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
                entry = ring_buffer_event_data(event);

                /*
                 * The ring buffer is only trace_buf_size in size, so
                 * if we loop more times than that, something is wrong
                 * with the ring buffer.
                 */
                if (loops++ > trace_buf_size) {
                        printk(KERN_CONT ".. bad ring buffer ");
                        goto failed;
                }
                if (!trace_valid_entry(entry)) {
                        printk(KERN_CONT ".. invalid entry %d ",
                                entry->type);
                        goto failed;
                }
        }
        return 0;

failed:
        /* disable tracing */
        tracing_disabled = 1;
        printk(KERN_CONT ".. corrupted trace buffer .. ");
        return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;

        /* Don't allow flipping of max traces now */
        local_irq_save(flags);
        arch_spin_lock(&buf->tr->max_lock);

        cnt = ring_buffer_entries(buf->buffer);

        /*
         * The trace_test_buffer_cpu runs a while loop to consume all data.
         * If the calling tracer is broken, and is constantly filling
         * the buffer, this will run forever, and hard lock the box.
         * We disable the ring buffer while we do this test to prevent
         * a hard lock up.
         */
        tracing_off();
        for_each_possible_cpu(cpu) {
                ret = trace_test_buffer_cpu(buf, cpu);
                if (ret)
                        break;
        }
        tracing_on();
        arch_spin_unlock(&buf->tr->max_lock);
        local_irq_restore(flags);

        if (count)
                *count = cnt;

        return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
               trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

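/*
 * Each test probe below simply counts how many times it is called.
 * trace_selftest_ops() installs filters on these probes and then
 * checks the counters against the number of calls it expects to see.
 */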
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct pt_regs *pt_regs)
{
        trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct pt_regs *pt_regs)
{
        trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct pt_regs *pt_regs)
{
        trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct pt_regs *pt_regs)
{
        trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
                                         unsigned long pip,
                                         struct ftrace_ops *op,
                                         struct pt_regs *pt_regs)
{
        trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
        .func = trace_selftest_test_probe1_func,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
        .func = trace_selftest_test_probe2_func,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
        .func = trace_selftest_test_probe3_func,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static void print_counts(void)
{
        printk("(%d %d %d %d %d) ",
               trace_selftest_test_probe1_cnt,
               trace_selftest_test_probe2_cnt,
               trace_selftest_test_probe3_cnt,
               trace_selftest_test_global_cnt,
               trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
        trace_selftest_test_probe1_cnt = 0;
        trace_selftest_test_probe2_cnt = 0;
        trace_selftest_test_probe3_cnt = 0;
        trace_selftest_test_global_cnt = 0;
        trace_selftest_test_dyn_cnt = 0;
}

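/*
 * Exercise multiple ftrace_ops sharing the function tracer:
 *   probe1 filters on DYN_FTRACE_TEST_NAME,
 *   probe2 filters on DYN_FTRACE_TEST_NAME2,
 *   probe3 filters on both.
 * A dynamically allocated ops with no filter is added part way
 * through, and when cnt > 1 the trace_array's own ops is registered
 * as well. After each call to the test functions the counters must
 * match exactly how many times each probe should have fired.
 */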
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
        int save_ftrace_enabled = ftrace_enabled;
        struct ftrace_ops *dyn_ops;
        char *func1_name;
        char *func2_name;
        int len1;
        int len2;
        int ret = -1;

        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace ops #%d: ", cnt);

        ftrace_enabled = 1;
        reset_counts();

        /* Handle PPC64 '.' name */
        func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
        len1 = strlen(func1_name);
        len2 = strlen(func2_name);

        /*
         * Probe 1 will trace function 1.
         * Probe 2 will trace function 2.
         * Probe 3 will trace functions 1 and 2.
         */
        ftrace_set_filter(&test_probe1, func1_name, len1, 1);
        ftrace_set_filter(&test_probe2, func2_name, len2, 1);
        ftrace_set_filter(&test_probe3, func1_name, len1, 1);
        ftrace_set_filter(&test_probe3, func2_name, len2, 0);

        register_ftrace_function(&test_probe1);
        register_ftrace_function(&test_probe2);
        register_ftrace_function(&test_probe3);
        /* First time we are running with main function */
        if (cnt > 1) {
                ftrace_init_array_ops(tr, trace_selftest_test_global_func);
                register_ftrace_function(tr->ops);
        }

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 0)
                goto out;
        if (trace_selftest_test_probe3_cnt != 1)
                goto out;
        if (cnt > 1) {
                if (trace_selftest_test_global_cnt == 0)
                        goto out;
        }

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out;
        if (trace_selftest_test_probe3_cnt != 2)
                goto out;

        /* Add a dynamic probe */
        dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
        if (!dyn_ops) {
                printk("MEMORY ERROR ");
                goto out;
        }

        dyn_ops->func = trace_selftest_test_dyn_func;

        register_ftrace_function(dyn_ops);

        trace_selftest_test_global_cnt = 0;

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 3)
                goto out_free;
        if (cnt > 1) {
                if (trace_selftest_test_global_cnt == 0)
                        goto out;
        }
        if (trace_selftest_test_dyn_cnt == 0)
                goto out_free;

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 4)
                goto out_free;

        ret = 0;
out_free:
        unregister_ftrace_function(dyn_ops);
        kfree(dyn_ops);

out:
        /* Purposely unregister in the same order */
        unregister_ftrace_function(&test_probe1);
        unregister_ftrace_function(&test_probe2);
        unregister_ftrace_function(&test_probe3);
        if (cnt > 1)
                unregister_ftrace_function(tr->ops);
        ftrace_reset_array_ops(tr);

        /* Make sure everything is off */
        reset_counts();
        DYN_FTRACE_TEST_NAME();
        DYN_FTRACE_TEST_NAME();

        if (trace_selftest_test_probe1_cnt ||
            trace_selftest_test_probe2_cnt ||
            trace_selftest_test_probe3_cnt ||
            trace_selftest_test_global_cnt ||
            trace_selftest_test_dyn_cnt)
                ret = -1;

        ftrace_enabled = save_ftrace_enabled;

        return ret;
}

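/*
 * The test below filters function tracing down to DYN_FTRACE_TEST_NAME
 * only, verifies that exactly one entry lands in the trace buffer, and
 * then runs trace_selftest_ops() with global tracing both on and off.
 */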
/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                                                  struct trace_array *tr,
                                                  int (*func)(void))
{
        int save_ftrace_enabled = ftrace_enabled;
        unsigned long count;
        char *func_name;
        int ret;

        /* The ftrace test PASSED */
        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* the function is passed in as a parameter to keep gcc from optimizing it away */
        func();

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

        /* filter only on our function */
        ftrace_set_global_filter(func_name, strlen(func_name), 1);

        /* enable tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* we should have nothing in the buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);
        if (ret)
                goto out;

        if (count) {
                ret = -1;
                printk(KERN_CONT ".. filter did not filter .. ");
                goto out;
        }

        /* call our function again */
        func();

        /* sleep again */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);

        ftrace_enabled = 1;
        tracing_start();

        /* we should only have one item */
        if (!ret && count != 1) {
                trace->reset(tr);
                printk(KERN_CONT ".. filter failed count=%ld ..", count);
                ret = -1;
                goto out;
        }

        /* Test the ops with global tracing running */
        ret = trace_selftest_ops(tr, 1);
        trace->reset(tr);

out:
        ftrace_enabled = save_ftrace_enabled;

        /* Enable tracing on all functions again */
        ftrace_set_global_filter(NULL, 0, 1);

        /* Test the ops with global tracing off */
        if (!ret)
                ret = trace_selftest_ops(tr, 2);

        return ret;
}

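/*
 * Recursion test: test_rec_probe is registered without the
 * RECURSION_SAFE flag, so the ftrace core must keep its callback from
 * recursing when it calls DYN_FTRACE_TEST_NAME() itself (the counter
 * must end up at exactly 1). test_recsafe_probe sets RECURSION_SAFE,
 * claiming to handle recursion on its own, so one level of recursion
 * is expected and the counter must end up at 2.
 */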
static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
                                               unsigned long pip,
                                               struct ftrace_ops *op,
                                               struct pt_regs *pt_regs)
{
        /*
         * This function is registered without the recursion safe flag.
         * The ftrace infrastructure should provide the recursion
         * protection. If not, this will crash the kernel!
         */
        if (trace_selftest_recursion_cnt++ > 10)
                return;
        DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
                                                    unsigned long pip,
                                                    struct ftrace_ops *op,
                                                    struct pt_regs *pt_regs)
{
        /*
         * We said we would provide our own recursion protection.
         * By calling this function again, we should recurse back
         * into this function and count again. But this only happens
         * if the arch supports all of ftrace's features and nothing
         * else is using the function tracing utility.
         */
        if (trace_selftest_recursion_cnt++)
                return;
        DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
        .func = trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
        .func = trace_selftest_test_recursion_safe_func,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static int
trace_selftest_function_recursion(void)
{
        int save_ftrace_enabled = ftrace_enabled;
        char *func_name;
        int len;
        int ret;

        /* The previous test PASSED */
        pr_cont("PASSED\n");
        pr_info("Testing ftrace recursion: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* Handle PPC64 '.' name */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
        if (ret) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_rec_probe);
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_rec_probe);

        ret = -1;
        if (trace_selftest_recursion_cnt != 1) {
                pr_cont("*callback not called once (%d)* ",
                        trace_selftest_recursion_cnt);
                goto out;
        }

        trace_selftest_recursion_cnt = 1;

        pr_cont("PASSED\n");
        pr_info("Testing ftrace recursion safe: ");

        ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
        if (ret) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_recsafe_probe);
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_recsafe_probe);

        ret = -1;
        if (trace_selftest_recursion_cnt != 2) {
                pr_cont("*callback not called expected 2 times (%d)* ",
                        trace_selftest_recursion_cnt);
                goto out;
        }

        ret = 0;
out:
        ftrace_enabled = save_ftrace_enabled;

        return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

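/*
 * The regs test checks FTRACE_OPS_FL_SAVE_REGS: on architectures with
 * DYNAMIC_FTRACE_WITH_REGS the callback must receive a non-NULL
 * pt_regs, while on other architectures registering a SAVE_REGS ops
 * must fail unless SAVE_REGS_IF_SUPPORTED is also set, in which case
 * the callback must see NULL regs.
 */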
static enum {
        TRACE_SELFTEST_REGS_START,
        TRACE_SELFTEST_REGS_FOUND,
        TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
                                          unsigned long pip,
                                          struct ftrace_ops *op,
                                          struct pt_regs *pt_regs)
{
        if (pt_regs)
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
        else
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
        .func = trace_selftest_test_regs_func,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};

static int
trace_selftest_function_regs(void)
{
        int save_ftrace_enabled = ftrace_enabled;
        char *func_name;
        int len;
        int ret;
        int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        supported = 1;
#endif

        /* The previous test PASSED */
        pr_cont("PASSED\n");
        pr_info("Testing ftrace regs%s: ",
                !supported ? "(no arch support)" : "");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* Handle PPC64 '.' name */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
        /*
         * If DYNAMIC_FTRACE is not set, then we just trace all functions.
         * This test really doesn't care.
         */
        if (ret && ret != -ENODEV) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_regs_probe);
        /*
         * Now if the arch does not support passing regs, then this should
         * have failed.
         */
        if (!supported) {
                if (!ret) {
                        pr_cont("*registered save-regs without arch support* ");
                        goto out;
                }
                test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
                ret = register_ftrace_function(&test_regs_probe);
        }
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_regs_probe);

        ret = -1;

        switch (trace_selftest_regs_stat) {
        case TRACE_SELFTEST_REGS_START:
                pr_cont("*callback never called* ");
                goto out;

        case TRACE_SELFTEST_REGS_FOUND:
                if (supported)
                        break;
                pr_cont("*callback received regs without arch support* ");
                goto out;

        case TRACE_SELFTEST_REGS_NOT_FOUND:
                if (!supported)
                        break;
                pr_cont("*callback received NULL regs* ");
                goto out;
        }

        ret = 0;
out:
        ftrace_enabled = save_ftrace_enabled;

        return ret;
}

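/*
 * Entry point for the function tracer selftest. After the basic
 * "did anything get traced" check it chains into the dynamic tracing,
 * recursion and regs tests above, and kills ftrace completely if any
 * of them fail.
 */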
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
        int save_ftrace_enabled = ftrace_enabled;
        unsigned long count;
        int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
        if (ftrace_filter_param) {
                printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
                return 0;
        }
#endif

        /* make sure msleep has been recorded */
        msleep(1);

        /* start the tracing */
        ftrace_enabled = 1;

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);

        ftrace_enabled = 1;
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                                                     DYN_FTRACE_TEST_NAME);
        if (ret)
                goto out;

        ret = trace_selftest_function_recursion();
        if (ret)
                goto out;

        ret = trace_selftest_function_regs();
out:
        ftrace_enabled = save_ftrace_enabled;

        /* kill ftrace totally if we failed */
        if (ret)
                ftrace_kill();

        return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST     100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
        /* This is harmlessly racy, we want to approximately detect a hang */
        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
                ftrace_graph_stop();
                printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
                if (ftrace_dump_on_oops) {
                        ftrace_dump(DUMP_ALL);
                        /* ftrace_dump() disables tracing */
                        tracing_on();
                }
                return 0;
        }

        return trace_graph_entry(trace);
}

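/*
 * The graph selftest registers trace_graph_entry_watchdog() in place of
 * the normal entry callback so a broken graph tracer cannot hang the
 * boot: after GRAPH_MAX_FUNC_TEST entries the watchdog stops the graph
 * tracer and the selftest fails instead.
 */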
/*
 * Pretty much the same as the function tracer, from which this
 * selftest has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
                                      struct trace_array *tr)
{
        int ret;
        unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
        if (ftrace_filter_param) {
                printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
                return 0;
        }
#endif

        /*
         * Simulate the init() callback but we attach a watchdog callback
         * to detect and recover from possible hangs
         */
        tracing_reset_online_cpus(&tr->trace_buffer);
        set_graph_array(tr);
        ret = register_ftrace_graph(&trace_graph_return,
                                    &trace_graph_entry_watchdog);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }
        tracing_start_cmdline_record();

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* Have we just recovered from a hang? */
        if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
                tracing_selftest_disabled = true;
                ret = -1;
                goto out;
        }

        tracing_stop();

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);

        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* Don't test dynamic tracing, the function tracer already did */

out:
        /* Stop it if we failed */
        if (ret)
                ftrace_graph_stop();

        return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

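/*
 * The latency tracer selftests below all follow the same pattern:
 * reset tr->max_latency, trigger the condition being traced (irqs off,
 * preemption off, or both) for roughly 100us, then check both the live
 * trace buffer and the max (snapshot) buffer for valid entries.
 */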
#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;
        /* disable interrupts for a bit */
        local_irq_disable();
        udelay(100);
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max irqs-off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;
        /* disable preemption for a bit */
        preempt_disable();
        udelay(100);
        preempt_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max preempt-off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out_no_start;
        }

        /* reset the max latency */
        tr->max_latency = 0;

        /* disable preemption and interrupts for a bit */
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max irqs/preempt off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&tr->max_buffer, &count);
        if (ret)
                goto out;

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* do the test by disabling interrupts first this time */
        tr->max_latency = 0;
        tracing_start();
        trace->start(tr);

        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&tr->max_buffer, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

out:
        tracing_start();
out_no_start:
        trace->reset(tr);
        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
        /* What could possibly go wrong? */
        return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

struct wakeup_test_data {
        struct completion       is_ready;
        int                     go;
};

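/*
 * The wakeup selftest spawns a SCHED_DEADLINE kthread, lets it go to
 * sleep, and then wakes it while the tracer is recording so that the
 * wakeup latency shows up in the max buffer.
 */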
static int trace_wakeup_test_thread(void *data)
{
        /* Make this a -deadline thread */
        static const struct sched_attr attr = {
                .sched_policy = SCHED_DEADLINE,
                .sched_runtime = 100000ULL,
                .sched_deadline = 10000000ULL,
                .sched_period = 10000000ULL
        };
        struct wakeup_test_data *x = data;

        sched_setattr(current, &attr);

        /* Make it know we have a new prio */
        complete(&x->is_ready);

        /* now go to sleep and let the test wake us up */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!x->go) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }

        complete(&x->is_ready);

        set_current_state(TASK_INTERRUPTIBLE);

        /* we are awake, now wait to disappear */
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }

        __set_current_state(TASK_RUNNING);

        return 0;
}
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        struct task_struct *p;
        struct wakeup_test_data data;
        unsigned long count;
        int ret;

        memset(&data, 0, sizeof(data));

        init_completion(&data.is_ready);

        /* create a -deadline thread */
        p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
        if (IS_ERR(p)) {
                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
                return -1;
        }

        /* make sure the thread is running at -deadline policy */
        wait_for_completion(&data.is_ready);

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;

        while (p->on_rq) {
                /*
                 * Sleep to make sure the -deadline thread is asleep too.
                 * On virtual machines we can't rely on timings,
                 * but we want to make sure this test still works.
                 */
                msleep(100);
        }

        init_completion(&data.is_ready);

        data.go = 1;
        /* memory barrier is in the wake_up_process() */

        wake_up_process(p);

        /* Wait for the task to wake up */
        wait_for_completion(&data.is_ready);

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);

        trace->reset(tr);
        tracing_start();

        tr->max_latency = save_max;

        /* kill the thread */
        kthread_stop(p);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SCHED_TRACER */

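/*
 * The remaining selftests are simple smoke tests: start the tracer,
 * let the system run for a moment, and verify that the trace buffer
 * contains valid entries.
 */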
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_BRANCH_TRACER */